| patch | y | oldf | idx | id | msg | proj | lang |
|---|---|---|---|---|---|---|---|
| stringlengths 17–31.2k | int64 1–1 | stringlengths 0–2.21M | int64 1–1 | int64 4.29k–68.4k | stringlengths 8–843 | stringclasses 212 values | stringclasses 9 values |
@@ -0,0 +1,7 @@
+package azkaban.imagemgmt.utils;
+
+public interface ImageMgmtConstants {
+ public static final String IMAGE_TYPE = "imageType";
+ public static final String IMAGE_VERSION = "imageVersion";
+
+} | 1 | 1 | 20,186 | Can you please move this interface as a class into the common Constants file in the az-core module? | azkaban-azkaban | java
|
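The review above asks for the new keys to live in a shared constants class rather than a standalone interface. A minimal sketch of what that could look like, assuming az-core exposes an `azkaban.Constants` holder class with nested static classes (the class and package names here are assumptions, not the project's confirmed layout):

```java
package azkaban;

// Hypothetical shared constants holder in the az-core module; the nested
// class mirrors the keys introduced in the patch above.
public class Constants {

    public static class ImageMgmtConstants {
        public static final String IMAGE_TYPE = "imageType";
        public static final String IMAGE_VERSION = "imageVersion";

        private ImageMgmtConstants() {
        }
    }

    private Constants() {
    }
}
```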
@@ -43,12 +43,15 @@ TEST(ProcessUtils, isPidAvailable) {
}
{
// pid file which contains pid of current process
- auto pidFile = "/tmp/maybe-non-existing-dir/process_test.pid";
+ auto pidFile = folly::stringPrintf("/tmp/non-existing-dir-%d/process_test.pid", ::getpid());
+ SCOPE_EXIT {
+ ::unlink(pidFile.c_str());
+ ::rmdir(fs::FileUtils::dirname(pidFile.c_str()).c_str());
+ };
auto status = ProcessUtils::makePidFile(pidFile);
ASSERT_TRUE(status.ok()) << status;
status = ProcessUtils::isPidAvailable(pidFile);
ASSERT_FALSE(status.ok());
- ::unlink(pidFile);
}
{
// pid file not exist | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include <gtest/gtest.h>
#include <fstream>
#include "process/ProcessUtils.h"
#include "fs/FileUtils.h"
namespace nebula {
TEST(ProcessUtils, getExePath) {
auto result = ProcessUtils::getExePath();
ASSERT_TRUE(result.ok()) << result.status();
ASSERT_NE(std::string::npos, result.value().find("process_test")) << result.value();
}
TEST(ProcessUtils, getExeCWD) {
auto result = ProcessUtils::getExeCWD();
ASSERT_TRUE(result.ok()) << result.status();
char buffer[PATH_MAX];
::getcwd(buffer, sizeof(buffer));
ASSERT_EQ(buffer, result.value());
}
TEST(ProcessUtils, isPidAvailable) {
{
auto status = ProcessUtils::isPidAvailable(::getpid());
ASSERT_FALSE(status.ok());
}
{
auto status = ProcessUtils::isPidAvailable(0); // idle/swap
ASSERT_FALSE(status.ok());
}
{
auto status = ProcessUtils::isPidAvailable(1); // systemd
ASSERT_FALSE(status.ok());
}
{
// pid file which contains pid of current process
auto pidFile = "/tmp/maybe-non-existing-dir/process_test.pid";
auto status = ProcessUtils::makePidFile(pidFile);
ASSERT_TRUE(status.ok()) << status;
status = ProcessUtils::isPidAvailable(pidFile);
ASSERT_FALSE(status.ok());
::unlink(pidFile);
}
{
// pid file not exist
auto pidFile = "/tmp/definitely-not-exist.pid";
auto status = ProcessUtils::isPidAvailable(pidFile);
ASSERT_TRUE(status.ok()) << status;
}
{
// choose an available pid
auto genPid = [] () {
auto max = ProcessUtils::maxPid();
while (true) {
uint32_t next = static_cast<uint32_t>(folly::Random::rand64());
next %= max;
if (::kill(next, 0) == -1 && errno == ESRCH) {
return next;
}
}
};
auto pidFile = "/tmp/process_test.pid";
auto status = ProcessUtils::makePidFile(pidFile, genPid());
ASSERT_TRUE(status.ok()) << status;
// there are chances that the chosen pid was occupied already,
// but the chances are negligible, so be it.
status = ProcessUtils::isPidAvailable(pidFile);
ASSERT_TRUE(status.ok()) << status;
::unlink(pidFile);
}
}
TEST(ProcessUtils, getProcessName) {
auto result = ProcessUtils::getProcessName();
ASSERT_TRUE(result.ok()) << result.status();
ASSERT_NE(std::string::npos, result.value().find("process_test")) << result.value();
}
TEST(ProcessUtils, runCommand) {
auto status1 = ProcessUtils::runCommand("echo $HOME");
ASSERT_TRUE(status1.ok()) << status1.status();
EXPECT_EQ(std::string(getenv("HOME")),
folly::rtrimWhitespace(status1.value()).toString());
// Try large output
auto status2 = ProcessUtils::runCommand("cat /etc/profile");
ASSERT_TRUE(status2.ok()) << status2.status();
std::ifstream is("/etc/profile", std::ios::ate);
auto size = is.tellg();
EXPECT_EQ(size, status2.value().size());
std::string buf(size, '\0');
is.seekg(0);
is.read(&buf[0], size);
EXPECT_EQ(buf, status2.value());
}
} // namespace nebula
| 1 | 17,102 | Why not TempDir? | vesoft-inc-nebula | cpp |
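The reviewer's "Why not TempDir?" points at an RAII temp-directory helper instead of the hand-built `/tmp/non-existing-dir-<pid>` path plus `SCOPE_EXIT` cleanup. A sketch of that shape for the test block in question, assuming a `fs::TempDir` helper with a `valid()` check and a `path()` accessor returning a C string (all of which are assumptions here):

```cpp
{
    // Let the helper create and later remove the directory; no manual
    // unlink()/rmdir() needed. fs::TempDir and its API are assumed.
    fs::TempDir tempDir("/tmp/process_test.XXXXXX");
    ASSERT_TRUE(tempDir.valid());

    auto pidFile = folly::stringPrintf("%s/process_test.pid", tempDir.path());
    auto status = ProcessUtils::makePidFile(pidFile);
    ASSERT_TRUE(status.ok()) << status;
    status = ProcessUtils::isPidAvailable(pidFile);
    ASSERT_FALSE(status.ok());
}   // tempDir cleans up the directory and the pid file on scope exit
```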
@@ -0,0 +1,16 @@
+using System;
+using MvvmCross.Binding.BindingContext;
+using MvvmCross.Core.ViewModels;
+using MvvmCross.Core.Views;
+
+namespace MvvmCross.Forms.Views
+{
+ public interface IMvxElement : IMvxView, IMvxBindingContextOwner
+ {
+ }
+
+ public interface IMvxElement<TViewModel>
+ : IMvxElement, IMvxView<TViewModel> where TViewModel : class, IMvxViewModel
+ {
+ }
+} | 1 | 1 | 13,283 | IMvxFormsView -> IMvxElement is a breaking change, isn't it? | MvvmCross-MvvmCross | .cs |
|
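The review flags the `IMvxFormsView` -> `IMvxElement` rename as a potential breaking change. One conventional way to cushion such a rename, shown purely as an illustration and not as part of the actual patch, is to keep the old names as obsolete aliases of the new interfaces:

```csharp
using System;
using MvvmCross.Core.ViewModels;

namespace MvvmCross.Forms.Views
{
    // Hypothetical compatibility shim: existing consumers of IMvxFormsView
    // keep compiling while being nudged toward IMvxElement.
    [Obsolete("Use IMvxElement instead.")]
    public interface IMvxFormsView : IMvxElement
    {
    }

    [Obsolete("Use IMvxElement<TViewModel> instead.")]
    public interface IMvxFormsView<TViewModel> : IMvxElement<TViewModel>
        where TViewModel : class, IMvxViewModel
    {
    }
}
```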
@@ -33,6 +33,7 @@ import yaml
from molecule import config
from molecule import state
from molecule import utilities
+from molecule import ansible_galaxy_install
from molecule.provisioners import baseprovisioner
from molecule.provisioners import dockerprovisioner
from molecule.provisioners import openstackprovisioner | 1 | # Copyright (c) 2015-2016 Cisco Systems
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import collections
import fcntl
import os
import re
import struct
import sys
import termios
import subprocess
import tabulate
import yaml
from molecule import config
from molecule import state
from molecule import utilities
from molecule.provisioners import baseprovisioner
from molecule.provisioners import dockerprovisioner
from molecule.provisioners import openstackprovisioner
from molecule.provisioners import proxmoxprovisioner
from molecule.provisioners import vagrantprovisioner
LOG = utilities.get_logger(__name__)
class Molecule(object):
def __init__(self, args):
self._env = os.environ.copy()
self._args = args
self._provisioner = None
self.config = config.Config()
def main(self):
if not os.path.exists(self.config.config['molecule']['molecule_dir']):
os.makedirs(self.config.config['molecule']['molecule_dir'])
self._state = state.State(
state_file=self.config.config.get('molecule').get('state_file'))
try:
self._provisioner = self.get_provisioner()
except baseprovisioner.InvalidProviderSpecified:
LOG.error("Invalid provider '{}'".format(self._args['--provider']))
self._args['--provider'] = None
self._args['--platform'] = None
self._provisioner = self.get_provisioner()
self._print_valid_providers()
utilities.sysexit()
except baseprovisioner.InvalidPlatformSpecified:
LOG.error("Invalid platform '{}'".format(self._args['--platform']))
self._args['--provider'] = None
self._args['--platform'] = None
self._provisioner = self.get_provisioner()
self._print_valid_platforms()
utilities.sysexit()
# updates instances config with full machine names
self.config.populate_instance_names(self._provisioner.platform)
if self._args.get('--debug'):
utilities.debug('RUNNING CONFIG',
yaml.dump(self.config.config,
default_flow_style=False,
indent=2))
self._add_or_update_vars('group_vars')
self._add_or_update_vars('host_vars')
self._symlink_vars()
def get_provisioner(self):
if 'vagrant' in self.config.config:
return vagrantprovisioner.VagrantProvisioner(self)
elif 'proxmox' in self.config.config:
return proxmoxprovisioner.ProxmoxProvisioner(self)
elif 'docker' in self.config.config:
return dockerprovisioner.DockerProvisioner(self)
elif 'openstack' in self.config.config:
return openstackprovisioner.OpenstackProvisioner(self)
else:
return None
def _write_ssh_config(self):
try:
out = self._provisioner.conf(ssh_config=True)
ssh_config = self._provisioner.ssh_config_file
if ssh_config is None:
return
except subprocess.CalledProcessError as e:
LOG.error('ERROR: {}'.format(e))
LOG.error("Does your vagrant VM exist?")
utilities.sysexit(e.returncode)
utilities.write_file(ssh_config, out)
def _print_valid_platforms(self, porcelain=False):
if not porcelain:
LOG.info("AVAILABLE PLATFORMS")
data = []
default_platform = self._provisioner.default_platform
for platform in self._provisioner.valid_platforms:
if porcelain:
default = 'd' if platform['name'] == default_platform else ''
else:
default = ' (default)' if platform[
'name'] == default_platform else ''
data.append([platform['name'], default])
self._display_tabulate_data(data)
def _print_valid_providers(self, porcelain=False):
if not porcelain:
LOG.info("AVAILABLE PROVIDERS")
data = []
default_provider = self._provisioner.default_provider
for provider in self._provisioner.valid_providers:
if porcelain:
default = 'd' if provider['name'] == default_provider else ''
else:
default = ' (default)' if provider[
'name'] == default_provider else ''
data.append([provider['name'], default])
self._display_tabulate_data(data)
def _sigwinch_passthrough(self, sig, data):
TIOCGWINSZ = 1074295912 # assume
if 'TIOCGWINSZ' in dir(termios):
TIOCGWINSZ = termios.TIOCGWINSZ
s = struct.pack('HHHH', 0, 0, 0, 0)
a = struct.unpack('HHHH', fcntl.ioctl(sys.stdout.fileno(), TIOCGWINSZ,
s))
self._pt.setwinsize(a[0], a[1])
def _parse_provisioning_output(self, output):
"""
Parses the output of the provisioning method.
:param output:
:return: True if the playbook is idempotent, otherwise False
"""
# remove blank lines to make regex matches easier
output = re.sub("\n\s*\n*", "\n", output)
# look for any non-zero changed lines
changed = re.search(r'(changed=[1-9][0-9]*)', output)
# Look for the tasks that have changed.
p = re.compile(ur'NI: (.*$)', re.MULTILINE | re.IGNORECASE)
changed_tasks = re.findall(p, output)
if changed:
return False, changed_tasks
return True, []
def _remove_templates(self):
"""
Removes the templates created by molecule.
:return: None
"""
os.remove(self.config.config['molecule']['rakefile_file'])
if self._state.customconf is False:
os.remove(self.config.config['ansible']['config_file'])
def _create_templates(self):
"""
Creates the templates used by molecule.
:return: None
"""
# ansible.cfg
kwargs = {'molecule_dir':
self.config.config['molecule']['molecule_dir']}
if not os.path.isfile(self.config.config['ansible']['config_file']):
utilities.write_template(
self.config.config['molecule']['ansible_config_template'],
self.config.config['ansible']['config_file'], kwargs=kwargs)
self._state.change_state('customconf', False)
else:
self._state.change_state('customconf', True)
# rakefile
kwargs = {
'state_file': self.config.config['molecule']['state_file'],
'serverspec_dir': self.config.config['molecule']['serverspec_dir']
}
utilities.write_template(
self.config.config['molecule']['rakefile_template'],
self.config.config['molecule']['rakefile_file'], kwargs=kwargs)
def _instances_state(self):
"""
Creates a dict of formatted instances names and the group(s) they're
part of to be added to state.
:return: Dict containing state information about current instances
"""
instances = collections.defaultdict(dict)
for instance in self._provisioner.instances:
instance_name = utilities.format_instance_name(
instance['name'], self._provisioner._platform,
self._provisioner.instances)
if 'ansible_groups' in instance:
instances[instance_name][
'groups'] = [x for x in instance['ansible_groups']]
else:
instances[instance_name]['groups'] = []
return dict(instances)
def _write_instances_state(self):
self._state.change_state('hosts', self._instances_state())
def _create_inventory_file(self):
"""
Creates the inventory file used by molecule and later passed to ansible-playbook.
:return: None
"""
inventory = ''
for instance in self._provisioner.instances:
inventory += self._provisioner.inventory_entry(instance)
# get a list of all groups and hosts in those groups
groups = {}
for instance in self._provisioner.instances:
if 'ansible_groups' in instance:
for group in instance['ansible_groups']:
if group not in groups:
groups[group] = []
groups[group].append(instance['name'])
if self._args.get('--platform') == 'all':
self._provisioner.platform = 'all'
for group, instances in groups.iteritems():
inventory += '\n[{}]\n'.format(group)
for instance in instances:
inventory += '{}\n'.format(utilities.format_instance_name(
instance, self._provisioner.platform,
self._provisioner.instances))
inventory_file = self.config.config['ansible']['inventory_file']
try:
utilities.write_file(inventory_file, inventory)
except IOError:
LOG.warning('WARNING: could not write inventory file {}'.format(
inventory_file))
def _add_or_update_vars(self, target):
"""Creates or updates to host/group variables if needed."""
if target in self.config.config['ansible']:
vars_target = self.config.config['ansible'][target]
else:
return
molecule_dir = self.config.config['molecule']['molecule_dir']
target_vars_path = os.path.join(molecule_dir, target)
if not os.path.exists(os.path.abspath(target_vars_path)):
os.mkdir(os.path.abspath(target_vars_path))
for target in vars_target.keys():
target_var_content = vars_target[target][0]
utilities.write_file(
os.path.join(
os.path.abspath(target_vars_path), target),
"---\n" + yaml.dump(target_var_content,
default_flow_style=False))
def _symlink_vars(self):
"""Creates or updates the symlink to group_vars if needed."""
SYMLINK_NAME = 'group_vars'
group_vars_target = self.config.config.get('molecule',
{}).get('group_vars')
molecule_dir = self.config.config['molecule']['molecule_dir']
group_vars_link_path = os.path.join(molecule_dir, SYMLINK_NAME)
# Remove any previous symlink.
if os.path.lexists(group_vars_link_path):
try:
os.unlink(group_vars_link_path)
except:
pass
# Do not create the symlink if nothing is specified in the config.
if not group_vars_target:
return
# Otherwise create the new symlink.
symlink = os.path.join(
os.path.abspath(molecule_dir), group_vars_target)
if not os.path.exists(symlink):
LOG.error(
'ERROR: the group_vars path {} does not exist. Check your configuration file'.format(
group_vars_target))
utilities.sysexit()
os.symlink(group_vars_target, group_vars_link_path)
def _display_tabulate_data(self, data, headers=None):
"""
Shows the tabulate data on the screen.
If not header is defined, only the data is displayed, otherwise, the results will be shown in a table.
"""
# Nothing to display if there is no data.
if not data:
return
# Initialize empty headers if none are provided.
if not headers:
headers = []
# Define the table format based on the headers content.
table_format = "fancy_grid" if headers else "plain"
# Print the results.
print(tabulate.tabulate(data, headers, tablefmt=table_format))
def _remove_inventory_file(self):
if os._exists(self.config.config['ansible']['inventory_file']):
os.remove(self.config.config['ansible']['inventory_file'])
| 1 | 6,476 | This should be imported alphabetically. | ansible-community-molecule | py |
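The note above is a simple ordering fix: the new import should be slotted in alphabetically with the other `from molecule import ...` lines, e.g.:

```python
from molecule import ansible_galaxy_install
from molecule import config
from molecule import state
from molecule import utilities
from molecule.provisioners import baseprovisioner
from molecule.provisioners import dockerprovisioner
from molecule.provisioners import openstackprovisioner
```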
@@ -11,7 +11,7 @@ use Shopsys\FrameworkBundle\Component\Grid\Ordering\OrderableEntityInterface;
use Shopsys\FrameworkBundle\Component\Money\Money;
use Shopsys\FrameworkBundle\Model\Localization\AbstractTranslatableEntity;
use Shopsys\FrameworkBundle\Model\Payment\Payment;
-use Shopsys\FrameworkBundle\Model\Pricing\Currency\Currency;
+use Shopsys\FrameworkBundle\Model\Pricing\Price;
use Shopsys\FrameworkBundle\Model\Transport\Exception\TransportDomainNotFoundException;
/** | 1 | <?php
namespace Shopsys\FrameworkBundle\Model\Transport;
use Doctrine\Common\Collections\ArrayCollection;
use Doctrine\ORM\Mapping as ORM;
use Doctrine\ORM\Mapping\ManyToMany;
use Gedmo\Mapping\Annotation as Gedmo;
use Prezent\Doctrine\Translatable\Annotation as Prezent;
use Shopsys\FrameworkBundle\Component\Grid\Ordering\OrderableEntityInterface;
use Shopsys\FrameworkBundle\Component\Money\Money;
use Shopsys\FrameworkBundle\Model\Localization\AbstractTranslatableEntity;
use Shopsys\FrameworkBundle\Model\Payment\Payment;
use Shopsys\FrameworkBundle\Model\Pricing\Currency\Currency;
use Shopsys\FrameworkBundle\Model\Transport\Exception\TransportDomainNotFoundException;
/**
* @ORM\Table(name="transports")
* @ORM\Entity
*
* @method TransportTranslation translation(?string $locale = null)
*/
class Transport extends AbstractTranslatableEntity implements OrderableEntityInterface
{
protected const GEDMO_SORTABLE_LAST_POSITION = -1;
/**
* @var int
*
* @ORM\Column(type="integer")
* @ORM\Id
* @ORM\GeneratedValue(strategy="IDENTITY")
*/
protected $id;
/**
* @var \Shopsys\FrameworkBundle\Model\Transport\TransportTranslation[]|\Doctrine\Common\Collections\Collection
*
* @Prezent\Translations(targetEntity="Shopsys\FrameworkBundle\Model\Transport\TransportTranslation")
*/
protected $translations;
/**
* @var \Shopsys\FrameworkBundle\Model\Transport\TransportDomain[]|\Doctrine\Common\Collections\Collection
*
* @ORM\OneToMany(targetEntity="Shopsys\FrameworkBundle\Model\Transport\TransportDomain", mappedBy="transport", cascade={"persist"}, fetch="EXTRA_LAZY")
*/
protected $domains;
/**
* @var \Shopsys\FrameworkBundle\Model\Transport\TransportPrice[]|\Doctrine\Common\Collections\Collection
*
* @ORM\OneToMany(targetEntity="Shopsys\FrameworkBundle\Model\Transport\TransportPrice", mappedBy="transport", cascade={"persist"})
*/
protected $prices;
/**
* @var \Shopsys\FrameworkBundle\Model\Pricing\Vat\Vat
*
* @ORM\ManyToOne(targetEntity="Shopsys\FrameworkBundle\Model\Pricing\Vat\Vat")
* @ORM\JoinColumn(nullable=false)
*/
protected $vat;
/**
* @var bool
*
* @ORM\Column(type="boolean")
*/
protected $hidden;
/**
* @var bool
*
* @ORM\Column(type="boolean")
*/
protected $deleted;
/**
* @var int|null
*
* @Gedmo\SortablePosition
* @ORM\Column(type="integer", nullable=false)
*/
protected $position;
/**
* @var \Shopsys\FrameworkBundle\Model\Payment\Payment[]|\Doctrine\Common\Collections\Collection
* @ManyToMany(targetEntity="Shopsys\FrameworkBundle\Model\Payment\Payment", mappedBy="transports", cascade={"persist"})
*/
protected $payments;
/**
* @param \Shopsys\FrameworkBundle\Model\Transport\TransportData $transportData
*/
public function __construct(TransportData $transportData)
{
$this->translations = new ArrayCollection();
$this->domains = new ArrayCollection();
$this->vat = $transportData->vat;
$this->hidden = $transportData->hidden;
$this->deleted = false;
$this->setTranslations($transportData);
$this->createDomains($transportData);
$this->prices = new ArrayCollection();
$this->position = static::GEDMO_SORTABLE_LAST_POSITION;
$this->payments = new ArrayCollection();
}
/**
* @param \Shopsys\FrameworkBundle\Model\Transport\TransportData $transportData
*/
public function edit(TransportData $transportData)
{
$this->vat = $transportData->vat;
$this->hidden = $transportData->hidden;
$this->setTranslations($transportData);
$this->setDomains($transportData);
}
/**
* @param \Shopsys\FrameworkBundle\Model\Transport\TransportData $transportData
*/
protected function setTranslations(TransportData $transportData)
{
foreach ($transportData->name as $locale => $name) {
$this->translation($locale)->setName($name);
}
foreach ($transportData->description as $locale => $description) {
$this->translation($locale)->setDescription($description);
}
foreach ($transportData->instructions as $locale => $instructions) {
$this->translation($locale)->setInstructions($instructions);
}
}
/**
* @return int
*/
public function getId()
{
return $this->id;
}
/**
* @param string|null $locale
* @return string
*/
public function getName($locale = null)
{
return $this->translation($locale)->getName();
}
/**
* @param string|null $locale
* @return string|null
*/
public function getDescription($locale = null)
{
return $this->translation($locale)->getDescription();
}
/**
* @param string|null $locale
* @return string|null
*/
public function getInstructions($locale = null)
{
return $this->translation($locale)->getInstructions();
}
/**
* @param int $domainId
* @return bool
*/
public function isEnabled(int $domainId)
{
return $this->getTransportDomain($domainId)->isEnabled();
}
/**
* @return \Shopsys\FrameworkBundle\Model\Transport\TransportPrice[]
*/
public function getPrices()
{
return $this->prices->toArray();
}
/**
* @param \Shopsys\FrameworkBundle\Model\Pricing\Currency\Currency $currency
* @return \Shopsys\FrameworkBundle\Model\Transport\TransportPrice
*/
public function getPrice(Currency $currency)
{
foreach ($this->prices as $price) {
if ($price->getCurrency() === $currency) {
return $price;
}
}
$message = 'Transport price with currency ID ' . $currency->getId()
. ' from transport with ID ' . $this->getId() . 'not found.';
throw new \Shopsys\FrameworkBundle\Model\Transport\Exception\TransportPriceNotFoundException($message);
}
/**
* @param \Shopsys\FrameworkBundle\Model\Transport\TransportPriceFactoryInterface $transportPriceFactory
* @param \Shopsys\FrameworkBundle\Model\Pricing\Currency\Currency $currency
* @param \Shopsys\FrameworkBundle\Component\Money\Money $price
*/
public function setPrice(
TransportPriceFactoryInterface $transportPriceFactory,
Currency $currency,
Money $price
) {
foreach ($this->prices as $transportInputPrice) {
if ($transportInputPrice->getCurrency() === $currency) {
$transportInputPrice->setPrice($price);
return;
}
}
$this->prices->add($transportPriceFactory->create($this, $currency, $price));
}
/**
* @return \Shopsys\FrameworkBundle\Model\Pricing\Vat\Vat
*/
public function getVat()
{
return $this->vat;
}
/**
* @return bool
*/
public function isHidden()
{
return $this->hidden;
}
/**
* @return bool
*/
public function isDeleted()
{
return $this->deleted;
}
public function markAsDeleted()
{
$this->deleted = true;
}
/**
* @return int|null
*/
public function getPosition()
{
return $this->position;
}
/**
* @param int $position
*/
public function setPosition($position)
{
$this->position = $position;
}
/**
* @param \Shopsys\FrameworkBundle\Model\Transport\TransportData $transportData
*/
protected function setDomains(TransportData $transportData)
{
foreach ($this->domains as $transportDomain) {
$domainId = $transportDomain->getDomainId();
$transportDomain->setEnabled($transportData->enabled[$domainId]);
}
}
/**
* @param \Shopsys\FrameworkBundle\Model\Transport\TransportData $transportData
*/
protected function createDomains(TransportData $transportData)
{
$domainIds = array_keys($transportData->enabled);
foreach ($domainIds as $domainId) {
$transportDomain = new TransportDomain($this, $domainId);
$this->domains->add($transportDomain);
}
$this->setDomains($transportData);
}
/**
* @return \Shopsys\FrameworkBundle\Model\Transport\TransportTranslation
*/
protected function createTranslation()
{
return new TransportTranslation();
}
/**
* @param \Shopsys\FrameworkBundle\Model\Payment\Payment $payment
*/
public function addPayment(Payment $payment)
{
if (!$this->payments->contains($payment)) {
$this->payments->add($payment);
$payment->addTransport($this);
}
}
/**
* @param \Shopsys\FrameworkBundle\Model\Payment\Payment[] $payments
*/
public function setPayments(array $payments)
{
foreach ($this->payments as $currentPayment) {
if (!in_array($currentPayment, $payments, true)) {
$this->removePayment($currentPayment);
}
}
foreach ($payments as $newPayment) {
$this->addPayment($newPayment);
}
}
/**
* @param \Shopsys\FrameworkBundle\Model\Payment\Payment $payment
*/
public function removePayment(Payment $payment)
{
if ($this->payments->contains($payment)) {
$this->payments->removeElement($payment);
$payment->removeTransport($this);
}
}
/**
* @return \Shopsys\FrameworkBundle\Model\Payment\Payment[]
*/
public function getPayments()
{
return $this->payments->toArray();
}
/**
* @param int $domainId
* @return \Shopsys\FrameworkBundle\Model\Transport\TransportDomain
*/
protected function getTransportDomain(int $domainId)
{
foreach ($this->domains as $transportDomain) {
if ($transportDomain->getDomainId() === $domainId) {
return $transportDomain;
}
}
throw new TransportDomainNotFoundException($this->id, $domainId);
}
}
| 1 | 20,036 | Please apply the same changes as for Payment | shopsys-shopsys | php
@@ -55,9 +55,13 @@ import { Cell, Grid, Row } from '../material-components';
import PageHeader from './PageHeader';
import Layout from './layout/Layout';
import { CORE_WIDGETS } from '../googlesitekit/widgets/datastore/constants';
+import { useHasScrolledEffect } from '../hooks/useHasScrolledEffect';
+import ScrollEffect from './ScrollEffect';
const { useSelect } = Data;
function DashboardEntityApp() {
+ useHasScrolledEffect();
+
const currentEntityURL = useSelect( ( select ) =>
select( CORE_SITE ).getCurrentEntityURL()
); | 1 | /**
* DashboardEntityApp component.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import classnames from 'classnames';
/**
* WordPress dependencies
*/
import { createInterpolateElement, Fragment } from '@wordpress/element';
import { __, sprintf } from '@wordpress/i18n';
/**
* Internal dependencies
*/
import Data from 'googlesitekit-data';
import Header from './Header';
import {
CONTEXT_ENTITY_DASHBOARD_TRAFFIC,
CONTEXT_ENTITY_DASHBOARD_CONTENT,
CONTEXT_ENTITY_DASHBOARD_SPEED,
CONTEXT_ENTITY_DASHBOARD_MONETIZATION,
} from '../googlesitekit/widgets/default-contexts';
import WidgetContextRenderer from '../googlesitekit/widgets/components/WidgetContextRenderer';
import EntitySearchInput from './EntitySearchInput';
import DateRangeSelector from './DateRangeSelector';
import HelpMenu from './help/HelpMenu';
import {
ANCHOR_ID_CONTENT,
ANCHOR_ID_MONETIZATION,
ANCHOR_ID_SPEED,
ANCHOR_ID_TRAFFIC,
} from '../googlesitekit/constants';
import { CORE_SITE } from '../googlesitekit/datastore/site/constants';
import Link from './Link';
import VisuallyHidden from './VisuallyHidden';
import { Cell, Grid, Row } from '../material-components';
import PageHeader from './PageHeader';
import Layout from './layout/Layout';
import { CORE_WIDGETS } from '../googlesitekit/widgets/datastore/constants';
const { useSelect } = Data;
function DashboardEntityApp() {
const currentEntityURL = useSelect( ( select ) =>
select( CORE_SITE ).getCurrentEntityURL()
);
const permaLink = useSelect( ( select ) =>
select( CORE_SITE ).getPermaLinkParam()
);
const dashboardURL = useSelect( ( select ) =>
select( CORE_SITE ).getAdminURL( 'googlesitekit-dashboard' )
);
const isTrafficActive = useSelect( ( select ) =>
select( CORE_WIDGETS ).isWidgetContextActive(
CONTEXT_ENTITY_DASHBOARD_TRAFFIC
)
);
const isContentActive = useSelect( ( select ) =>
select( CORE_WIDGETS ).isWidgetContextActive(
CONTEXT_ENTITY_DASHBOARD_CONTENT
)
);
const isSpeedActive = useSelect( ( select ) =>
select( CORE_WIDGETS ).isWidgetContextActive(
CONTEXT_ENTITY_DASHBOARD_SPEED
)
);
const isMonetizationActive = useSelect( ( select ) =>
select( CORE_WIDGETS ).isWidgetContextActive(
CONTEXT_ENTITY_DASHBOARD_MONETIZATION
)
);
let lastWidgetAnchor = null;
if ( isMonetizationActive ) {
lastWidgetAnchor = ANCHOR_ID_MONETIZATION;
} else if ( isSpeedActive ) {
lastWidgetAnchor = ANCHOR_ID_SPEED;
} else if ( isContentActive ) {
lastWidgetAnchor = ANCHOR_ID_CONTENT;
} else if ( isTrafficActive ) {
lastWidgetAnchor = ANCHOR_ID_TRAFFIC;
}
if ( currentEntityURL === null ) {
return (
<div className="googlesitekit-widget-context googlesitekit-module-page googlesitekit-dashboard-single-url">
<Grid>
<Row>
<Cell size={ 12 }>
<Fragment>
<Link href={ dashboardURL } inherit back small>
{ __(
'Back to the Site Kit Dashboard',
'google-site-kit'
) }
</Link>
<PageHeader
title={ __(
'Detailed Page Stats',
'google-site-kit'
) }
className="googlesitekit-heading-2 googlesitekit-dashboard-single-url__heading"
fullWidth
/>
<Layout className="googlesitekit-dashboard-single-url__entity-header">
<Grid>
<Row>
<Cell size={ 12 }>
<p>
{ createInterpolateElement(
sprintf(
/* translators: %s: current entity URL. */
__(
'It looks like the URL %s is not part of this site or is not based on standard WordPress content types, therefore there is no data available to display. Visit our <link1>support forums</link1> or <link2><VisuallyHidden>Site Kit </VisuallyHidden>website</link2> for support or further information.',
'google-site-kit'
),
`<strong>${ permaLink }</strong>`
),
{
strong: <strong />,
link1: (
<Link
href="https://wordpress.org/support/plugin/google-site-kit/"
external
inherit
/>
),
link2: (
<Link
href="https://sitekit.withgoogle.com/documentation/troubleshooting/dashboard/#url-not-part-of-this-site"
external
inherit
/>
),
VisuallyHidden: (
<VisuallyHidden />
),
}
) }
</p>
</Cell>
</Row>
</Grid>
</Layout>
</Fragment>
</Cell>
</Row>
</Grid>
</div>
);
}
return (
<Fragment>
<Header showNavigation>
<EntitySearchInput />
<DateRangeSelector />
<HelpMenu />
</Header>
<WidgetContextRenderer
id={ ANCHOR_ID_TRAFFIC }
slug={ CONTEXT_ENTITY_DASHBOARD_TRAFFIC }
className={ classnames( {
'googlesitekit-widget-context--last':
lastWidgetAnchor === ANCHOR_ID_TRAFFIC,
} ) }
/>
<WidgetContextRenderer
id={ ANCHOR_ID_CONTENT }
slug={ CONTEXT_ENTITY_DASHBOARD_CONTENT }
className={ classnames( {
'googlesitekit-widget-context--last':
lastWidgetAnchor === ANCHOR_ID_CONTENT,
} ) }
/>
<WidgetContextRenderer
id={ ANCHOR_ID_SPEED }
slug={ CONTEXT_ENTITY_DASHBOARD_SPEED }
className={ classnames( {
'googlesitekit-widget-context--last':
lastWidgetAnchor === ANCHOR_ID_SPEED,
} ) }
/>
<WidgetContextRenderer
id={ ANCHOR_ID_MONETIZATION }
slug={ CONTEXT_ENTITY_DASHBOARD_MONETIZATION }
className={ classnames( {
'googlesitekit-widget-context--last':
lastWidgetAnchor === ANCHOR_ID_MONETIZATION,
} ) }
/>
</Fragment>
);
}
export default DashboardEntityApp;
| 1 | 42,717 | This should only be used in the `ScrollEffect` component, as you have in the `DashboardMainApp` below | google-site-kit-wp | js |
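Per the review, the hook call would move out of `DashboardEntityApp` and live only inside the shared `ScrollEffect` component. A rough sketch of that component, assuming it simply wraps the hook and renders nothing (its real implementation is not shown in this diff):

```js
/**
 * Internal dependencies
 */
import { useHasScrolledEffect } from '../hooks/useHasScrolledEffect';

export default function ScrollEffect() {
	// Run the scroll side effect once for the app; render no markup.
	useHasScrolledEffect();

	return null;
}
```

`DashboardEntityApp` would then render `<ScrollEffect />` (as `DashboardMainApp` reportedly does) instead of calling `useHasScrolledEffect()` at the top of the component.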
@@ -269,6 +269,8 @@ def api(require_login=True, schema=None, enabled=True,
is_admin=user.is_admin,
is_active=user.is_active)
+ g.token_str = token
+
if not g.auth.is_active:
raise ApiException(
requests.codes.forbidden, | 1 | # Copyright (c) 2017 Quilt Data, Inc. All rights reserved.
"""
API routes.
NOTE: By default, SQLAlchemy expires all objects when the transaction is committed:
http://docs.sqlalchemy.org/en/latest/orm/session_api.html#sqlalchemy.orm.session.Session.commit
We disable this behavior because it can cause unexpected queries with
major performance implications. See `expire_on_commit=False` in `__init__.py`.
"""
import calendar
from collections import defaultdict
from datetime import datetime, timedelta
from functools import wraps
import gzip
import json
import pathlib
import time
import boto3
from botocore.exceptions import ClientError
from flask import abort, g, redirect, request, Response
from flask_cors import CORS
from flask_json import as_json, jsonify
import httpagentparser
from jsonschema import Draft4Validator, ValidationError
import requests
import sqlalchemy as sa
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import undefer
import stripe
from . import app, db
from .analytics import MIXPANEL_EVENT, mp
from .auth import (_delete_user, consume_code_string, issue_code,
issue_token, try_login, verify_token_string,
reset_password, exp_from_token, _create_user,
_enable_user, _disable_user, revoke_token_string,
reset_password_from_email, change_password, activate_response,
AuthException, ValidationException, ConflictException,
NotFoundException, CredentialException)
from .const import (FTS_LANGUAGE, PaymentPlan, PUBLIC, TEAM, VALID_NAME_RE,
VALID_EMAIL_RE, VALID_USERNAME_RE)
from .core import (decode_node, find_object_hashes, hash_contents,
FileNode, GroupNode, RootNode, TableNode, LATEST_TAG, README)
from .mail import send_invitation_email
from .models import (Access, Comment, Customer, Event, Instance,
InstanceBlobAssoc, Invitation, Log, Package, S3Blob, Tag, User, Version)
from .schemas import (GET_OBJECTS_SCHEMA, LOG_SCHEMA, PACKAGE_SCHEMA,
PASSWORD_RESET_SCHEMA, USERNAME_EMAIL_SCHEMA, EMAIL_SCHEMA,
USERNAME_PASSWORD_SCHEMA, USERNAME_SCHEMA, USERNAME_PASSWORD_EMAIL_SCHEMA)
from .search import keywords_tsvector, tsvector_concat
QUILT_CDN = 'https://cdn.quiltdata.com/'
DEPLOYMENT_ID = app.config['DEPLOYMENT_ID']
CATALOG_URL = app.config['CATALOG_URL']
CATALOG_REDIRECT_URL = '%s/oauth_callback' % CATALOG_URL
AUTHORIZATION_HEADER = 'Authorization'
PACKAGE_BUCKET_NAME = app.config['PACKAGE_BUCKET_NAME']
PACKAGE_URL_EXPIRATION = app.config['PACKAGE_URL_EXPIRATION']
TEAM_ID = app.config['TEAM_ID']
ALLOW_ANONYMOUS_ACCESS = app.config['ALLOW_ANONYMOUS_ACCESS']
ALLOW_TEAM_ACCESS = app.config['ALLOW_TEAM_ACCESS']
ENABLE_USER_ENDPOINTS = app.config['ENABLE_USER_ENDPOINTS']
S3_HEAD_OBJECT = 'head_object'
S3_GET_OBJECT = 'get_object'
S3_PUT_OBJECT = 'put_object'
OBJ_DIR = 'objs'
# Limit the JSON metadata to 100MB.
# This is mostly a sanity check; it's already limited by app.config['MAX_CONTENT_LENGTH'].
MAX_METADATA_SIZE = 100 * 1024 * 1024
PREVIEW_MAX_CHILDREN = 10
PREVIEW_MAX_DEPTH = 4
MAX_PREVIEW_SIZE = 640 * 1024 # 640KB ought to be enough for anybody...
s3_client = boto3.client(
's3',
endpoint_url=app.config.get('S3_ENDPOINT'),
aws_access_key_id=app.config.get('AWS_ACCESS_KEY_ID'),
aws_secret_access_key=app.config.get('AWS_SECRET_ACCESS_KEY')
)
stripe.api_key = app.config['STRIPE_SECRET_KEY']
HAVE_PAYMENTS = bool(stripe.api_key)
class QuiltCli(httpagentparser.Browser):
look_for = 'quilt-cli'
version_markers = [('/', '')]
httpagentparser.detectorshub.register(QuiltCli())
class PythonPlatform(httpagentparser.DetectorBase):
def __init__(self, name):
super().__init__()
self.name = name
self.look_for = name
info_type = 'python_platform'
version_markers = [('/', '')]
for python_name in ['CPython', 'Jython', 'PyPy']:
httpagentparser.detectorshub.register(PythonPlatform(python_name))
class ApiException(Exception):
"""
Base class for API exceptions.
"""
def __init__(self, status_code, message):
super().__init__()
self.status_code = status_code
self.message = message
### Web routes ###
@app.route('/healthcheck')
def healthcheck():
"""ELB health check; just needs to return a 200 status code."""
return Response("ok", content_type='text/plain')
ROBOTS_TXT = '''
User-agent: *
Disallow: /
'''.lstrip()
@app.route('/robots.txt')
def robots():
"""Disallow crawlers; there's nothing useful for them here."""
return Response(ROBOTS_TXT, mimetype='text/plain')
def _valid_catalog_redirect(next):
return next is None or next.startswith(CATALOG_REDIRECT_URL)
def _validate_username(username):
if not VALID_USERNAME_RE.fullmatch(username):
raise ApiException(
requests.codes.bad,
"""
Username is not valid. Usernames must start with a letter or underscore, and
contain only alphanumeric characters and underscores thereafter.
""")
@app.route('/login')
def login():
return redirect('{CATALOG_URL}/code'.format(CATALOG_URL=CATALOG_URL), code=302)
### API routes ###
# Allow CORS requests to API routes.
# The "*" origin is more secure than specific origins because it blocks cookies.
# Cache the settings for a day to avoid pre-flight requests.
CORS(app, resources={"/api/*": {"origins": "*", "max_age": timedelta(days=1)}})
class Auth:
"""
Info about the user making the API request.
"""
def __init__(self, user, email, is_logged_in, is_admin, is_active=True):
self.user = user
self.email = email
self.is_logged_in = is_logged_in
self.is_admin = is_admin
self.is_active = is_active
class PackageNotFoundException(ApiException):
"""
API exception for missing packages.
"""
def __init__(self, owner, package, logged_in=True):
message = "Package %s/%s does not exist" % (owner, package)
if not logged_in:
message = "%s (do you need to log in?)" % message
super().__init__(requests.codes.not_found, message)
@app.errorhandler(ApiException)
def handle_api_exception(error):
"""
Converts an API exception into an error response.
"""
_mp_track(
type="exception",
status_code=error.status_code,
message=error.message,
)
response = jsonify(dict(
message=error.message
))
response.status_code = error.status_code
return response
def api(require_login=True, schema=None, enabled=True,
require_admin=False, require_anonymous=False):
"""
Decorator for API requests.
Handles auth and adds the username as the first argument.
"""
if require_admin:
require_login = True
if schema is not None:
Draft4Validator.check_schema(schema)
validator = Draft4Validator(schema)
else:
validator = None
assert not (require_login and require_anonymous), (
"Can't both require login and require anonymous access.")
def innerdec(f):
@wraps(f)
def wrapper(*args, **kwargs):
g.auth = Auth(user=None, email=None, is_logged_in=False, is_admin=False, is_active=True)
user_agent_str = request.headers.get('user-agent', '')
g.user_agent = httpagentparser.detect(user_agent_str, fill_none=True)
if not enabled:
raise ApiException(
requests.codes.bad_request,
"This endpoint is not enabled."
)
if validator is not None:
try:
validator.validate(request.get_json(cache=True))
except ValidationError as ex:
raise ApiException(requests.codes.bad_request, ex.message)
auth = request.headers.get(AUTHORIZATION_HEADER)
g.auth_header = auth
if auth is None:
if not require_anonymous:
if require_login or not ALLOW_ANONYMOUS_ACCESS:
raise ApiException(requests.codes.unauthorized, "Not logged in")
else:
# try to validate new auth
token = auth
# for compatibility with old clients
if token.startswith("Bearer "):
token = token[7:]
try:
user = verify_token_string(token)
except AuthException:
raise ApiException(requests.codes.unauthorized, "Token invalid.")
g.user = user
g.auth = Auth(user=user.name,
email=user.email,
is_logged_in=True,
is_admin=user.is_admin,
is_active=user.is_active)
if not g.auth.is_active:
raise ApiException(
requests.codes.forbidden,
"Account is inactive. Must have an active account."
)
if require_admin and not g.auth.is_admin:
raise ApiException(
requests.codes.forbidden,
"Must be authenticated as an admin to use this endpoint."
)
return f(*args, **kwargs)
return wrapper
return innerdec
@app.route('/api/token', methods=['POST'])
@api(require_login=False, require_anonymous=True)
@as_json
def token():
def token_success(user):
new_token = issue_token(user)
exp = exp_from_token(new_token)
db.session.commit()
return dict(
refresh_token=new_token,
access_token=new_token,
expires_at=exp
)
refresh_token = request.values.get('refresh_token')
if refresh_token is None:
abort(requests.codes.bad_request)
# check if one-time code, then if token
try:
user = consume_code_string(refresh_token)
return token_success(user)
except ValidationException:
pass
except AuthException:
raise ApiException(requests.codes.unauthorized, 'Code invalid')
try:
user = verify_token_string(refresh_token)
if not user:
raise ApiException(requests.codes.unauthorized, 'Token invalid')
return token_success(user)
except AuthException as ex:
raise ApiException(requests.codes.unauthorized, ex.message)
@app.route('/api/login', methods=['POST'])
@api(require_anonymous=True, require_login=False, schema=USERNAME_PASSWORD_SCHEMA)
@as_json
def login_post():
data = request.get_json()
username = data.get('username')
password = data.get('password')
user = User.query.filter_by(name=username).with_for_update().one_or_none()
if not user:
raise ApiException(requests.codes.unauthorized, 'Login attempt failed')
if try_login(user, password):
token = issue_token(user)
db.session.commit()
return {'token': token}
raise ApiException(requests.codes.unauthorized, 'Login attempt failed')
@app.route('/activate/<link>')
def activate_endpoint(link):
return activate_response(link)
@app.route('/api/reset_password', methods=['POST'])
@api(require_anonymous=True, require_login=False, schema=EMAIL_SCHEMA)
@as_json
def reset_password_start():
data = request.get_json()
email = data['email']
reset_password_from_email(email)
db.session.commit()
return {}
@app.route('/api/change_password', methods=['POST'])
@api(require_anonymous=True, require_login=False, schema=PASSWORD_RESET_SCHEMA)
@as_json
def change_password_endpoint():
data = request.get_json()
raw_password = data['password']
link = data['link']
try:
change_password(raw_password, link)
db.session.commit()
return {}
except ValidationException as ex:
raise ApiException(requests.codes.bad, ex.message)
except CredentialException as ex:
raise ApiException(requests.codes.unauthorized, ex.message)
except AuthException as ex:
raise ApiException(requests.codes.internal_server_error, ex.message)
@app.route('/api/me')
@api()
@as_json
def apiroot():
return {'is_staff': g.auth.is_admin, 'is_active': g.auth.is_active,
'email': g.auth.email, 'current_user': g.auth.user}
@app.route('/api/register', methods=['POST'])
@api(require_anonymous=True, require_login=False, schema=USERNAME_PASSWORD_EMAIL_SCHEMA)
@as_json
def register_endpoint():
data = request.get_json()
if app.config['DISABLE_SIGNUP']:
raise ApiException(requests.codes.not_implemented, "Signup is disabled.")
username = data['username']
password = data['password']
email = data['email']
_create_user(username, password=password, email=email)
db.session.commit()
return {}
@app.route('/api/refresh', methods=['POST'])
@api()
@as_json
def refresh():
token_str = request.headers.get(AUTHORIZATION_HEADER)
if revoke_token_string(token_str):
token = issue_token(g.user)
db.session.commit()
return {'token': token}
# token is valid from @api so should always succeed
raise ApiException(requests.codes.internal_server_error, 'Internal server error')
@app.route('/api/logout', methods=['POST'])
@api()
@as_json
def logout():
token_str = request.headers.get(AUTHORIZATION_HEADER)
if revoke_token_string(token_str):
db.session.commit()
return {}
# token is valid from @api so should always succeed
raise ApiException(requests.codes.internal_server_error, 'Logout failed')
@app.route('/api/code')
@api()
@as_json
def get_code():
user = User.query.filter_by(name=g.user.name).one_or_none()
code = issue_code(user)
db.session.commit()
return {'code': code}
def _access_filter(auth):
query = []
if ALLOW_ANONYMOUS_ACCESS:
query.append(PUBLIC)
if auth.is_logged_in:
assert auth.user not in [None, PUBLIC, TEAM] # Sanity check
query.append(auth.user)
if ALLOW_TEAM_ACCESS:
query.append(TEAM)
return Access.user.in_(query)
def _get_package(auth, owner, package_name):
"""
Helper for looking up a package and checking permissions.
Only useful for *_list functions; all others should use more efficient queries.
"""
package = (
Package.query
.filter_by(owner=owner, name=package_name)
.join(Package.access)
.filter(_access_filter(auth))
.one_or_none()
)
if package is None:
raise PackageNotFoundException(owner, package_name, auth.is_logged_in)
return package
def _get_instance(auth, owner, package_name, package_hash):
instance = (
Instance.query
.filter_by(hash=package_hash)
.options(undefer('contents')) # Contents is deferred by default.
.join(Instance.package)
.filter_by(owner=owner, name=package_name)
.join(Package.access)
.filter(_access_filter(auth))
.one_or_none()
)
if instance is None:
raise ApiException(
requests.codes.not_found,
"Package hash does not exist"
)
return instance
def _mp_track(**kwargs):
if g.user_agent['browser']['name'] == 'QuiltCli':
source = 'cli'
else:
source = 'web'
# Use the user ID if the user is logged in; otherwise, let MP use the IP address.
distinct_id = g.auth.user
# Try to get the ELB's forwarded IP, and fall back to the actual IP (in dev).
ip_addr = request.headers.get('x-forwarded-for', request.remote_addr)
# Set common attributes sent with each event. kwargs cannot override these.
all_args = dict(
kwargs,
time=time.time(),
ip=ip_addr,
user=g.auth.user,
source=source,
browser_name=g.user_agent['browser']['name'],
browser_version=g.user_agent['browser']['version'],
platform_name=g.user_agent['platform']['name'],
platform_version=g.user_agent['platform']['version'],
python_name=g.user_agent.get('python_platform', {}).get('name'),
python_version=g.user_agent.get('python_platform', {}).get('version'),
deployment_id=DEPLOYMENT_ID,
)
mp.track(distinct_id, MIXPANEL_EVENT, all_args)
def _generate_presigned_url(method, owner, blob_hash):
return s3_client.generate_presigned_url(
method,
Params=dict(
Bucket=PACKAGE_BUCKET_NAME,
Key='%s/%s/%s' % (OBJ_DIR, owner, blob_hash)
),
ExpiresIn=PACKAGE_URL_EXPIRATION
)
def _get_or_create_customer():
assert HAVE_PAYMENTS, "Payments are not enabled"
assert g.auth.user
if TEAM_ID:
# In teams instances, we only create one Stripe customer for the whole team.
db_customer_id = ''
else:
db_customer_id = g.auth.user
db_customer = Customer.query.filter_by(id=db_customer_id).one_or_none()
if db_customer is None:
try:
# Insert a placeholder with no Stripe ID just to lock the row.
db_customer = Customer(id=db_customer_id)
db.session.add(db_customer)
db.session.flush()
except IntegrityError:
# Someone else just created it, so look it up.
db.session.rollback()
db_customer = Customer.query.filter_by(id=db_customer_id).one()
else:
# Create a new customer.
if TEAM_ID:
plan = PaymentPlan.TEAM_UNPAID.value
email = None # TODO: Use an admin email?
description = 'Team %s' % TEAM_ID
else:
plan = PaymentPlan.FREE.value
email = g.auth.email
description = g.auth.user
customer = stripe.Customer.create(
email=email,
description=description
)
stripe.Subscription.create(
customer=customer.id,
plan=plan,
)
db_customer.stripe_customer_id = customer.id
db.session.commit()
customer = stripe.Customer.retrieve(db_customer.stripe_customer_id)
assert customer.subscriptions.total_count == 1
return customer
def _get_customer_plan(customer):
return PaymentPlan(customer.subscriptions.data[0].plan.id)
def _private_packages_allowed():
"""
Checks if the current user is allowed to create private packages.
In the public cloud, the user needs to be on a paid plan.
There are no restrictions in other deployments.
"""
if not HAVE_PAYMENTS or TEAM_ID:
return True
customer = _get_or_create_customer()
plan = _get_customer_plan(customer)
return plan != PaymentPlan.FREE
@app.route('/api/get_objects', methods=['POST'])
@api(require_login=False, schema=GET_OBJECTS_SCHEMA)
@as_json
def get_objects():
obj_hashes = request.get_json()
results = (
S3Blob.query
.filter(S3Blob.hash.in_(obj_hashes))
.join(S3Blob.instances)
.join(Instance.package)
.join(Package.access)
.filter(_access_filter(g.auth))
).all()
return dict(
urls={
blob.hash: _generate_presigned_url(S3_GET_OBJECT, blob.owner, blob.hash)
for blob in results
},
sizes={
blob.hash: blob.size for blob in results
}
)
def download_object_preview_impl(owner, obj_hash):
resp = s3_client.get_object(
Bucket=PACKAGE_BUCKET_NAME,
Key='%s/%s/%s' % (OBJ_DIR, owner, obj_hash),
Range='bytes=-%d' % MAX_PREVIEW_SIZE # Limit the size of the gzip'ed content.
)
body = resp['Body']
encoding = resp.get('ContentEncoding')
if encoding == 'gzip':
with gzip.GzipFile(fileobj=body, mode='rb') as fd:
data = fd.read(MAX_PREVIEW_SIZE)
elif encoding is None:
data = body.read(MAX_PREVIEW_SIZE)
else:
# GzipFile raises an OSError if ungzipping fails, so do the same here.
raise OSError("Unexpected encoding: %r" % encoding)
return data.decode(errors='ignore') # Data may be truncated in the middle of a UTF-8 character.
def download_object_preview(owner, obj_hash):
try:
return download_object_preview_impl(owner, obj_hash)
except ClientError as ex:
_mp_track(
type="download_exception",
obj_owner=owner,
obj_hash=obj_hash,
error=str(ex),
)
if ex.response['ResponseMetadata']['HTTPStatusCode'] == requests.codes.not_found:
# The client somehow failed to upload the README.
raise ApiException(
requests.codes.forbidden,
"Failed to download the README; make sure it has been uploaded correctly."
)
else:
# Something unexpected happened.
raise
except OSError as ex:
# Failed to ungzip: either the contents is not actually gzipped,
# or the response was truncated because it was too big.
_mp_track(
type="download_exception",
obj_owner=owner,
obj_hash=obj_hash,
error=str(ex),
)
raise ApiException(
requests.codes.forbidden,
"Failed to ungzip the README; make sure it has been uploaded correctly."
)
@app.route('/api/package/<owner>/<package_name>/<package_hash>', methods=['PUT'])
@api(schema=PACKAGE_SCHEMA)
@as_json
def package_put(owner, package_name, package_hash):
# TODO: Write access for collaborators.
if g.auth.user != owner:
raise ApiException(requests.codes.forbidden,
"Only the package owner can push packages.")
if not VALID_NAME_RE.match(package_name):
raise ApiException(requests.codes.bad_request, "Invalid package name")
# TODO: Description.
data = json.loads(request.data.decode('utf-8'), object_hook=decode_node)
dry_run = data.get('dry_run', False)
public = data.get('is_public', data.get('public', False))
team = data.get('is_team', False)
contents = data['contents']
sizes = data.get('sizes', {})
if public and not ALLOW_ANONYMOUS_ACCESS:
raise ApiException(requests.codes.forbidden, "Public access not allowed")
if team and not ALLOW_TEAM_ACCESS:
raise ApiException(requests.codes.forbidden, "Team access not allowed")
if hash_contents(contents) != package_hash:
raise ApiException(requests.codes.bad_request, "Wrong contents hash")
all_hashes = set(find_object_hashes(contents))
# Old clients don't send sizes. But if sizes are present, make sure they match the hashes.
if sizes and set(sizes) != all_hashes:
raise ApiException(requests.codes.bad_request, "Sizes don't match the hashes")
# Insert a package if it doesn't already exist.
# TODO: Separate endpoint for just creating a package with no versions?
package = (
Package.query
.with_for_update()
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if package is None:
# Check for case-insensitive matches, and reject the push.
package_ci = (
Package.query
.filter(
sa.and_(
sa.func.lower(Package.owner) == sa.func.lower(owner),
sa.func.lower(Package.name) == sa.func.lower(package_name)
)
)
.one_or_none()
)
if package_ci is not None:
raise ApiException(
requests.codes.forbidden,
"Package already exists: %s/%s" % (package_ci.owner, package_ci.name)
)
if not public and not _private_packages_allowed():
raise ApiException(
requests.codes.payment_required,
(
"Insufficient permissions. Run `quilt push --public %s/%s` to make " +
"this package public, or upgrade your service plan to create " +
"private packages: https://quiltdata.com/profile."
) % (owner, package_name)
)
package = Package(owner=owner, name=package_name)
db.session.add(package)
owner_access = Access(package=package, user=owner)
db.session.add(owner_access)
if public:
public_access = Access(package=package, user=PUBLIC)
db.session.add(public_access)
if team:
team_access = Access(package=package, user=TEAM)
db.session.add(team_access)
else:
if public:
public_access = (
Access.query
.filter(sa.and_(
Access.package == package,
Access.user == PUBLIC
))
.one_or_none()
)
if public_access is None:
raise ApiException(
requests.codes.forbidden,
("%(user)s/%(pkg)s is private. To make it public, " +
"run `quilt access add %(user)s/%(pkg)s public`.") %
dict(user=owner, pkg=package_name)
)
if team:
team_access = (
Access.query
.filter(sa.and_(
Access.package == package,
Access.user == TEAM
))
.one_or_none()
)
if team_access is None:
raise ApiException(
requests.codes.forbidden,
("%(team)s:%(user)s/%(pkg)s is private. To share it with the team, " +
"run `quilt access add %(team)s:%(user)s/%(pkg)s team`.") %
dict(team=app.config['TEAM_ID'], user=owner, pkg=package_name)
)
# Insert an instance if it doesn't already exist.
instance = (
Instance.query
.with_for_update()
.filter_by(package=package, hash=package_hash)
.one_or_none()
)
# No more error checking at this point, so return from dry-run early.
if dry_run:
db.session.rollback()
# List of signed URLs is potentially huge, so stream it.
def _generate():
yield '{"upload_urls":{'
for idx, blob_hash in enumerate(all_hashes):
comma = ('' if idx == 0 else ',')
value = dict(
head=_generate_presigned_url(S3_HEAD_OBJECT, owner, blob_hash),
put=_generate_presigned_url(S3_PUT_OBJECT, owner, blob_hash)
)
yield '%s%s:%s' % (comma, json.dumps(blob_hash), json.dumps(value))
yield '}}'
return Response(_generate(), content_type='application/json')
keywords_tsv = keywords_tsvector(owner, package_name, contents)
if instance is None:
readme_hash = None
readme_preview = None
readme = contents.children.get(README)
if isinstance(readme, FileNode):
assert len(readme.hashes) == 1
readme_hash = readme.hashes[0]
# Download the README if necessary. We want to do this early, before we call
# with_for_update() on S3Blob, since it's potentially expensive.
have_readme = (
db.session.query(sa.func.count(S3Blob.id))
.filter_by(owner=owner, hash=readme_hash)
.filter(S3Blob.preview.isnot(None))
).one()[0] == 1
if not have_readme:
readme_preview = download_object_preview(owner, readme_hash)
# Add all the hashes that don't exist yet.
blobs = (
S3Blob.query
.with_for_update()
.filter(
sa.and_(
S3Blob.owner == owner,
S3Blob.hash.in_(all_hashes)
)
)
.all()
) if all_hashes else []
# Create the instance after querying the blobs - otherwise, SQLAlchemy
# will issue an INSERT followed by UPDATE instead of a single INSERT.
instance = Instance(
package=package,
contents=contents,
hash=package_hash,
created_by=g.auth.user,
updated_by=g.auth.user,
keywords_tsv=keywords_tsv,
)
blob_by_hash = {blob.hash: blob for blob in blobs}
for blob_hash in all_hashes:
blob_size = sizes.get(blob_hash)
blob = blob_by_hash.get(blob_hash)
if blob is None:
blob = S3Blob(owner=owner, hash=blob_hash, size=blob_size)
if blob_hash == readme_hash:
if readme_preview is not None:
# If we've just downloaded the README, save it in the blob.
# Otherwise, it was already set.
blob.preview = readme_preview
blob_preview_expr = readme_preview
else:
# README already exists in the DB; use a subquery to avoid fetching it
# only to insert it into the instance.
blob_preview_expr = sa.select([S3Blob.preview]).where(S3Blob.id == blob.id)
instance.readme_blob = blob
instance.blobs_tsv = sa.func.to_tsvector(FTS_LANGUAGE, blob_preview_expr)
instance.blobs.append(blob)
else:
# Just update the contents dictionary.
# Nothing else could've changed without invalidating the hash.
instance.contents = contents
instance.updated_at = sa.func.now()
instance.updated_by = g.auth.user
instance.keywords_tsv = keywords_tsv
db.session.add(instance)
# Insert a log.
log = Log(
package=package,
instance=instance,
author=owner,
)
db.session.add(log)
# Insert an event.
event = Event(
user=g.auth.user,
type=Event.Type.PUSH,
package_owner=owner,
package_name=package_name,
package_hash=package_hash,
extra=dict(
public=public
)
)
db.session.add(event)
db.session.commit()
_mp_track(
type="push",
package_owner=owner,
package_name=package_name,
public=public,
)
return dict(
package_url='%s/package/%s/%s' % (CATALOG_URL, owner, package_name)
)
@app.route('/api/package/<owner>/<package_name>/<package_hash>', methods=['GET'])
@api(require_login=False)
@as_json
def package_get(owner, package_name, package_hash):
subpath = request.args.get('subpath')
meta_only = bool(request.args.get('meta_only', ''))
instance = _get_instance(g.auth, owner, package_name, package_hash)
assert isinstance(instance.contents, RootNode)
subnode = instance.contents
for component in subpath.split('/') if subpath else []:
try:
subnode = subnode.children[component]
except (AttributeError, KeyError):
raise ApiException(requests.codes.not_found, "Invalid subpath: %r" % component)
all_hashes = set(find_object_hashes(subnode, meta_only=meta_only))
blobs = (
S3Blob.query
.filter(
sa.and_(
S3Blob.owner == owner,
S3Blob.hash.in_(all_hashes)
)
)
.all()
) if all_hashes else []
urls = {
blob_hash: _generate_presigned_url(S3_GET_OBJECT, owner, blob_hash)
for blob_hash in all_hashes
}
# Insert an event.
event = Event(
user=g.auth.user,
type=Event.Type.INSTALL,
package_owner=owner,
package_name=package_name,
package_hash=package_hash,
extra=dict(
subpath=subpath
)
)
db.session.add(event)
db.session.commit()
_mp_track(
type="install",
package_owner=owner,
package_name=package_name,
subpath=subpath,
)
return dict(
contents=instance.contents,
urls=urls,
sizes={blob.hash: blob.size for blob in blobs},
created_by=instance.created_by,
created_at=instance.created_at.timestamp(),
updated_by=instance.updated_by,
updated_at=instance.updated_at.timestamp(),
)
def _generate_preview(node, max_depth=PREVIEW_MAX_DEPTH):
if isinstance(node, GroupNode):
max_children = PREVIEW_MAX_CHILDREN if max_depth else 0
children_preview = [
(name, _generate_preview(child, max_depth - 1))
for name, child in sorted(node.children.items())[:max_children]
]
if len(node.children) > max_children:
children_preview.append(('...', None))
return children_preview
else:
return None
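# Illustrative sketch: for a hypothetical group with a file child 'data' and a
# group child 'docs' that contains 'README.md', _generate_preview returns nested
# (name, preview) pairs such as [('data', None), ('docs', [('README.md', None)])],
# assuming PREVIEW_MAX_CHILDREN and PREVIEW_MAX_DEPTH are large enough; once
# max_depth reaches 0, any remaining children collapse to [('...', None)].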
def _iterate_data_nodes(node):
# TODO: Merge into core.py
if isinstance(node, (TableNode, FileNode)):
yield node
elif isinstance(node, GroupNode):
for child in node.children.values():
yield from _iterate_data_nodes(child)
def get_event_timeseries(owner, package_name, event_type, max_weeks_old=52):
now = datetime.utcnow()
last_monday = (now - timedelta(days=now.weekday())).date()
next_monday = last_monday + timedelta(weeks=1)
weeks_ago = sa.func.trunc(sa.func.date_part('day', next_monday - Event.created) / 7)
result = (
db.session.query(
sa.func.count(Event.id),
weeks_ago.label('weeks_ago')
)
.filter(Event.package_owner == owner)
.filter(Event.package_name == package_name)
.filter(Event.type == event_type)
.filter(weeks_ago < max_weeks_old)
.group_by(weeks_ago)
.all()
)
total = (
db.session.query(
sa.func.count(Event.id)
)
.filter(Event.package_owner == owner)
.filter(Event.package_name == package_name)
.filter(Event.type == event_type)
.scalar()
)
result = [(int(count), int(weeks_ago)) for count, weeks_ago in result]
# result contains (count, weeks_ago) pairs
last = next_monday
first = next_monday - timedelta(weeks=max_weeks_old)
counts = [0] * (max_weeks_old) # list of zeroes
for count, weeks_ago in result:
counts[weeks_ago] = count
return {
'startDate': calendar.timegm(first.timetuple()),
'endDate': calendar.timegm(last.timetuple()),
'frequency': 'week',
'timeSeries': list(reversed(counts)), # 0 weeks ago needs to be at end of timeseries
'total': total
}
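# Worked example of the bucketing above (dates are illustrative): if "now" is
# Wednesday 2021-06-16, then last_monday = 2021-06-14 and next_monday = 2021-06-21.
# Ignoring the time-of-day component, an event created on 2021-06-15 lies 6 days
# before next_monday, so trunc(6 / 7) = 0 (the current, partial week), while an
# event created on 2021-06-10 lies 11 days before, so trunc(11 / 7) = 1.
# counts[0] is therefore the newest bucket, and reversing the list puts the oldest
# bucket first in the returned time series.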
@app.route('/api/package_timeseries/<owner>/<package_name>/<event_type>',
methods=['GET'])
@api(require_login=False)
@as_json
def package_timeseries(owner, package_name, event_type):
try:
event_enum = Event.Type[event_type.upper()]
except KeyError:
raise ApiException(requests.codes.bad_request, "Event type incorrectly specified.")
result = (
db.session.query(
Package,
sa.func.bool_or(Access.user == PUBLIC).label('is_public'),
sa.func.bool_or(Access.user == TEAM).label('is_team')
)
.filter_by(owner=owner, name=package_name)
.join(Package.access)
.filter(_access_filter(g.auth))
.group_by(Package.id)
.one_or_none()
)
if not result:
raise ApiException(requests.codes.not_found, "Package does not exist.")
return get_event_timeseries(owner, package_name, event_enum)
@app.route('/api/package_preview/<owner>/<package_name>/<package_hash>', methods=['GET'])
@api(require_login=False)
@as_json
def package_preview(owner, package_name, package_hash):
result = (
db.session.query(
Instance,
sa.func.bool_or(Access.user == PUBLIC).label('is_public'),
sa.func.bool_or(Access.user == TEAM).label('is_team')
)
.filter_by(hash=package_hash)
.join(Instance.package)
.filter_by(owner=owner, name=package_name)
.join(Package.access)
.filter(_access_filter(g.auth))
.group_by(Package.id, Instance.id)
.one_or_none()
)
if result is None:
raise ApiException(
requests.codes.not_found,
"Package hash does not exist"
)
(instance, is_public, is_team) = result
assert isinstance(instance.contents, RootNode)
log_count = (
db.session.query(
sa.func.count(Log.package_id)
)
.filter(Log.package_id == instance.package_id)
).one()
readme = instance.contents.children.get(README)
if isinstance(readme, FileNode):
assert len(readme.hashes) == 1
readme_hash = readme.hashes[0]
readme_url = _generate_presigned_url(S3_GET_OBJECT, owner, readme_hash)
readme_blob = (
S3Blob.query
.filter_by(owner=owner, hash=readme_hash)
.options(undefer('preview'))
.one_or_none() # Should be one() once READMEs are backfilled.
)
readme_preview = readme_blob.preview if readme_blob is not None else None
else:
readme_url = None
readme_preview = None
contents_preview = _generate_preview(instance.contents)
total_size = int((
db.session.query(sa.func.coalesce(sa.func.sum(S3Blob.size), 0))
# We could do a join on S3Blob.instances - but that results in two joins instead of one.
# So do a completely manual join to make it efficient.
.join(InstanceBlobAssoc, sa.and_(
InstanceBlobAssoc.c.blob_id == S3Blob.id,
InstanceBlobAssoc.c.instance_id == instance.id
))
).one()[0])
file_types = defaultdict(int)
for node in _iterate_data_nodes(instance.contents):
path = node.metadata.get('q_path')
if not isinstance(path, str):
path = ''
# We don't know if it's a UNIX or a Windows path, so let's treat both \ and / as separators.
# PureWindowsPath will do that for us, since / is legal on Windows.
ext = pathlib.PureWindowsPath(path).suffix.lower()
file_types[ext] += 1
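# For instance (illustrative values): pathlib.PureWindowsPath('raw/2018/table.CSV').suffix.lower()
# and pathlib.PureWindowsPath('raw\\2018\\table.CSV').suffix.lower() both yield '.csv',
# which is why PureWindowsPath is used no matter which separator style q_path contains.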
# Insert an event.
event = Event(
type=Event.Type.PREVIEW,
user=g.auth.user,
package_owner=owner,
package_name=package_name,
package_hash=package_hash,
)
db.session.add(event)
db.session.commit()
_mp_track(
type="preview",
package_owner=owner,
package_name=package_name,
)
return dict(
preview=contents_preview,
readme_url=readme_url,
readme_preview=readme_preview,
created_by=instance.created_by,
created_at=instance.created_at.timestamp(),
updated_by=instance.updated_by,
updated_at=instance.updated_at.timestamp(),
is_public=is_public,
is_team=is_team,
total_size_uncompressed=total_size,
file_types=file_types,
log_count=log_count,
)
@app.route('/api/package/<owner>/<package_name>/', methods=['GET'])
@api(require_login=False)
@as_json
def package_list(owner, package_name):
package = _get_package(g.auth, owner, package_name)
instances = (
Instance.query
.filter_by(package=package)
)
return dict(
hashes=[instance.hash for instance in instances]
)
@app.route('/api/package/<owner>/<package_name>/', methods=['DELETE'])
@api()
@as_json
def package_delete(owner, package_name):
if g.auth.user != owner:
raise ApiException(requests.codes.forbidden,
"Only the package owner can delete packages.")
package = _get_package(g.auth, owner, package_name)
db.session.delete(package)
# Insert an event.
event = Event(
user=g.auth.user,
type=Event.Type.DELETE,
package_owner=owner,
package_name=package_name,
)
db.session.add(event)
db.session.commit()
return dict()
@app.route('/api/package/<owner>/', methods=['GET'])
@api(require_login=False)
@as_json
def user_packages(owner):
packages = (
db.session.query(
Package,
sa.func.bool_or(Access.user == PUBLIC),
sa.func.bool_or(Access.user == TEAM)
)
.filter_by(owner=owner)
.join(Package.access)
.filter(_access_filter(g.auth))
.group_by(Package.id)
.order_by(Package.name)
.all()
)
return dict(
packages=[
dict(
name=package.name,
is_public=is_public,
is_team=is_team,
)
for package, is_public, is_team in packages
]
)
@app.route('/api/admin/package_list/<owner>/', methods=['GET'])
@api(require_login=True, require_admin=True)
@as_json
def list_user_packages(owner):
packages = (
db.session.query(
Package,
sa.func.bool_or(Access.user == PUBLIC),
sa.func.bool_or(Access.user == TEAM)
)
.filter_by(owner=owner)
.join(Package.access)
.group_by(Package.id)
.order_by(Package.name)
.all()
)
return dict(
packages=[
dict(
name=package.name,
is_public=is_public,
is_team=is_team,
)
for package, is_public, is_team in packages
]
)
@app.route('/api/log/<owner>/<package_name>/', methods=['GET'])
@api(require_login=False)
@as_json
def logs_list(owner, package_name):
package = _get_package(g.auth, owner, package_name)
tags = (
db.session.query(
Tag.instance_id,
sa.func.array_agg(Tag.tag).label('tag_list')
)
.group_by(Tag.instance_id)
.subquery('tags')
)
versions = (
db.session.query(
Version.instance_id,
sa.func.array_agg(Version.version).label('version_list')
)
.group_by(Version.instance_id)
.subquery('versions')
)
logs = (
db.session.query(
Log,
Instance,
tags.c.tag_list,
versions.c.version_list
)
.filter_by(package=package)
.join(Log.instance)
.outerjoin(tags, Log.instance_id == tags.c.instance_id)
.outerjoin(versions, Log.instance_id == versions.c.instance_id)
# Sort chronologically, but rely on IDs in case of duplicate created times.
.order_by(Log.created, Log.id)
)
results = [dict(
hash=instance.hash,
created=log.created.timestamp(),
author=log.author,
tags=tag_list,
versions=version_list
) for log, instance, tag_list, version_list in logs]
return {'logs' : results}
VERSION_SCHEMA = {
'type': 'object',
'properties': {
'hash': {
'type': 'string'
}
},
'required': ['hash']
}
def normalize_version(version):
try:
version = Version.normalize(version)
except ValueError:
raise ApiException(requests.codes.bad_request, "Malformed version")
return version
@app.route('/api/version/<owner>/<package_name>/<package_version>', methods=['PUT'])
@api(schema=VERSION_SCHEMA)
@as_json
def version_put(owner, package_name, package_version):
# TODO: Write access for collaborators.
if g.auth.user != owner:
raise ApiException(
requests.codes.forbidden,
"Only the package owner can create versions"
)
user_version = package_version
package_version = normalize_version(package_version)
data = request.get_json()
package_hash = data['hash']
instance = (
Instance.query
.filter_by(hash=package_hash)
.join(Instance.package)
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if instance is None:
raise ApiException(requests.codes.not_found, "Package hash does not exist")
version = Version(
package_id=instance.package_id,
version=package_version,
user_version=user_version,
instance=instance
)
try:
db.session.add(version)
db.session.commit()
except IntegrityError:
raise ApiException(requests.codes.conflict, "Version already exists")
return dict()
@app.route('/api/version/<owner>/<package_name>/<package_version>', methods=['GET'])
@api(require_login=False)
@as_json
def version_get(owner, package_name, package_version):
package_version = normalize_version(package_version)
package = _get_package(g.auth, owner, package_name)
instance = (
Instance.query
.join(Instance.versions)
.filter_by(package=package, version=package_version)
.one_or_none()
)
if instance is None:
raise ApiException(
requests.codes.not_found,
"Version %s does not exist" % package_version
)
_mp_track(
type="get_hash",
package_owner=owner,
package_name=package_name,
package_version=package_version,
)
return dict(
hash=instance.hash,
created_by=instance.created_by,
created_at=instance.created_at.timestamp(),
updated_by=instance.updated_by,
updated_at=instance.updated_at.timestamp(),
)
@app.route('/api/version/<owner>/<package_name>/', methods=['GET'])
@api(require_login=False)
@as_json
def version_list(owner, package_name):
package = _get_package(g.auth, owner, package_name)
versions = (
db.session.query(Version, Instance)
.filter_by(package=package)
.join(Version.instance)
.all()
)
sorted_versions = sorted(versions, key=lambda row: row.Version.sort_key())
return dict(
versions=[
dict(
version=version.user_version,
hash=instance.hash
) for version, instance in sorted_versions
]
)
TAG_SCHEMA = {
'type': 'object',
'properties': {
'hash': {
'type': 'string'
}
},
'required': ['hash']
}
@app.route('/api/tag/<owner>/<package_name>/<package_tag>', methods=['PUT'])
@api(schema=TAG_SCHEMA)
@as_json
def tag_put(owner, package_name, package_tag):
# TODO: Write access for collaborators.
if g.auth.user != owner:
raise ApiException(
requests.codes.forbidden,
"Only the package owner can modify tags"
)
data = request.get_json()
package_hash = data['hash']
instance = (
Instance.query
.filter_by(hash=package_hash)
.join(Instance.package)
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if instance is None:
raise ApiException(requests.codes.not_found, "Package hash does not exist")
# Update an existing tag or create a new one.
tag = (
Tag.query
.with_for_update()
.filter_by(package_id=instance.package_id, tag=package_tag)
.one_or_none()
)
if tag is None:
tag = Tag(
package_id=instance.package_id,
tag=package_tag,
instance=instance
)
db.session.add(tag)
else:
tag.instance = instance
db.session.commit()
return dict()
@app.route('/api/tag/<owner>/<package_name>/<package_tag>', methods=['GET'])
@api(require_login=False)
@as_json
def tag_get(owner, package_name, package_tag):
package = _get_package(g.auth, owner, package_name)
instance = (
Instance.query
.join(Instance.tags)
.filter_by(package=package, tag=package_tag)
.one_or_none()
)
if instance is None:
raise ApiException(
requests.codes.not_found,
"Tag %r does not exist" % package_tag
)
_mp_track(
type="get_hash",
package_owner=owner,
package_name=package_name,
package_tag=package_tag,
)
users = [access.user for access in package.access]
is_public = 'public' in users
is_team = 'team' in users
return dict(
hash=instance.hash,
created_by=instance.created_by,
created_at=instance.created_at.timestamp(),
updated_by=instance.updated_by,
updated_at=instance.updated_at.timestamp(),
is_public=is_public,
is_team=is_team,
)
@app.route('/api/tag/<owner>/<package_name>/<package_tag>', methods=['DELETE'])
@api()
@as_json
def tag_delete(owner, package_name, package_tag):
# TODO: Write access for collaborators.
if g.auth.user != owner:
raise ApiException(
requests.codes.forbidden,
"Only the package owner can delete tags"
)
tag = (
Tag.query
.with_for_update()
.filter_by(tag=package_tag)
.join(Tag.package)
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if tag is None:
raise ApiException(
requests.codes.not_found,
"Package %s/%s tag %r does not exist" % (owner, package_name, package_tag)
)
db.session.delete(tag)
db.session.commit()
return dict()
@app.route('/api/tag/<owner>/<package_name>/', methods=['GET'])
@api(require_login=False)
@as_json
def tag_list(owner, package_name):
package = _get_package(g.auth, owner, package_name)
tags = (
db.session.query(Tag, Instance)
.filter_by(package=package)
.order_by(Tag.tag)
.join(Tag.instance)
.all()
)
return dict(
tags=[
dict(
tag=tag.tag,
hash=instance.hash
) for tag, instance in tags
]
)
@app.route('/api/access/<owner>/<package_name>/<user>', methods=['PUT'])
@api()
@as_json
def access_put(owner, package_name, user):
if g.auth.user != owner:
raise ApiException(
requests.codes.forbidden,
"Only the package owner can grant access"
)
package = (
Package.query
.with_for_update()
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if package is None:
raise PackageNotFoundException(owner, package_name)
if VALID_EMAIL_RE.match(user):
email = user.lower()
invitation = Invitation(package=package, email=email)
db.session.add(invitation)
db.session.commit()
send_invitation_email(email, owner, package_name)
return {}
else:
_validate_username(user)
if user == PUBLIC:
if not ALLOW_ANONYMOUS_ACCESS:
raise ApiException(requests.codes.forbidden, "Public access not allowed")
elif user == TEAM:
if not ALLOW_TEAM_ACCESS:
raise ApiException(requests.codes.forbidden, "Team access not allowed")
elif not User.query.filter_by(name=user).one_or_none():
raise ApiException(
requests.codes.not_found,
"User %s does not exist" % user
)
try:
access = Access(package=package, user=user)
db.session.add(access)
db.session.commit()
except IntegrityError:
raise ApiException(requests.codes.conflict, "The user already has access")
return dict()
@app.route('/api/access/<owner>/<package_name>/<user>', methods=['GET'])
@api()
@as_json
def access_get(owner, package_name, user):
_validate_username(user)
if g.auth.user != owner:
raise ApiException(
requests.codes.forbidden,
"Only the package owner can view access"
)
access = (
db.session.query(Access)
.filter_by(user=user)
.join(Access.package)
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if access is None:
raise PackageNotFoundException(owner, package_name)
return dict()
@app.route('/api/access/<owner>/<package_name>/<user>', methods=['DELETE'])
@api()
@as_json
def access_delete(owner, package_name, user):
_validate_username(user)
if g.auth.user != owner:
raise ApiException(
requests.codes.forbidden,
"Only the package owner can revoke access"
)
if user == owner:
raise ApiException(
requests.codes.forbidden,
"Cannot revoke the owner's access"
)
if user == PUBLIC and not _private_packages_allowed():
raise ApiException(
requests.codes.payment_required,
"Insufficient permissions. " +
"Upgrade your plan to create private packages: https://quiltdata.com/profile."
)
access = (
Access.query
.with_for_update()
.filter_by(user=user)
.join(Access.package)
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if access is None:
raise PackageNotFoundException(owner, package_name)
db.session.delete(access)
db.session.commit()
return dict()
@app.route('/api/access/<owner>/<package_name>/', methods=['GET'])
@api()
@as_json
def access_list(owner, package_name):
accesses = (
Access.query
.join(Access.package)
.filter_by(owner=owner, name=package_name)
)
can_access = [access.user for access in accesses]
is_collaborator = g.auth.user in can_access
is_public = ALLOW_ANONYMOUS_ACCESS and (PUBLIC in can_access)
is_team = ALLOW_TEAM_ACCESS and (TEAM in can_access)
if is_public or is_team or is_collaborator:
return dict(users=can_access)
else:
raise PackageNotFoundException(owner, package_name)
@app.route('/api/recent_packages/', methods=['GET'])
@api(require_login=False)
@as_json
def recent_packages():
try:
count = int(request.args.get('count', ''))
except ValueError:
count = 10
if ALLOW_ANONYMOUS_ACCESS:
max_visibility = PUBLIC
elif ALLOW_TEAM_ACCESS:
max_visibility = TEAM
else:
# Shouldn't really happen, but let's handle this case.
raise ApiException(requests.codes.forbidden, "Not allowed")
results = (
db.session.query(Package, sa.func.max(Instance.updated_at))
.join(Package.access)
.filter_by(user=max_visibility)
.join(Package.instances)
.group_by(Package.id)
.order_by(sa.func.max(Instance.updated_at).desc())
.limit(count)
.all()
)
return dict(
packages=[
dict(
owner=package.owner,
name=package.name,
updated_at=updated_at
) for package, updated_at in results
]
)
@app.route('/api/search/', methods=['GET'])
@api(require_login=False)
@as_json
def search():
query = request.args.get('q', '')
# Get the list of visible packages and their permissions.
instances = (
db.session.query(
Instance.id,
# We have an index on (keywords_tsv || blobs_tsv)
tsvector_concat(
Instance.keywords_tsv,
Instance.blobs_tsv
).label('tsv'),
Package.owner,
Package.name,
sa.func.bool_or(Access.user == PUBLIC).label('is_public'),
sa.func.bool_or(Access.user == TEAM).label('is_team'),
sa.func.plainto_tsquery(FTS_LANGUAGE, query).label('query') # Just save the query as a variable
)
.join(Instance.package)
.join(Package.access)
.filter(_access_filter(g.auth))
.join(Instance.tags)
.filter(Tag.tag == LATEST_TAG)
.group_by(Package.id, Instance.id) # Redundant, but we need Instance.id and Package.*
.subquery('i')
)
# Get the README contents and full-text search index.
README_SNIPPET_LEN = 1024
readmes = (
db.session.query(
Instance.id,
sa.func.substr(S3Blob.preview, 1, README_SNIPPET_LEN).label('readme'),
)
.join(Instance.readme_blob)
.subquery('r')
)
# Filter and sort the results.
# Use the "rank / (rank + 1)" normalization; makes it look sort of like a percentage.
RANK_NORMALIZATION = 32
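# Per the PostgreSQL ts_rank_cd documentation, normalization flag 32 divides the raw
# rank by (rank + 1), mapping it into [0, 1): e.g. a raw rank of 0.5 becomes ~0.33
# and a raw rank of 9 becomes 0.9 (illustrative numbers, not taken from real data).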
results = (
db.session.query(
instances.c.owner,
instances.c.name,
instances.c.is_public,
instances.c.is_team,
readmes.c.readme,
sa.func.ts_rank_cd(
instances.c.tsv,
instances.c.query,
RANK_NORMALIZATION
).label('rank')
)
.outerjoin(readmes, instances.c.id == readmes.c.id)
.filter(
instances.c.tsv.op('@@')(instances.c.query)
if query else True # Disable the filter if there was no query string.
)
.order_by(sa.desc('rank'), instances.c.owner, instances.c.name)
)
return dict(
packages=[
dict(
owner=owner,
name=name,
is_public=is_public,
is_team=is_team,
readme_preview=readme,
rank=rank,
) for owner, name, is_public, is_team, readme, rank in results
]
)
@app.route('/api/profile', methods=['GET'])
@api()
@as_json
def profile():
if HAVE_PAYMENTS:
customer = _get_or_create_customer()
plan = _get_customer_plan(customer).value
have_cc = customer.sources.total_count > 0
else:
plan = None
have_cc = None
# Check for outstanding package sharing invitations
invitations = (
db.session.query(Invitation, Package)
.filter_by(email=g.auth.email.lower())
.join(Invitation.package)
)
for invitation, package in invitations:
access = Access(package=package, user=g.auth.user)
db.session.add(access)
db.session.delete(invitation)
if invitations:
db.session.commit()
# We want to show only the packages owned by or explicitly shared with the user -
# but also show whether they're public, in case a package is both public and shared with the user.
# So do a "GROUP BY" to get the public info, then "HAVING" to filter out packages that aren't shared.
packages = (
db.session.query(
Package,
sa.func.bool_or(Access.user == PUBLIC),
sa.func.bool_or(Access.user == TEAM)
)
.join(Package.access)
.filter(_access_filter(g.auth))
.group_by(Package.id)
.order_by(
sa.func.lower(Package.owner),
sa.func.lower(Package.name)
)
.having(sa.func.bool_or(Access.user == g.auth.user))
.all()
)
return dict(
packages=dict(
own=[
dict(
owner=package.owner,
name=package.name,
is_public=is_public,
is_team=is_team,
)
for package, is_public, is_team in packages if package.owner == g.auth.user
],
shared=[
dict(
owner=package.owner,
name=package.name,
is_public=is_public,
is_team=is_team,
)
for package, is_public, is_team in packages if package.owner != g.auth.user
],
),
plan=plan,
have_credit_card=have_cc,
is_admin=g.auth.is_admin,
)
@app.route('/api/payments/update_plan', methods=['POST'])
@api()
@as_json
def payments_update_plan():
if not HAVE_PAYMENTS:
raise ApiException(requests.codes.not_found, "Payments not enabled")
plan = request.values.get('plan')
try:
plan = PaymentPlan(plan)
except ValueError:
raise ApiException(requests.codes.bad_request, "Invalid plan: %r" % plan)
if TEAM_ID:
if not g.auth.is_admin:
raise ApiException(requests.codes.forbidden, "Only the admin can update plans")
# Can only switch to TEAM (from TEAM_UNPAID)
# if plan != PaymentPlan.TEAM:
if plan not in (PaymentPlan.TEAM, PaymentPlan.TEAM_UNPAID):
raise ApiException(requests.codes.forbidden, "Can only switch between team plans")
else:
if plan not in (PaymentPlan.FREE, PaymentPlan.INDIVIDUAL):
# Cannot switch to the BUSINESS_MEMBER plan manually.
raise ApiException(requests.codes.forbidden, "Not allowed to switch to plan: %r" % plan)
stripe_token = request.values.get('token')
customer = _get_or_create_customer()
if stripe_token is not None:
customer.source = stripe_token
try:
customer.save()
except stripe.InvalidRequestError as ex:
raise ApiException(requests.codes.bad_request, str(ex))
assert customer.sources.total_count
if plan != PaymentPlan.FREE and not customer.sources.total_count:
# No payment info.
raise ApiException(
requests.codes.payment_required,
"Payment information required to upgrade to %r" % plan.value
)
subscription = customer.subscriptions.data[0]
subscription.plan = plan.value
try:
subscription.save()
except stripe.InvalidRequestError as ex:
raise ApiException(requests.codes.server_error, str(ex))
return dict(
plan=plan.value
)
@app.route('/api/payments/update_payment', methods=['POST'])
@api()
@as_json
def payments_update_payment():
if not HAVE_PAYMENTS:
raise ApiException(requests.codes.not_found, "Payments not enabled")
stripe_token = request.values.get('token')
if not stripe_token:
raise ApiException(requests.codes.bad_request, "Missing token")
if TEAM_ID and not g.auth.is_admin:
raise ApiException(requests.codes.forbidden, "Only the admin can update payment info")
customer = _get_or_create_customer()
customer.source = stripe_token
try:
customer.save()
except stripe.InvalidRequestError as ex:
raise ApiException(requests.codes.bad_request, str(ex))
return dict()
@app.route('/api/invite/', methods=['GET'])
@api(require_login=False)
@as_json
def invitation_user_list():
invitations = (
db.session.query(Invitation, Package)
.filter_by(email=g.auth.email.lower())
.join(Invitation.package)
.all()
)
return dict(invitations=[dict(invitation_id=invite.id,
owner=package.owner,
package=package.name,
email=invite.email,
invited_at=invite.invited_at)
for invite, package in invitations])
@app.route('/api/invite/<owner>/<package_name>/', methods=['GET'])
@api()
@as_json
def invitation_package_list(owner, package_name):
package = _get_package(g.auth, owner, package_name)
invitations = (
Invitation.query
.filter_by(package_id=package.id)
)
return dict(invitations=[dict(invitation_id=invite.id,
owner=package.owner,
package=package.name,
email=invite.email,
invited_at=invite.invited_at)
for invite in invitations])
@app.route('/api/log', methods=['POST'])
@api(require_login=False, schema=LOG_SCHEMA)
@as_json
def client_log():
data = request.get_json()
for event in data:
_mp_track(**event)
return dict()
@app.route('/api/users/list', methods=['GET'])
@api(enabled=ENABLE_USER_ENDPOINTS, require_admin=True)
@as_json
def list_users():
users = User.query.all()
results = [{
'username': user.name,
'email': user.email,
'first_name': user.first_name,
'last_name': user.last_name,
'date_joined': user.date_joined,
'last_login': user.last_login,
'is_superuser': user.is_admin,
'is_active': user.is_active
} for user in users]
return {
'results': results
}
@app.route('/api/users/list_detailed', methods=['GET'])
@api(enabled=ENABLE_USER_ENDPOINTS, require_admin=True)
@as_json
def list_users_detailed():
package_counts_query = (
db.session.query(Package.owner, sa.func.count(Package.owner))
.group_by(Package.owner)
)
package_counts = dict(package_counts_query)
events = (
db.session.query(Event.user, Event.type, sa.func.count(Event.type))
.group_by(Event.user, Event.type)
)
event_results = defaultdict(int)
for event_user, event_type, event_count in events:
event_results[(event_user, event_type)] = event_count
users = User.query.all()
results = {
user.name : {
'packages' : package_counts.get(user.name, 0),
'installs' : event_results[(user.name, Event.Type.INSTALL)],
'previews' : event_results[(user.name, Event.Type.PREVIEW)],
'pushes' : event_results[(user.name, Event.Type.PUSH)],
'deletes' : event_results[(user.name, Event.Type.DELETE)],
'status' : 'active' if user.is_active else 'disabled',
'last_seen' : user.last_login,
'is_admin' : user.is_admin
}
for user in users
}
return {'users' : results}
@app.route('/api/users/create', methods=['POST'])
@api(enabled=ENABLE_USER_ENDPOINTS, require_admin=True, schema=USERNAME_EMAIL_SCHEMA)
@as_json
def create_user():
try:
data = request.get_json()
username = data['username']
_validate_username(username)
email = data['email']
_create_user(username=username, email=email, requires_reset=True, requires_activation=False)
db.session.commit()
return {}
except ValidationException as ex:
raise ApiException(requests.codes.bad, ex.message)
except ConflictException as ex:
raise ApiException(requests.codes.conflict, ex.message)
@app.route('/api/users/disable', methods=['POST'])
@api(enabled=ENABLE_USER_ENDPOINTS, require_admin=True, schema=USERNAME_SCHEMA)
@as_json
def disable_user():
try:
data = request.get_json()
username = data['username']
if g.auth.user == username:
raise ApiException(requests.codes.forbidden, "Can't disable yourself")
user = User.query.filter_by(name=username).with_for_update().one_or_none()
_disable_user(user)
db.session.commit()
return {}
except NotFoundException as ex:
raise ApiException(requests.codes.not_found, ex.message)
@app.route('/api/users/enable', methods=['POST'])
@api(enabled=ENABLE_USER_ENDPOINTS, require_admin=True, schema=USERNAME_SCHEMA)
@as_json
def enable_user():
try:
data = request.get_json()
username = data['username']
user = User.query.filter_by(name=username).with_for_update().one_or_none()
_enable_user(user)
db.session.commit()
return {}
except NotFoundException as ex:
raise ApiException(requests.codes.not_found, ex.message)
# This endpoint is disabled pending a rework of authentication
@app.route('/api/users/delete', methods=['POST'])
@api(enabled=False, require_admin=True, schema=USERNAME_SCHEMA)
@as_json
def delete_user():
try:
data = request.get_json()
username = data['username']
user = User.query.filter_by(name=username).with_for_update().one_or_none()
_delete_user(user)
db.session.commit()
return {}
except NotFoundException as ex:
raise ApiException(requests.codes.not_found, ex.message)
@app.route('/api/audit/<owner>/<package_name>/')
@api(require_admin=True)
@as_json
def audit_package(owner, package_name):
events = (
Event.query
.filter_by(package_owner=owner, package_name=package_name)
)
return dict(
events=[dict(
created=event.created.timestamp(),
user=event.user,
type=Event.Type(event.type).name,
package_owner=event.package_owner,
package_name=event.package_name,
package_hash=event.package_hash,
extra=event.extra,
) for event in events]
)
@app.route('/api/audit/<user>/')
@api(require_admin=True)
@as_json
def audit_user(user):
events = (
Event.query
.filter_by(user=user)
)
return dict(
events=[dict(
created=event.created.timestamp(),
user=event.user,
type=Event.Type(event.type).name,
package_owner=event.package_owner,
package_name=event.package_name,
package_hash=event.package_hash,
extra=event.extra,
) for event in events]
)
@app.route('/api/admin/package_summary')
@api(require_admin=True)
@as_json
def package_summary():
events = (
db.session.query(Event.package_owner, Event.package_name, Event.type,
sa.func.count(Event.type), sa.func.max(Event.created))
.group_by(Event.package_owner, Event.package_name, Event.type)
)
event_results = defaultdict(lambda: {'count':0})
packages = set()
for event_owner, event_package, event_type, event_count, latest in events:
package = "{owner}/{pkg}".format(owner=event_owner, pkg=event_package)
event_results[(package, event_type)] = {'latest':latest.timestamp(), 'count':event_count}
packages.add(package)
results = {
package : {
'installs' : event_results[(package, Event.Type.INSTALL)],
'previews' : event_results[(package, Event.Type.PREVIEW)],
'pushes' : event_results[(package, Event.Type.PUSH)],
'deletes' : event_results[(package, Event.Type.DELETE)]
} for package in packages
}
return {'packages' : results}
@app.route('/api/users/reset_password', methods=['POST'])
@api(enabled=ENABLE_USER_ENDPOINTS, require_admin=True, schema=USERNAME_SCHEMA)
@as_json
def admin_reset_password():
data = request.get_json()
username = data['username']
user = User.query.filter_by(name=username).with_for_update().one_or_none()
if not user:
raise ApiException(requests.codes.not_found, "User not found.")
reset_password(user, set_unusable=True)
db.session.commit()
return {}
def _comment_dict(comment):
# JSON/JavaScript is not very good with large integers, so let's use strings to be safe.
str_id = '%016x' % comment.id
return dict(
id=str_id,
author=comment.author,
created=comment.created.timestamp(),
contents=comment.contents
)
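# Example of the zero-padded hex encoding above (value is illustrative):
# '%016x' % 255 == '00000000000000ff', a fixed-width 16-character string that
# survives a round-trip through JSON/JavaScript without losing precision.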
@app.route('/api/comments/<owner>/<package_name>/', methods=['POST'])
@api()
@as_json
def comments_post(owner, package_name):
package = _get_package(g.auth, owner, package_name)
contents = request.get_json()['contents']
comment = Comment(package=package, author=g.auth.user, contents=contents)
db.session.add(comment)
db.session.commit()
# We disable automatic object expiration on commit, so refresh it manually.
db.session.refresh(comment)
return dict(comment=_comment_dict(comment))
@app.route('/api/comments/<owner>/<package_name>/', methods=['GET'])
@api(require_login=False)
@as_json
def comments_list(owner, package_name):
package = _get_package(g.auth, owner, package_name)
comments = Comment.query.filter_by(package=package).order_by(Comment.created)
return dict(comments=list(map(_comment_dict, comments)))
| 1 | 16,870 | I'd call it `g.auth_token`, just to make it more clear | quiltdata-quilt | py |
@@ -2,6 +2,9 @@ package protocol
import "time"
+// DesiredReceiveBufferSize is the kernel UDP receive buffer size that we'd like to use.
+const DesiredReceiveBufferSize = (1 << 20) * 2 // 2 MB
+
// MaxPacketSizeIPv4 is the maximum packet size that we use for sending IPv4 packets.
const MaxPacketSizeIPv4 = 1252
| 1 | package protocol
import "time"
// MaxPacketSizeIPv4 is the maximum packet size that we use for sending IPv4 packets.
const MaxPacketSizeIPv4 = 1252
// MaxPacketSizeIPv6 is the maximum packet size that we use for sending IPv6 packets.
const MaxPacketSizeIPv6 = 1232
// MaxCongestionWindowPackets is the maximum congestion window in packets.
const MaxCongestionWindowPackets = 10000
// MaxUndecryptablePackets limits the number of undecryptable packets that are queued in the session.
const MaxUndecryptablePackets = 33
// ConnectionFlowControlMultiplier determines how much larger the connection flow control window needs to be relative to any stream's flow control window
// This is the value that Chromium is using
const ConnectionFlowControlMultiplier = 1.5
// InitialMaxStreamData is the stream-level flow control window for receiving data
const InitialMaxStreamData = (1 << 10) * 512 // 512 kb
// InitialMaxData is the connection-level flow control window for receiving data
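// With the constants above, this works out to 1.5 * 512 kb = 768 kb.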
const InitialMaxData = ConnectionFlowControlMultiplier * InitialMaxStreamData
// DefaultMaxReceiveStreamFlowControlWindow is the default maximum stream-level flow control window for receiving data, for the server
const DefaultMaxReceiveStreamFlowControlWindow = 6 * (1 << 20) // 6 MB
// DefaultMaxReceiveConnectionFlowControlWindow is the default connection-level flow control window for receiving data, for the server
const DefaultMaxReceiveConnectionFlowControlWindow = 15 * (1 << 20) // 15 MB
// WindowUpdateThreshold is the fraction of the receive window that has to be consumed before a higher offset is advertised to the client
const WindowUpdateThreshold = 0.25
// DefaultMaxIncomingStreams is the maximum number of streams that a peer may open
const DefaultMaxIncomingStreams = 100
// DefaultMaxIncomingUniStreams is the maximum number of unidirectional streams that a peer may open
const DefaultMaxIncomingUniStreams = 100
// MaxServerUnprocessedPackets is the max number of packets stored in the server that are not yet processed.
const MaxServerUnprocessedPackets = 1024
// MaxSessionUnprocessedPackets is the max number of packets stored in each session that are not yet processed.
const MaxSessionUnprocessedPackets = 256
// SkipPacketAveragePeriodLength is the average period length in which one packet number is skipped to prevent an Optimistic ACK attack
const SkipPacketAveragePeriodLength PacketNumber = 500
// MaxAcceptQueueSize is the maximum number of sessions that the server queues for accepting.
// If the queue is full, new connection attempts will be rejected.
const MaxAcceptQueueSize = 32
// TokenValidity is the duration that a (non-retry) token is considered valid
const TokenValidity = 24 * time.Hour
// RetryTokenValidity is the duration that a retry token is considered valid
const RetryTokenValidity = 10 * time.Second
// MaxOutstandingSentPackets is the maximum number of packets saved for retransmission.
// When reached, it imposes a soft limit on sending new packets:
// Sending ACKs and retransmission is still allowed, but no new regular packets can be sent.
const MaxOutstandingSentPackets = 2 * MaxCongestionWindowPackets
// MaxTrackedSentPackets is the maximum number of sent packets saved for retransmission.
// When reached, no more packets will be sent.
// This value *must* be larger than MaxOutstandingSentPackets.
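// With MaxCongestionWindowPackets = 10000, this works out to 20000 outstanding and 25000 tracked packets.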
const MaxTrackedSentPackets = MaxOutstandingSentPackets * 5 / 4
// MaxNonAckElicitingAcks is the maximum number of packets containing an ACK,
// but no ack-eliciting frames, that we send in a row
const MaxNonAckElicitingAcks = 19
// MaxStreamFrameSorterGaps is the maximum number of gaps between received StreamFrames
// prevents DoS attacks against the streamFrameSorter
const MaxStreamFrameSorterGaps = 1000
// MinStreamFrameBufferSize is the minimum data length of a received STREAM frame
// that we use the buffer for. This protects against a DoS where an attacker would send us
// very small STREAM frames to consume a lot of memory.
const MinStreamFrameBufferSize = 128
// MinCoalescedPacketSize is the minimum size of a coalesced packet that we pack.
// If a packet has less than this number of bytes, we won't coalesce any more packets onto it.
const MinCoalescedPacketSize = 128
// MaxCryptoStreamOffset is the maximum offset allowed on any of the crypto streams.
// This limits the size of the ClientHello and Certificates that can be received.
const MaxCryptoStreamOffset = 16 * (1 << 10)
// MinRemoteIdleTimeout is the minimum value that we accept for the remote idle timeout
const MinRemoteIdleTimeout = 5 * time.Second
// DefaultIdleTimeout is the default idle timeout
const DefaultIdleTimeout = 30 * time.Second
// DefaultHandshakeTimeout is the default timeout for a connection until the crypto handshake succeeds.
const DefaultHandshakeTimeout = 10 * time.Second
// MaxKeepAliveInterval is the maximum time until we send a packet to keep a connection alive.
// It should be shorter than the time that NATs clear their mapping.
const MaxKeepAliveInterval = 20 * time.Second
// RetiredConnectionIDDeleteTimeout is the time we keep closed sessions around in order to retransmit the CONNECTION_CLOSE.
// after this time all information about the old connection will be deleted
const RetiredConnectionIDDeleteTimeout = 5 * time.Second
// MinStreamFrameSize is the minimum size that has to be left in a packet, so that we add another STREAM frame.
// This avoids splitting up STREAM frames into small pieces, which has 2 advantages:
// 1. it reduces the framing overhead
// 2. it reduces the head-of-line blocking, when a packet is lost
const MinStreamFrameSize ByteCount = 128
// MaxPostHandshakeCryptoFrameSize is the maximum size of CRYPTO frames
// we send after the handshake completes.
const MaxPostHandshakeCryptoFrameSize = 1000
// MaxAckFrameSize is the maximum size for an ACK frame that we write
// Due to the varint encoding, ACK frames can grow (almost) indefinitely large.
// The MaxAckFrameSize should be large enough to encode many ACK ranges,
// but must ensure that a maximum size ACK frame fits into one packet.
const MaxAckFrameSize ByteCount = 1000
// MaxNumAckRanges is the maximum number of ACK ranges that we send in an ACK frame.
// It also serves as a limit for the packet history.
// If at any point we keep track of more ranges, old ranges are discarded.
const MaxNumAckRanges = 500
// MinPacingDelay is the minimum duration that is used for packet pacing
// If the packet packing frequency is higher, multiple packets might be sent at once.
// Example: For a packet pacing delay of 200μs, we would send 5 packets at once, wait for 1ms, and so forth.
const MinPacingDelay = time.Millisecond
// DefaultConnectionIDLength is the connection ID length that is used for multiplexed connections
// if no other value is configured.
const DefaultConnectionIDLength = 4
// MaxActiveConnectionIDs is the number of connection IDs that we're storing.
const MaxActiveConnectionIDs = 4
// MaxIssuedConnectionIDs is the maximum number of connection IDs that we're issuing at the same time.
const MaxIssuedConnectionIDs = 6
// PacketsPerConnectionID is the number of packets we send using one connection ID.
// If the peer provides us with enough new connection IDs, we switch to a new connection ID.
const PacketsPerConnectionID = 10000
// AckDelayExponent is the ack delay exponent used when sending ACKs.
const AckDelayExponent = 3
// Estimated timer granularity.
// The loss detection timer will not be set to a value smaller than granularity.
const TimerGranularity = time.Millisecond
// MaxAckDelay is the maximum time by which we delay sending ACKs.
const MaxAckDelay = 25 * time.Millisecond
// MaxAckDelayInclGranularity is the max_ack_delay including the timer granularity.
// This is the value that should be advertised to the peer.
const MaxAckDelayInclGranularity = MaxAckDelay + TimerGranularity
// KeyUpdateInterval is the maximum number of packets we send or receive before initiating a key update.
const KeyUpdateInterval = 100 * 1000
// Max0RTTQueueingDuration is the maximum time that we store 0-RTT packets in order to wait for the corresponding Initial to be received.
const Max0RTTQueueingDuration = 100 * time.Millisecond
// Max0RTTQueues is the maximum number of connections that we buffer 0-RTT packets for.
const Max0RTTQueues = 32
// Max0RTTQueueLen is the maximum number of 0-RTT packets that we buffer for each connection.
// When a new session is created, all buffered packets are passed to the session immediately.
// To avoid blocking, this value has to be smaller than MaxSessionUnprocessedPackets.
// To avoid packets being dropped as undecryptable by the session, this value has to be smaller than MaxUndecryptablePackets.
const Max0RTTQueueLen = 32
| 1 | 9,280 | Not sure if this is what we really need, we might need to adjust this constant after more extensive benchmarking. | lucas-clemente-quic-go | go |
@@ -183,7 +183,10 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Patch("/hooks/:id:int", bind(api.EditHookOption{}), repo.EditHook)
m.Get("/raw/*", middleware.RepoRef(), repo.GetRawFile)
m.Get("/archive/*", repo.GetArchive)
-
+ m.Group("/branches", func() {
+ m.Get("", repo.ListBranches)
+ m.Get("/:id", repo.GetBranch)
+ })
m.Group("/keys", func() {
m.Combo("").Get(repo.ListDeployKeys).
Post(bind(api.CreateKeyOption{}), repo.CreateDeployKey) | 1 | // Copyright 2015 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package v1
import (
"strings"
"github.com/go-macaron/binding"
"gopkg.in/macaron.v1"
api "github.com/gogits/go-gogs-client"
"github.com/gogits/gogs/models"
"github.com/gogits/gogs/modules/auth"
"github.com/gogits/gogs/modules/middleware"
"github.com/gogits/gogs/routers/api/v1/admin"
"github.com/gogits/gogs/routers/api/v1/misc"
"github.com/gogits/gogs/routers/api/v1/org"
"github.com/gogits/gogs/routers/api/v1/repo"
"github.com/gogits/gogs/routers/api/v1/user"
)
func RepoAssignment() macaron.Handler {
return func(ctx *middleware.Context) {
userName := ctx.Params(":username")
repoName := ctx.Params(":reponame")
var (
owner *models.User
err error
)
// Check if the user is the same as the repository owner.
if ctx.IsSigned && ctx.User.LowerName == strings.ToLower(userName) {
owner = ctx.User
} else {
owner, err = models.GetUserByName(userName)
if err != nil {
if models.IsErrUserNotExist(err) {
ctx.Error(404)
} else {
ctx.APIError(500, "GetUserByName", err)
}
return
}
}
ctx.Repo.Owner = owner
// Get repository.
repo, err := models.GetRepositoryByName(owner.Id, repoName)
if err != nil {
if models.IsErrRepoNotExist(err) {
ctx.Error(404)
} else {
ctx.APIError(500, "GetRepositoryByName", err)
}
return
} else if err = repo.GetOwner(); err != nil {
ctx.APIError(500, "GetOwner", err)
return
}
mode, err := models.AccessLevel(ctx.User, repo)
if err != nil {
ctx.APIError(500, "AccessLevel", err)
return
}
ctx.Repo.AccessMode = mode
// Check access.
if ctx.Repo.AccessMode == models.ACCESS_MODE_NONE {
ctx.Error(404)
return
}
ctx.Repo.Repository = repo
}
}
// Contexter middleware already checks token for user sign in process.
func ReqToken() macaron.Handler {
return func(ctx *middleware.Context) {
if !ctx.IsSigned {
ctx.Error(401)
return
}
}
}
func ReqBasicAuth() macaron.Handler {
return func(ctx *middleware.Context) {
if !ctx.IsBasicAuth {
ctx.Error(401)
return
}
}
}
func ReqAdmin() macaron.Handler {
return func(ctx *middleware.Context) {
if !ctx.User.IsAdmin {
ctx.Error(403)
return
}
}
}
// RegisterRoutes registers all v1 APIs routes to web application.
// FIXME: custom form error response
func RegisterRoutes(m *macaron.Macaron) {
bind := binding.Bind
m.Group("/v1", func() {
// Miscellaneous
m.Post("/markdown", bind(api.MarkdownOption{}), misc.Markdown)
m.Post("/markdown/raw", misc.MarkdownRaw)
// Users
m.Group("/users", func() {
m.Get("/search", user.Search)
m.Group("/:username", func() {
m.Get("", user.GetInfo)
m.Group("/tokens", func() {
m.Combo("").Get(user.ListAccessTokens).
Post(bind(api.CreateAccessTokenOption{}), user.CreateAccessToken)
}, ReqBasicAuth())
})
})
m.Group("/users", func() {
m.Group("/:username", func() {
m.Get("/keys", user.ListPublicKeys)
m.Get("/followers", user.ListFollowers)
m.Group("/following", func() {
m.Get("", user.ListFollowing)
m.Get("/:target", user.CheckFollowing)
})
})
}, ReqToken())
m.Group("/user", func() {
m.Combo("/emails").Get(user.ListEmails).
Post(bind(api.CreateEmailOption{}), user.AddEmail).
Delete(bind(api.CreateEmailOption{}), user.DeleteEmail)
m.Get("/followers", user.ListMyFollowers)
m.Group("/following", func() {
m.Get("", user.ListMyFollowing)
m.Combo("/:username").Get(user.CheckMyFollowing).Put(user.Follow).Delete(user.Unfollow)
})
m.Group("/keys", func() {
m.Combo("").Get(user.ListMyPublicKeys).
Post(bind(api.CreateKeyOption{}), user.CreatePublicKey)
m.Combo("/:id").Get(user.GetPublicKey).
Delete(user.DeletePublicKey)
})
}, ReqToken())
// Repositories
m.Combo("/user/repos", ReqToken()).Get(repo.ListMyRepos).
Post(bind(api.CreateRepoOption{}), repo.Create)
m.Post("/org/:org/repos", ReqToken(), bind(api.CreateRepoOption{}), repo.CreateOrgRepo)
m.Group("/repos", func() {
m.Get("/search", repo.Search)
})
m.Group("/repos", func() {
m.Post("/migrate", bind(auth.MigrateRepoForm{}), repo.Migrate)
m.Combo("/:username/:reponame").Get(repo.Get).
Delete(repo.Delete)
m.Group("/:username/:reponame", func() {
m.Combo("/hooks").Get(repo.ListHooks).
Post(bind(api.CreateHookOption{}), repo.CreateHook)
m.Patch("/hooks/:id:int", bind(api.EditHookOption{}), repo.EditHook)
m.Get("/raw/*", middleware.RepoRef(), repo.GetRawFile)
m.Get("/archive/*", repo.GetArchive)
m.Group("/keys", func() {
m.Combo("").Get(repo.ListDeployKeys).
Post(bind(api.CreateKeyOption{}), repo.CreateDeployKey)
m.Combo("/:id").Get(repo.GetDeployKey).
Delete(repo.DeleteDeploykey)
})
}, RepoAssignment())
}, ReqToken())
// Organizations
m.Get("/user/orgs", ReqToken(), org.ListMyOrgs)
m.Get("/users/:username/orgs", org.ListUserOrgs)
m.Combo("/orgs/:orgname").Get(org.Get).Patch(bind(api.EditOrgOption{}), org.Edit)
m.Any("/*", func(ctx *middleware.Context) {
ctx.Error(404)
})
m.Group("/admin", func() {
m.Group("/users", func() {
m.Post("", bind(api.CreateUserOption{}), admin.CreateUser)
m.Group("/:username", func() {
m.Combo("").Patch(bind(api.EditUserOption{}), admin.EditUser).
Delete(admin.DeleteUser)
m.Post("/keys", bind(api.CreateKeyOption{}), admin.CreatePublicKey)
m.Post("/orgs", bind(api.CreateOrgOption{}), admin.CreateOrg)
m.Post("/repos", bind(api.CreateRepoOption{}), admin.CreateRepo)
})
})
}, ReqAdmin())
})
}
| 1 | 10,032 | Shouldn't we use `:branchname` here? Or just `:branch`? | gogs-gogs | go |
@@ -930,7 +930,7 @@ class RandUUID(RandField):
# Automatic timestamp
-class AutoTime(VolatileValue):
+class AutoTime(RandNum):
def __init__(self, base=None):
if base is None:
self.diff = 0 | 1 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <[email protected]>
# Copyright (C) Michael Farrell <[email protected]>
# Copyright (C) Gauthier Sebaux
# This program is published under a GPLv2 license
"""
Fields that hold random numbers.
"""
from __future__ import absolute_import
import copy
import random
import time
import math
import re
import uuid
import struct
from scapy.base_classes import Net
from scapy.compat import bytes_encode, chb, plain_str
from scapy.utils import corrupt_bits, corrupt_bytes
from scapy.modules.six.moves import range
####################
# Random numbers #
####################
class RandomEnumeration:
"""iterate through a sequence in random order.
When all the values have been drawn, if forever=1, the drawing is done again. # noqa: E501
If renewkeys=0, the draw will be in the same order, guaranteeing that the same # noqa: E501
number will not be drawn again within fewer draws than the length of the sequence""" # noqa: E501
def __init__(self, inf, sup, seed=None, forever=1, renewkeys=0):
self.forever = forever
self.renewkeys = renewkeys
self.inf = inf
self.rnd = random.Random(seed)
self.sbox_size = 256
self.top = sup - inf + 1
n = 0
while (1 << n) < self.top:
n += 1
self.n = n
self.fs = min(3, (n + 1) // 2)
self.fsmask = 2**self.fs - 1
self.rounds = max(self.n, 3)
self.turns = 0
self.i = 0
def __iter__(self):
return self
def next(self):
while True:
if self.turns == 0 or (self.i == 0 and self.renewkeys):
self.cnt_key = self.rnd.randint(0, 2**self.n - 1)
self.sbox = [self.rnd.randint(0, self.fsmask)
for _ in range(self.sbox_size)]
self.turns += 1
while self.i < 2**self.n:
ct = self.i ^ self.cnt_key
self.i += 1
for _ in range(self.rounds): # Unbalanced Feistel Network
lsb = ct & self.fsmask
ct >>= self.fs
lsb ^= self.sbox[ct % self.sbox_size]
ct |= lsb << (self.n - self.fs)
if ct < self.top:
return self.inf + ct
self.i = 0
if not self.forever:
raise StopIteration
__next__ = next
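# Illustrative usage sketch: because the Feistel construction above permutes all
# 2**n values and filters them to the requested range, a single pass yields every
# value exactly once, e.g.
# >>> it = RandomEnumeration(0, 9)
# >>> sorted(next(it) for _ in range(10))
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# With forever=1 (the default) the iteration then simply starts another pass.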
class VolatileValue(object):
def __repr__(self):
return "<%s>" % self.__class__.__name__
def __eq__(self, other):
x = self._fix()
y = other._fix() if isinstance(other, VolatileValue) else other
if not isinstance(x, type(y)):
return False
return x == y
def __getattr__(self, attr):
if attr in ["__setstate__", "__getstate__"]:
raise AttributeError(attr)
return getattr(self._fix(), attr)
def __str__(self):
return str(self._fix())
def __bytes__(self):
return bytes_encode(self._fix())
def __len__(self):
return len(self._fix())
def copy(self):
return copy.copy(self)
def _fix(self):
return None
class RandField(VolatileValue):
pass
class RandNum(RandField):
"""Instances evaluate to random integers in selected range"""
min = 0
max = 0
def __init__(self, min, max):
self.min = min
self.max = max
def _fix(self):
return random.randrange(self.min, self.max + 1)
def __int__(self):
return int(self._fix())
def __index__(self):
return int(self)
def __nonzero__(self):
return bool(self._fix())
__bool__ = __nonzero__
def __add__(self, other):
return self._fix() + other
def __radd__(self, other):
return other + self._fix()
def __sub__(self, other):
return self._fix() - other
def __rsub__(self, other):
return other - self._fix()
def __mul__(self, other):
return self._fix() * other
def __rmul__(self, other):
return other * self._fix()
def __floordiv__(self, other):
return self._fix() / other
__div__ = __floordiv__
def __lt__(self, other):
return self._fix() < other
def __le__(self, other):
return self._fix() <= other
def __eq__(self, other):
return self._fix() == other
def __ne__(self, other):
return self._fix() != other
def __ge__(self, other):
return self._fix() >= other
def __gt__(self, other):
return self._fix() > other
def __lshift__(self, other):
return self._fix() << other
def __rshift__(self, other):
return self._fix() >> other
def __and__(self, other):
return self._fix() & other
def __rand__(self, other):
return other & self._fix()
def __or__(self, other):
return self._fix() | other
def __ror__(self, other):
return other | self._fix()
class RandFloat(RandNum):
def _fix(self):
return random.uniform(self.min, self.max)
class RandBinFloat(RandNum):
def _fix(self):
return struct.unpack("!f", bytes(RandBin(4)))[0]
class RandNumGamma(RandNum):
def __init__(self, alpha, beta):
self.alpha = alpha
self.beta = beta
def _fix(self):
return int(round(random.gammavariate(self.alpha, self.beta)))
class RandNumGauss(RandNum):
def __init__(self, mu, sigma):
self.mu = mu
self.sigma = sigma
def _fix(self):
return int(round(random.gauss(self.mu, self.sigma)))
class RandNumExpo(RandNum):
def __init__(self, lambd, base=0):
self.lambd = lambd
self.base = base
def _fix(self):
return self.base + int(round(random.expovariate(self.lambd)))
class RandEnum(RandNum):
"""Instances evaluate to integer sampling without replacement from the given interval""" # noqa: E501
def __init__(self, min, max, seed=None):
self.seq = RandomEnumeration(min, max, seed)
def _fix(self):
return next(self.seq)
class RandByte(RandNum):
def __init__(self):
RandNum.__init__(self, 0, 2**8 - 1)
class RandSByte(RandNum):
def __init__(self):
RandNum.__init__(self, -2**7, 2**7 - 1)
class RandShort(RandNum):
def __init__(self):
RandNum.__init__(self, 0, 2**16 - 1)
class RandSShort(RandNum):
def __init__(self):
RandNum.__init__(self, -2**15, 2**15 - 1)
class RandInt(RandNum):
def __init__(self):
RandNum.__init__(self, 0, 2**32 - 1)
class RandSInt(RandNum):
def __init__(self):
RandNum.__init__(self, -2**31, 2**31 - 1)
class RandLong(RandNum):
def __init__(self):
RandNum.__init__(self, 0, 2**64 - 1)
class RandSLong(RandNum):
def __init__(self):
RandNum.__init__(self, -2**63, 2**63 - 1)
class RandEnumByte(RandEnum):
def __init__(self):
RandEnum.__init__(self, 0, 2**8 - 1)
class RandEnumSByte(RandEnum):
def __init__(self):
RandEnum.__init__(self, -2**7, 2**7 - 1)
class RandEnumShort(RandEnum):
def __init__(self):
RandEnum.__init__(self, 0, 2**16 - 1)
class RandEnumSShort(RandEnum):
def __init__(self):
RandEnum.__init__(self, -2**15, 2**15 - 1)
class RandEnumInt(RandEnum):
def __init__(self):
RandEnum.__init__(self, 0, 2**32 - 1)
class RandEnumSInt(RandEnum):
def __init__(self):
RandEnum.__init__(self, -2**31, 2**31 - 1)
class RandEnumLong(RandEnum):
def __init__(self):
RandEnum.__init__(self, 0, 2**64 - 1)
class RandEnumSLong(RandEnum):
def __init__(self):
RandEnum.__init__(self, -2**63, 2**63 - 1)
class RandEnumKeys(RandEnum):
"""Picks a random value from dict keys list. """
def __init__(self, enum, seed=None):
self.enum = list(enum)
self.seq = RandomEnumeration(0, len(self.enum) - 1, seed)
def _fix(self):
return self.enum[next(self.seq)]
class RandChoice(RandField):
def __init__(self, *args):
if not args:
raise TypeError("RandChoice needs at least one choice")
self._choice = args
def _fix(self):
return random.choice(self._choice)
class RandString(RandField):
def __init__(self, size=None, chars=b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"): # noqa: E501
if size is None:
size = RandNumExpo(0.01)
self.size = size
self.chars = chars
def _fix(self):
s = b""
for _ in range(self.size):
rdm_chr = random.choice(self.chars)
s += rdm_chr if isinstance(rdm_chr, str) else chb(rdm_chr)
return s
def __str__(self):
return plain_str(self._fix())
def __bytes__(self):
return bytes_encode(self._fix())
def __mul__(self, n):
return self._fix() * n
class RandBin(RandString):
def __init__(self, size=None):
super(RandBin, self).__init__(size=size, chars=b"".join(chb(c) for c in range(256))) # noqa: E501
class RandTermString(RandBin):
def __init__(self, size, term):
self.term = bytes_encode(term)
super(RandTermString, self).__init__(size=size)
def _fix(self):
return RandBin._fix(self) + self.term
class RandIP(RandString):
def __init__(self, iptemplate="0.0.0.0/0"):
self.ip = Net(iptemplate)
def _fix(self):
return self.ip.choice()
class RandMAC(RandString):
def __init__(self, template="*"):
template += ":*:*:*:*:*"
template = template.split(":")
self.mac = ()
for i in range(6):
if template[i] == "*":
v = RandByte()
elif "-" in template[i]:
x, y = template[i].split("-")
v = RandNum(int(x, 16), int(y, 16))
else:
v = int(template[i], 16)
self.mac += (v,)
def _fix(self):
return "%02x:%02x:%02x:%02x:%02x:%02x" % self.mac
class RandIP6(RandString):
def __init__(self, ip6template="**"):
self.tmpl = ip6template
self.sp = self.tmpl.split(":")
for i, v in enumerate(self.sp):
if not v or v == "**":
continue
if "-" in v:
a, b = v.split("-")
elif v == "*":
a = b = ""
else:
a = b = v
if not a:
a = "0"
if not b:
b = "ffff"
if a == b:
self.sp[i] = int(a, 16)
else:
self.sp[i] = RandNum(int(a, 16), int(b, 16))
self.variable = "" in self.sp
self.multi = self.sp.count("**")
def _fix(self):
nbm = self.multi
ip = []
for i, n in enumerate(self.sp):
if n == "**":
nbm -= 1
remain = 8 - (len(self.sp) - i - 1) - len(ip) + nbm
if "" in self.sp:
remain += 1
if nbm or self.variable:
remain = random.randint(0, remain)
for j in range(remain):
ip.append("%04x" % random.randint(0, 65535))
elif isinstance(n, RandNum):
ip.append("%04x" % n)
elif n == 0:
ip.append("0")
elif not n:
ip.append("")
else:
ip.append("%04x" % n)
if len(ip) == 9:
ip.remove("")
if ip[-1] == "":
ip[-1] = "0"
return ":".join(ip)
class RandOID(RandString):
def __init__(self, fmt=None, depth=RandNumExpo(0.1), idnum=RandNumExpo(0.01)): # noqa: E501
self.ori_fmt = fmt
if fmt is not None:
fmt = fmt.split(".")
for i in range(len(fmt)):
if "-" in fmt[i]:
fmt[i] = tuple(map(int, fmt[i].split("-")))
self.fmt = fmt
self.depth = depth
self.idnum = idnum
def __repr__(self):
if self.ori_fmt is None:
return "<%s>" % self.__class__.__name__
else:
return "<%s [%s]>" % (self.__class__.__name__, self.ori_fmt)
def _fix(self):
if self.fmt is None:
return ".".join(str(self.idnum) for _ in range(1 + self.depth))
else:
oid = []
for i in self.fmt:
if i == "*":
oid.append(str(self.idnum))
elif i == "**":
oid += [str(self.idnum) for i in range(1 + self.depth)]
elif isinstance(i, tuple):
oid.append(str(random.randrange(*i)))
else:
oid.append(i)
return ".".join(oid)
class RandRegExp(RandField):
def __init__(self, regexp, lambda_=0.3,):
self._regexp = regexp
self._lambda = lambda_
@staticmethod
def choice_expand(s): # XXX does not support special sets like (ex ':alnum:') # noqa: E501
m = ""
invert = s and s[0] == "^"
while True:
p = s.find("-")
if p < 0:
break
if p == 0 or p == len(s) - 1:
m = "-"
if p:
s = s[:-1]
else:
s = s[1:]
else:
c1 = s[p - 1]
c2 = s[p + 1]
rng = "".join(map(chr, range(ord(c1), ord(c2) + 1)))
s = s[:p - 1] + rng + s[p + 1:]
res = m + s
if invert:
res = "".join(chr(x) for x in range(256) if chr(x) not in res)
return res
@staticmethod
def stack_fix(lst, index):
r = ""
mul = 1
for e in lst:
if isinstance(e, list):
if mul != 1:
mul = mul - 1
r += RandRegExp.stack_fix(e[1:] * mul, index)
# only the last iteration should be kept for back reference
f = RandRegExp.stack_fix(e[1:], index)
for i, idx in enumerate(index):
if e is idx:
index[i] = f
r += f
mul = 1
elif isinstance(e, tuple):
kind, val = e
if kind == "cite":
r += index[val - 1]
elif kind == "repeat":
mul = val
elif kind == "choice":
if mul == 1:
c = random.choice(val)
r += RandRegExp.stack_fix(c[1:], index)
else:
r += RandRegExp.stack_fix([e] * mul, index)
mul = 1
else:
if mul != 1:
r += RandRegExp.stack_fix([e] * mul, index)
mul = 1
else:
r += str(e)
return r
def _fix(self):
stack = [None]
index = []
current = stack
i = 0
ln = len(self._regexp)
interp = True
while i < ln:
c = self._regexp[i]
i += 1
if c == '(':
current = [current]
current[0].append(current)
elif c == '|':
p = current[0]
ch = p[-1]
if not isinstance(ch, tuple):
ch = ("choice", [current])
p[-1] = ch
else:
ch[1].append(current)
current = [p]
elif c == ')':
ch = current[0][-1]
if isinstance(ch, tuple):
ch[1].append(current)
index.append(current)
current = current[0]
elif c == '[' or c == '{':
current = [current]
current[0].append(current)
interp = False
elif c == ']':
current = current[0]
choice = RandRegExp.choice_expand("".join(current.pop()[1:]))
current.append(RandChoice(*list(choice)))
interp = True
elif c == '}':
current = current[0]
num = "".join(current.pop()[1:])
e = current.pop()
if "," not in num:
n = int(num)
current.append([current] + [e] * n)
else:
num_min, num_max = num.split(",")
if not num_min:
num_min = "0"
if num_max:
n = RandNum(int(num_min), int(num_max))
else:
n = RandNumExpo(self._lambda, base=int(num_min))
current.append(("repeat", n))
current.append(e)
interp = True
elif c == '\\':
c = self._regexp[i]
if c == "s":
c = RandChoice(" ", "\t")
elif c in "0123456789":
c = ("cite", ord(c) - 0x30)
current.append(c)
i += 1
elif not interp:
current.append(c)
elif c == '+':
e = current.pop()
current.append([current] + [e] * (int(random.expovariate(self._lambda)) + 1)) # noqa: E501
elif c == '*':
e = current.pop()
current.append([current] + [e] * int(random.expovariate(self._lambda))) # noqa: E501
elif c == '?':
if random.randint(0, 1):
current.pop()
elif c == '.':
current.append(RandChoice(*[chr(x) for x in range(256)]))
elif c == '$' or c == '^':
pass
else:
current.append(c)
return RandRegExp.stack_fix(stack[1:], index)
def __repr__(self):
return "<%s [%r]>" % (self.__class__.__name__, self._regexp)
class RandSingularity(RandChoice):
pass
class RandSingNum(RandSingularity):
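    # Builds a set of "singular" boundary values between mn and mx -- 0, the bounds, the
    # midpoint, signed powers of two and their off-by-one neighbours -- values that
    # commonly expose edge cases.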
@staticmethod
def make_power_of_two(end):
sign = 1
if end == 0:
end = 1
if end < 0:
end = -end
sign = -1
end_n = int(math.log(end) / math.log(2)) + 1
return {sign * 2**i for i in range(end_n)}
def __init__(self, mn, mx):
sing = {0, mn, mx, int((mn + mx) / 2)}
sing |= self.make_power_of_two(mn)
sing |= self.make_power_of_two(mx)
for i in sing.copy():
sing.add(i + 1)
sing.add(i - 1)
for i in sing.copy():
if not mn <= i <= mx:
sing.remove(i)
self._choice = list(sing)
self._choice.sort()
class RandSingByte(RandSingNum):
def __init__(self):
RandSingNum.__init__(self, 0, 2**8 - 1)
class RandSingSByte(RandSingNum):
def __init__(self):
RandSingNum.__init__(self, -2**7, 2**7 - 1)
class RandSingShort(RandSingNum):
def __init__(self):
RandSingNum.__init__(self, 0, 2**16 - 1)
class RandSingSShort(RandSingNum):
def __init__(self):
RandSingNum.__init__(self, -2**15, 2**15 - 1)
class RandSingInt(RandSingNum):
def __init__(self):
RandSingNum.__init__(self, 0, 2**32 - 1)
class RandSingSInt(RandSingNum):
def __init__(self):
RandSingNum.__init__(self, -2**31, 2**31 - 1)
class RandSingLong(RandSingNum):
def __init__(self):
RandSingNum.__init__(self, 0, 2**64 - 1)
class RandSingSLong(RandSingNum):
def __init__(self):
RandSingNum.__init__(self, -2**63, 2**63 - 1)
class RandSingString(RandSingularity):
def __init__(self):
self._choice = ["",
"%x",
"%%",
"%s",
"%i",
"%n",
"%x%x%x%x%x%x%x%x%x",
"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
"%",
"%%%",
"A" * 4096,
b"\x00" * 4096,
b"\xff" * 4096,
b"\x7f" * 4096,
b"\x80" * 4096,
" " * 4096,
"\\" * 4096,
"(" * 4096,
"../" * 1024,
"/" * 1024,
"${HOME}" * 512,
" or 1=1 --",
"' or 1=1 --",
'" or 1=1 --',
" or 1=1; #",
"' or 1=1; #",
'" or 1=1; #',
";reboot;",
"$(reboot)",
"`reboot`",
"index.php%00",
b"\x00",
"%00",
"\\",
"../../../../../../../../../../../../../../../../../etc/passwd", # noqa: E501
"%2e%2e%2f" * 20 + "etc/passwd",
"%252e%252e%252f" * 20 + "boot.ini",
"..%c0%af" * 20 + "etc/passwd",
"..%c0%af" * 20 + "boot.ini",
"//etc/passwd",
r"..\..\..\..\..\..\..\..\..\..\..\..\..\..\..\..\..\boot.ini", # noqa: E501
"AUX:",
"CLOCK$",
"COM:",
"CON:",
"LPT:",
"LST:",
"NUL:",
"CON:",
r"C:\CON\CON",
r"C:\boot.ini",
r"\\myserver\share",
"foo.exe:",
"foo.exe\\", ]
def __str__(self):
return str(self._fix())
def __bytes__(self):
return bytes_encode(self._fix())
class RandPool(RandField):
def __init__(self, *args):
"""Each parameter is a volatile object or a couple (volatile object, weight)""" # noqa: E501
pool = []
for p in args:
w = 1
if isinstance(p, tuple):
p, w = p
pool += [p] * w
self._pool = pool
def _fix(self):
r = random.choice(self._pool)
return r._fix()
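# Illustrative RandPool usage (a sketch, not part of the original file): a weight simply
# duplicates its entry in the pool, so
#   RandPool(RandIP(), (RandMAC(), 3))._fix()
# returns a random MAC address three times as often as a random IP address.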
class RandUUID(RandField):
"""Generates a random UUID.
    By default, this generates an RFC 4122 version 4 UUID (totally random).
See Python's ``uuid`` module documentation for more information.
Args:
template (optional): A template to build the UUID from. Not valid with
any other option.
node (optional): A 48-bit Host ID. Only valid for version 1 (where it
is optional).
clock_seq (optional): An integer of up to 14-bits for the sequence
number. Only valid for version 1 (where it is
optional).
namespace: A namespace identifier, which is also a UUID. Required for
versions 3 and 5, must be omitted otherwise.
name: string, required for versions 3 and 5, must be omitted otherwise.
version: Version of UUID to use (1, 3, 4 or 5). If omitted, attempts to
guess which version to generate, defaulting to version 4
(totally random).
Raises:
ValueError: on invalid constructor arguments
"""
# This was originally scapy.contrib.dce_rpc.RandUUID.
_BASE = "([0-9a-f]{{{0}}}|\\*|[0-9a-f]{{{0}}}:[0-9a-f]{{{0}}})"
_REG = re.compile(
r"^{0}-?{1}-?{1}-?{2}{2}-?{2}{2}{2}{2}{2}{2}$".format(
_BASE.format(8), _BASE.format(4), _BASE.format(2)
),
re.I
)
VERSIONS = [1, 3, 4, 5]
def __init__(self, template=None, node=None, clock_seq=None,
namespace=None, name=None, version=None):
self.uuid_template = None
self.node = None
self.clock_seq = None
self.namespace = None
self.node = None
self.version = None
if template:
if node or clock_seq or namespace or name or version:
raise ValueError("UUID template must be the only parameter, "
"if specified")
tmp = RandUUID._REG.match(template)
if tmp:
template = tmp.groups()
else:
# Invalid template
raise ValueError("UUID template is invalid")
rnd_f = [RandInt] + [RandShort] * 2 + [RandByte] * 8
uuid_template = []
for i, t in enumerate(template):
if t == "*":
val = rnd_f[i]()
elif ":" in t:
mini, maxi = t.split(":")
val = RandNum(int(mini, 16), int(maxi, 16))
else:
val = int(t, 16)
uuid_template.append(val)
self.uuid_template = tuple(uuid_template)
else:
if version:
if version not in RandUUID.VERSIONS:
raise ValueError("version is not supported")
else:
self.version = version
else:
# No version specified, try to guess...
# This could be wrong, and cause an error later!
if node or clock_seq:
self.version = 1
elif namespace and name:
self.version = 5
else:
# Don't know, random!
self.version = 4
# We have a version, now do things...
if self.version == 1:
if namespace or name:
raise ValueError("namespace and name may not be used with "
"version 1")
self.node = node
self.clock_seq = clock_seq
elif self.version in (3, 5):
if node or clock_seq:
raise ValueError("node and clock_seq may not be used with "
"version {}".format(self.version))
self.namespace = namespace
self.name = name
elif self.version == 4:
if namespace or name or node or clock_seq:
raise ValueError("node, clock_seq, node and clock_seq may "
"not be used with version 4. If you "
"did not specify version, you need to "
"specify it explicitly.")
def _fix(self):
if self.uuid_template:
return uuid.UUID(("%08x%04x%04x" + ("%02x" * 8))
% self.uuid_template)
elif self.version == 1:
return uuid.uuid1(self.node, self.clock_seq)
elif self.version == 3:
return uuid.uuid3(self.namespace, self.name)
elif self.version == 4:
return uuid.uuid4()
elif self.version == 5:
return uuid.uuid5(self.namespace, self.name)
else:
raise ValueError("Unhandled version")
# Automatic timestamp
class AutoTime(VolatileValue):
def __init__(self, base=None):
if base is None:
self.diff = 0
else:
self.diff = time.time() - base
def _fix(self):
return time.time() - self.diff
class IntAutoTime(AutoTime):
def _fix(self):
return int(time.time() - self.diff)
class ZuluTime(AutoTime):
def __init__(self, diff=0):
self.diff = diff
def _fix(self):
return time.strftime("%y%m%d%H%M%SZ",
time.gmtime(time.time() + self.diff))
class GeneralizedTime(AutoTime):
def __init__(self, diff=0):
self.diff = diff
def _fix(self):
return time.strftime("%Y%m%d%H%M%SZ",
time.gmtime(time.time() + self.diff))
class DelayedEval(VolatileValue):
""" Example of usage: DelayedEval("time.time()") """
def __init__(self, expr):
self.expr = expr
def _fix(self):
return eval(self.expr)
class IncrementalValue(VolatileValue):
def __init__(self, start=0, step=1, restart=-1):
self.start = self.val = start
self.step = step
self.restart = restart
def _fix(self):
v = self.val
if self.val == self.restart:
self.val = self.start
else:
self.val += self.step
return v
class CorruptedBytes(VolatileValue):
def __init__(self, s, p=0.01, n=None):
self.s = s
self.p = p
self.n = n
def _fix(self):
return corrupt_bytes(self.s, self.p, self.n)
class CorruptedBits(CorruptedBytes):
def _fix(self):
return corrupt_bits(self.s, self.p, self.n)
| 1 | 16,385 | `AutoTime` is a number but calling `int()` would make it crash. Tested through the docs | secdev-scapy | py |
@@ -333,7 +333,16 @@ namespace OpenTelemetry.Trace
new EvictingQueue<KeyValuePair<string, object>>(this.tracerConfiguration.MaxNumberOfAttributes);
}
- this.attributes.Add(new KeyValuePair<string, object>(key ?? string.Empty, sanitizedValue));
+ var attribute = this.attributes.FirstOrDefault(a => a.Key == (key ?? string.Empty));
+ var newAttribute = new KeyValuePair<string, object>(key ?? string.Empty, sanitizedValue);
+ if (attribute.Equals(default(KeyValuePair<string, object>)))
+ {
+ this.attributes.Add(newAttribute);
+ }
+ else
+ {
+ this.attributes.Replace(attribute, newAttribute);
+ }
}
}
| 1 | // <copyright file="SpanSdk.cs" company="OpenTelemetry Authors">
// Copyright 2018, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Runtime.CompilerServices;
using OpenTelemetry.Context.Propagation;
using OpenTelemetry.Internal;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace.Configuration;
using OpenTelemetry.Trace.Export;
using OpenTelemetry.Trace.Internal;
using OpenTelemetry.Utils;
namespace OpenTelemetry.Trace
{
/// <summary>
/// Span implementation.
/// </summary>
internal sealed class SpanSdk : TelemetrySpan, IDisposable
{
internal static readonly SpanSdk Invalid = new SpanSdk();
private static readonly ConditionalWeakTable<Activity, SpanSdk> ActivitySpanTable = new ConditionalWeakTable<Activity, SpanSdk>();
private readonly SpanData spanData;
private readonly Sampler sampler;
private readonly TracerConfiguration tracerConfiguration;
private readonly SpanProcessor spanProcessor;
private readonly bool createdFromActivity;
private readonly object lck = new object();
private readonly bool isOutOfBand;
private bool endOnDispose;
private Status status;
private EvictingQueue<KeyValuePair<string, object>> attributes;
private EvictingQueue<Event> events;
private bool hasEnded;
internal SpanSdk(
string name,
in SpanContext context,
in ActivitySpanId parentSpanId,
SpanKind kind,
DateTimeOffset startTimestamp,
IEnumerable<KeyValuePair<string, object>> attributes,
IEnumerable<Event> events,
IEnumerable<Link> links,
Resource resource,
Status status,
DateTimeOffset endTimestamp,
TracerConfiguration tracerConfiguration)
{
this.tracerConfiguration = tracerConfiguration;
this.IsRecording = true;
if (name != null)
{
this.Name = name;
}
else
{
OpenTelemetrySdkEventSource.Log.InvalidArgument("StartSpan", nameof(name), "is null");
this.Name = string.Empty;
}
this.Context = context;
this.Kind = kind;
this.StartTimestamp = startTimestamp;
this.SetLinks(links);
if (attributes != null)
{
foreach (var attribute in attributes)
{
this.SetAttribute(attribute.Key, attribute.Value);
}
}
if (events != null)
{
foreach (var evnt in events)
{
this.AddEvent(evnt);
}
}
this.Status = status;
this.EndTimestamp = endTimestamp;
this.LibraryResource = resource;
this.ParentSpanId = parentSpanId;
this.isOutOfBand = true;
this.hasEnded = true;
}
private SpanSdk()
{
this.Name = string.Empty;
this.Context = default;
this.IsRecording = false;
}
private SpanSdk(
string name,
SpanContext parentSpanContext,
ActivityAndTracestate activityAndTracestate,
bool createdFromActivity,
SpanKind spanKind,
SpanCreationOptions spanCreationOptions,
Sampler sampler,
TracerConfiguration tracerConfiguration,
SpanProcessor spanProcessor,
Resource libraryResource)
{
if (name != null)
{
this.Name = name;
}
else
{
OpenTelemetrySdkEventSource.Log.InvalidArgument("StartSpan", nameof(name), "is null");
this.Name = string.Empty;
}
this.LibraryResource = libraryResource;
IEnumerable<Link> links = null;
if (spanCreationOptions != null)
{
links = spanCreationOptions.Links ?? spanCreationOptions.LinksFactory?.Invoke();
this.StartTimestamp = spanCreationOptions.StartTimestamp;
}
if (this.StartTimestamp == default)
{
this.StartTimestamp = PreciseTimestamp.GetUtcNow();
}
this.sampler = sampler;
this.tracerConfiguration = tracerConfiguration;
this.spanProcessor = spanProcessor;
this.Kind = spanKind;
this.createdFromActivity = createdFromActivity;
this.Activity = activityAndTracestate.Activity;
var tracestate = activityAndTracestate.Tracestate;
this.IsRecording = MakeSamplingDecision(
parentSpanContext,
name,
spanKind,
spanCreationOptions?.Attributes,
links, // we'll enumerate again, but double enumeration over small collection is cheaper than allocation
this.Activity.TraceId,
this.Activity.SpanId,
this.sampler);
this.Activity.ActivityTraceFlags =
this.IsRecording
? this.Activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded
: this.Activity.ActivityTraceFlags &= ~ActivityTraceFlags.Recorded;
// this context is definitely not remote, setting isRemote to false
this.Context = new SpanContext(this.Activity.TraceId, this.Activity.SpanId, this.Activity.ActivityTraceFlags, false, tracestate);
this.ParentSpanId = this.Activity.ParentSpanId;
if (this.IsRecording)
{
this.SetLinks(links);
if (spanCreationOptions?.Attributes != null)
{
foreach (var attribute in spanCreationOptions.Attributes)
{
this.SetAttribute(attribute.Key, attribute.Value);
}
}
this.spanData = new SpanData(this);
this.spanProcessor.OnStart(this.spanData);
}
this.isOutOfBand = false;
}
public override SpanContext Context { get; }
public string Name { get; private set; }
/// <inheritdoc/>
public override Status Status
{
set
{
if (!value.IsValid)
{
OpenTelemetrySdkEventSource.Log.InvalidArgument("set_Status", nameof(value), "is null");
return;
}
this.status = value;
}
}
public ActivitySpanId ParentSpanId { get; }
/// <inheritdoc/>
public override bool IsRecording { get; }
/// <summary>
/// Gets attributes.
/// </summary>
public IEnumerable<KeyValuePair<string, object>> Attributes => this.attributes;
/// <summary>
/// Gets events.
/// </summary>
public IEnumerable<Event> Events => this.events;
/// <summary>
/// Gets links.
/// </summary>
public IEnumerable<Link> Links { get; private set; }
/// <summary>
/// Gets span start timestamp.
/// </summary>
public DateTimeOffset StartTimestamp { get; private set; }
/// <summary>
/// Gets span end timestamp.
/// </summary>
public DateTimeOffset EndTimestamp { get; private set; }
/// <summary>
/// Gets the span kind.
/// </summary>
public SpanKind? Kind { get; }
/// <summary>
/// Gets the "Library Resource" (name + version) associated with the TracerSdk that produced this span.
/// </summary>
public Resource LibraryResource { get; }
internal static SpanSdk Current
{
get
{
var currentActivity = Activity.Current;
if (currentActivity == null)
{
return Invalid;
}
if (ActivitySpanTable.TryGetValue(currentActivity, out var currentSpan))
{
return currentSpan;
}
return Invalid;
}
}
internal Activity Activity { get; }
public Status GetStatus()
{
return this.status;
}
/// <inheritdoc />
public override void UpdateName(string name)
{
if (this.hasEnded)
{
OpenTelemetrySdkEventSource.Log.UnexpectedCallOnEndedSpan("UpdateName");
return;
}
if (name != null)
{
this.Name = name;
}
else
{
OpenTelemetrySdkEventSource.Log.InvalidArgument("UpdateName", nameof(name), "is null");
this.Name = string.Empty;
}
}
/// <inheritdoc/>
public override void SetAttribute(string key, object value)
{
if (!this.IsRecording)
{
return;
}
if (this.hasEnded)
{
OpenTelemetrySdkEventSource.Log.UnexpectedCallOnEndedSpan("SetAttribute");
return;
}
object sanitizedValue = value;
if (value == null)
{
sanitizedValue = string.Empty;
}
else if (!this.IsAttributeValueTypeSupported(value))
{
OpenTelemetrySdkEventSource.Log.InvalidArgument("SetAttribute", nameof(value), $"Type '{value.GetType()}' of attribute '{key}' is not supported");
sanitizedValue = string.Empty;
}
lock (this.lck)
{
if (this.attributes == null)
{
this.attributes =
new EvictingQueue<KeyValuePair<string, object>>(this.tracerConfiguration.MaxNumberOfAttributes);
}
this.attributes.Add(new KeyValuePair<string, object>(key ?? string.Empty, sanitizedValue));
}
}
/// <inheritdoc/>
public override void SetAttribute(string key, bool value)
{
if (!this.IsRecording)
{
return;
}
if (this.hasEnded)
{
OpenTelemetrySdkEventSource.Log.UnexpectedCallOnEndedSpan("SetAttribute");
return;
}
lock (this.lck)
{
if (this.attributes == null)
{
this.attributes =
new EvictingQueue<KeyValuePair<string, object>>(this.tracerConfiguration.MaxNumberOfAttributes);
}
this.attributes.Add(new KeyValuePair<string, object>(key ?? string.Empty, value));
}
}
/// <inheritdoc/>
public override void SetAttribute(string key, long value)
{
if (!this.IsRecording)
{
return;
}
if (this.hasEnded)
{
OpenTelemetrySdkEventSource.Log.UnexpectedCallOnEndedSpan("SetAttribute");
return;
}
lock (this.lck)
{
if (this.attributes == null)
{
this.attributes =
new EvictingQueue<KeyValuePair<string, object>>(this.tracerConfiguration.MaxNumberOfAttributes);
}
this.attributes.Add(new KeyValuePair<string, object>(key ?? string.Empty, value));
}
}
/// <inheritdoc/>
public override void SetAttribute(string key, double value)
{
if (!this.IsRecording)
{
return;
}
if (this.hasEnded)
{
OpenTelemetrySdkEventSource.Log.UnexpectedCallOnEndedSpan("SetAttribute");
return;
}
lock (this.lck)
{
if (this.attributes == null)
{
this.attributes =
new EvictingQueue<KeyValuePair<string, object>>(this.tracerConfiguration.MaxNumberOfAttributes);
}
this.attributes.Add(new KeyValuePair<string, object>(key ?? string.Empty, value));
}
}
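        // Refactoring sketch only (not part of the original file; the helper name and exact
        // shape are hypothetical, and it assumes EvictingQueue exposes a Replace method as
        // used in the patch above): the four SetAttribute overloads repeat the same
        // lazily-initialised-queue-plus-add block, which could be centralised, e.g.:
        //
        //     private void AddOrReplaceAttribute(string key, object value)
        //     {
        //         lock (this.lck)
        //         {
        //             if (this.attributes == null)
        //             {
        //                 this.attributes = new EvictingQueue<KeyValuePair<string, object>>(this.tracerConfiguration.MaxNumberOfAttributes);
        //             }
        //
        //             var newAttribute = new KeyValuePair<string, object>(key ?? string.Empty, value);
        //             var existing = this.attributes.FirstOrDefault(a => a.Key == newAttribute.Key);
        //             if (existing.Equals(default(KeyValuePair<string, object>)))
        //             {
        //                 this.attributes.Add(newAttribute);
        //             }
        //             else
        //             {
        //                 this.attributes.Replace(existing, newAttribute);
        //             }
        //         }
        //     }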
/// <inheritdoc/>
public override void AddEvent(string name)
{
if (!this.IsRecording)
{
return;
}
if (this.hasEnded)
{
OpenTelemetrySdkEventSource.Log.UnexpectedCallOnEndedSpan("AddEvent");
return;
}
this.AddEvent(new Event(name, PreciseTimestamp.GetUtcNow()));
}
/// <inheritdoc/>
public override void AddEvent(Event addEvent)
{
if (addEvent == null)
{
OpenTelemetrySdkEventSource.Log.InvalidArgument("AddEvent", nameof(addEvent), "is null");
return;
}
if (!this.IsRecording)
{
return;
}
if (this.hasEnded)
{
OpenTelemetrySdkEventSource.Log.UnexpectedCallOnEndedSpan("AddEvent");
return;
}
lock (this.lck)
{
if (this.events == null)
{
this.events =
new EvictingQueue<Event>(this.tracerConfiguration.MaxNumberOfEvents);
}
this.events.Add(addEvent);
}
}
/// <inheritdoc/>
public override void End()
{
this.End(PreciseTimestamp.GetUtcNow());
}
public override void End(DateTimeOffset endTimestamp)
{
if (this.hasEnded)
{
OpenTelemetrySdkEventSource.Log.UnexpectedCallOnEndedSpan("End");
return;
}
this.hasEnded = true;
this.EndTimestamp = endTimestamp;
if (!this.createdFromActivity)
{
this.Activity.SetEndTime(endTimestamp.UtcDateTime);
}
if (this.endOnDispose)
{
this.EndScope();
}
if (this.IsRecording)
{
this.spanProcessor.OnEnd(this.spanData);
}
}
public void Dispose()
{
this.End();
}
internal static SpanSdk CreateFromParentSpan(
string name,
TelemetrySpan parentSpan,
SpanKind spanKind,
SpanCreationOptions spanCreationOptions,
Sampler sampler,
TracerConfiguration tracerConfiguration,
SpanProcessor spanProcessor,
Resource libraryResource)
{
if (parentSpan.Context.IsValid)
{
return new SpanSdk(
name,
parentSpan.Context,
FromParentSpan(name, parentSpan),
false,
spanKind,
spanCreationOptions,
sampler,
tracerConfiguration,
spanProcessor,
libraryResource);
}
var currentActivity = Activity.Current;
if (currentActivity == null ||
currentActivity.IdFormat != ActivityIdFormat.W3C)
{
return new SpanSdk(
name,
default,
CreateRoot(name),
false,
spanKind,
spanCreationOptions,
sampler,
tracerConfiguration,
spanProcessor,
libraryResource);
}
return new SpanSdk(
name,
new SpanContext(
currentActivity.TraceId,
currentActivity.SpanId,
currentActivity.ActivityTraceFlags),
FromCurrentParentActivity(name, currentActivity),
false,
spanKind,
spanCreationOptions,
sampler,
tracerConfiguration,
spanProcessor,
libraryResource);
}
internal static SpanSdk CreateFromParentContext(
string name,
SpanContext parentContext,
SpanKind spanKind,
SpanCreationOptions spanCreationOptions,
Sampler sampler,
TracerConfiguration tracerConfiguration,
SpanProcessor spanProcessor,
Resource libraryResource)
{
return new SpanSdk(
name,
parentContext,
FromParentSpanContext(name, parentContext),
false,
spanKind,
spanCreationOptions,
sampler,
tracerConfiguration,
spanProcessor,
libraryResource);
}
internal static SpanSdk CreateRoot(
string name,
SpanKind spanKind,
SpanCreationOptions spanCreationOptions,
Sampler sampler,
TracerConfiguration tracerConfiguration,
SpanProcessor spanProcessor,
Resource libraryResource)
{
return new SpanSdk(
name,
default,
CreateRoot(name),
false,
spanKind,
spanCreationOptions,
sampler,
tracerConfiguration,
spanProcessor,
libraryResource);
}
internal static SpanSdk CreateFromActivity(
string name,
Activity activity,
SpanKind spanKind,
IEnumerable<Link> links,
Sampler sampler,
TracerConfiguration tracerConfiguration,
SpanProcessor spanProcessor,
Resource libraryResource)
{
var span = new SpanSdk(
name,
ParentContextFromActivity(activity),
FromActivity(activity),
true,
spanKind,
null,
sampler,
tracerConfiguration,
spanProcessor,
libraryResource)
{
StartTimestamp = new DateTimeOffset(activity.StartTimeUtc),
};
span.SetLinks(links);
span.BeginScope(true);
return span;
}
internal IDisposable BeginScope(bool endOnDispose)
{
if (this.isOutOfBand)
{
OpenTelemetrySdkEventSource.Log.AttemptToActivateOobSpan(this.Name);
return NoopDisposable.Instance;
}
if (ActivitySpanTable.TryGetValue(this.Activity, out _))
{
OpenTelemetrySdkEventSource.Log.AttemptToActivateActiveSpan(this.Name);
return this.endOnDispose ? this : NoopDisposable.Instance;
}
ActivitySpanTable.Add(this.Activity, this);
Activity.Current = this.Activity;
this.endOnDispose = endOnDispose;
if (this.endOnDispose)
{
return this;
}
return new ScopeInSpan(this);
}
private static bool MakeSamplingDecision(
SpanContext parent,
string name,
SpanKind spanKind,
IDictionary<string, object> attributes,
IEnumerable<Link> parentLinks,
ActivityTraceId traceId,
ActivitySpanId spanId,
Sampler sampler)
{
return sampler.ShouldSample(parent, traceId, spanId, name, spanKind, attributes, parentLinks).IsSampled;
}
private static ActivityAndTracestate FromCurrentParentActivity(string spanName, Activity current)
{
var activity = new Activity(spanName);
activity.SetIdFormat(ActivityIdFormat.W3C);
activity.Start();
Activity.Current = current;
List<KeyValuePair<string, string>> tracestate = null;
if (activity.TraceStateString != null)
{
tracestate = new List<KeyValuePair<string, string>>();
if (!TracestateUtils.AppendTracestate(activity.TraceStateString, tracestate))
{
activity.TraceStateString = null;
}
}
return new ActivityAndTracestate(activity, tracestate);
}
private static ActivityAndTracestate FromParentSpan(string spanName, TelemetrySpan parentSpan)
{
if (parentSpan is SpanSdk parentSpanImpl && parentSpanImpl.Activity == Activity.Current)
{
var activity = new Activity(spanName);
activity.SetIdFormat(ActivityIdFormat.W3C);
activity.TraceStateString = parentSpanImpl.Activity.TraceStateString;
var originalActivity = Activity.Current;
activity.Start();
Activity.Current = originalActivity;
return new ActivityAndTracestate(activity, parentSpan.Context.Tracestate);
}
return FromParentSpanContext(spanName, parentSpan.Context);
}
private static ActivityAndTracestate FromParentSpanContext(string spanName, SpanContext parentContext)
{
var activity = new Activity(spanName);
IEnumerable<KeyValuePair<string, string>> tracestate = null;
if (parentContext.IsValid)
{
activity.SetParentId(parentContext.TraceId,
parentContext.SpanId,
parentContext.TraceOptions);
if (parentContext.Tracestate != null && parentContext.Tracestate.Any())
{
activity.TraceStateString = TracestateUtils.GetString(parentContext.Tracestate);
tracestate = parentContext.Tracestate;
}
}
activity.SetIdFormat(ActivityIdFormat.W3C);
var originalActivity = Activity.Current;
activity.Start();
Activity.Current = originalActivity;
return new ActivityAndTracestate(activity, tracestate);
}
private static ActivityAndTracestate CreateRoot(string spanName)
{
var activity = new Activity(spanName);
activity.SetIdFormat(ActivityIdFormat.W3C);
var originalActivity = Activity.Current;
if (originalActivity != null)
{
activity.SetParentId(" ");
}
activity.Start();
Activity.Current = originalActivity;
return new ActivityAndTracestate(activity, null);
}
private static ActivityAndTracestate FromActivity(Activity activity)
{
List<KeyValuePair<string, string>> tracestate = null;
if (activity.TraceStateString != null)
{
tracestate = new List<KeyValuePair<string, string>>();
if (!TracestateUtils.AppendTracestate(activity.TraceStateString, tracestate))
{
activity.TraceStateString = null;
}
}
return new ActivityAndTracestate(activity, tracestate);
}
private static SpanContext ParentContextFromActivity(Activity activity)
{
if (activity.TraceId != default && activity.ParentSpanId != default)
{
return new SpanContext(
activity.TraceId,
activity.ParentSpanId,
activity.ActivityTraceFlags);
}
return default;
}
private void SetLinks(IEnumerable<Link> links)
{
if (this.IsRecording)
{
if (links != null)
{
var parentLinks = links.ToList();
if (parentLinks.Count <= this.tracerConfiguration.MaxNumberOfLinks)
{
this.Links = parentLinks;
}
else
{
this.Links = parentLinks.GetRange(parentLinks.Count - this.tracerConfiguration.MaxNumberOfLinks,
this.tracerConfiguration.MaxNumberOfLinks);
}
}
}
}
private void EndScope()
{
if (this.Activity == Activity.Current)
{
ActivitySpanTable.Remove(this.Activity);
// spans created from Activity do not control
// Activity lifetime and should not change Current activity
if (!this.createdFromActivity)
{
Activity.Current = this.Activity.Parent;
}
}
else
{
OpenTelemetrySdkEventSource.Log.AttemptToEndScopeWhichIsNotCurrent(this.Name);
}
}
private bool IsAttributeValueTypeSupported(object attributeValue)
{
if (this.IsNumericBoolOrString(attributeValue))
{
return true;
}
if (attributeValue is IEnumerable enumerable)
{
try
{
Type entryType = null;
foreach (var entry in enumerable)
{
if (entryType == null)
{
entryType = entry.GetType();
}
if (!this.IsNumericBoolOrString(entry) || entryType != entry.GetType())
{
return false;
}
}
}
catch
{
return false;
}
return true;
}
return false;
}
private bool IsNumericBoolOrString(object attributeValue)
{
return attributeValue is string
|| attributeValue is bool
|| attributeValue is int
|| attributeValue is uint
|| attributeValue is long
|| attributeValue is ulong
|| attributeValue is double
|| attributeValue is sbyte
|| attributeValue is byte
|| attributeValue is short
|| attributeValue is ushort
|| attributeValue is float
|| attributeValue is decimal;
}
private readonly struct ActivityAndTracestate
{
public readonly Activity Activity;
public readonly IEnumerable<KeyValuePair<string, string>> Tracestate;
public ActivityAndTracestate(Activity activity, IEnumerable<KeyValuePair<string, string>> tracestate)
{
this.Activity = activity;
this.Tracestate = tracestate;
}
}
private sealed class ScopeInSpan : IDisposable
{
private readonly SpanSdk span;
public ScopeInSpan(SpanSdk span)
{
this.span = span;
}
public void Dispose()
{
this.span.EndScope();
}
}
}
}
| 1 | 13,425 | @eddynaka Looks like the same block more or less 4 times, should we have a helper method? private void AddOrReplaceAttribute or something? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -381,7 +381,8 @@ public class JettySolrRunner {
dispatchFilter = root.getServletHandler().newFilterHolder(Source.EMBEDDED);
dispatchFilter.setHeldClass(SolrDispatchFilter.class);
dispatchFilter.setInitParameter("excludePatterns", excludePatterns);
- root.addFilter(dispatchFilter, "*", EnumSet.of(DispatcherType.REQUEST));
+ // Map dispatchFilter in same path as in web.xml
+ root.addFilter(dispatchFilter, "/*", EnumSet.of(DispatcherType.REQUEST));
synchronized (JettySolrRunner.this) {
waitOnSolr = true; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.client.solrj.embedded;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.net.BindException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import javax.servlet.DispatcherType;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.lucene.util.Constants;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.cloud.SocketProxy;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.common.util.SolrjNamedThreadFactory;
import org.apache.solr.common.util.TimeSource;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.servlet.SolrDispatchFilter;
import org.apache.solr.util.TimeOut;
import org.eclipse.jetty.alpn.server.ALPNServerConnectionFactory;
import org.eclipse.jetty.http2.HTTP2Cipher;
import org.eclipse.jetty.http2.server.HTTP2CServerConnectionFactory;
import org.eclipse.jetty.http2.server.HTTP2ServerConnectionFactory;
import org.eclipse.jetty.rewrite.handler.RewriteHandler;
import org.eclipse.jetty.rewrite.handler.RewritePatternRule;
import org.eclipse.jetty.server.Connector;
import org.eclipse.jetty.server.HttpConfiguration;
import org.eclipse.jetty.server.HttpConnectionFactory;
import org.eclipse.jetty.server.SecureRequestCustomizer;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.server.SslConnectionFactory;
import org.eclipse.jetty.server.handler.HandlerWrapper;
import org.eclipse.jetty.server.handler.gzip.GzipHandler;
import org.eclipse.jetty.server.session.DefaultSessionIdManager;
import org.eclipse.jetty.servlet.FilterHolder;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
import org.eclipse.jetty.servlet.Source;
import org.eclipse.jetty.util.component.LifeCycle;
import org.eclipse.jetty.util.ssl.SslContextFactory;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.eclipse.jetty.util.thread.ReservedThreadExecutor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
/**
* Run solr using jetty
*
* @since solr 1.3
*/
public class JettySolrRunner {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private static final int THREAD_POOL_MAX_THREADS = 10000;
// NOTE: needs to be larger than SolrHttpClient.threadPoolSweeperMaxIdleTime
private static final int THREAD_POOL_MAX_IDLE_TIME_MS = 260000;
Server server;
volatile FilterHolder dispatchFilter;
volatile FilterHolder debugFilter;
private boolean waitOnSolr = false;
private int jettyPort = -1;
private final JettyConfig config;
private final String solrHome;
private final Properties nodeProperties;
private volatile boolean startedBefore = false;
private LinkedList<FilterHolder> extraFilters;
private static final String excludePatterns = "/css/.+,/js/.+,/img/.+,/tpl/.+";
private int proxyPort = -1;
private final boolean enableProxy;
private SocketProxy proxy;
private String protocol;
private String host;
private volatile boolean started = false;
public static class DebugFilter implements Filter {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private AtomicLong nRequests = new AtomicLong();
List<Delay> delays = new ArrayList<>();
public long getTotalRequests() {
return nRequests.get();
}
/**
     * Introduce a delay of the specified number of milliseconds for the specified request.
*
* @param reason Info message logged when delay occurs
* @param count The count-th request will experience a delay
* @param delay There will be a delay of this many milliseconds
*/
public void addDelay(String reason, int count, int delay) {
delays.add(new Delay(reason, count, delay));
}
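    // Illustrative use from a test (a sketch, not part of the original file):
    //   runner.getDebugFilter().addDelay("simulate a slow node", 2, 5000);
    // makes the second request that reaches this filter pause for 5 seconds.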
/**
* Remove any delay introduced before.
*/
public void unsetDelay() {
delays.clear();
}
@Override
public void init(FilterConfig filterConfig) throws ServletException { }
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain) throws IOException, ServletException {
nRequests.incrementAndGet();
executeDelay();
filterChain.doFilter(servletRequest, servletResponse);
}
@Override
public void destroy() { }
private void executeDelay() {
int delayMs = 0;
for (Delay delay: delays) {
this.log.info("Delaying "+delay.delayValue+", for reason: "+delay.reason);
if (delay.counter.decrementAndGet() == 0) {
delayMs += delay.delayValue;
}
}
if (delayMs > 0) {
this.log.info("Pausing this socket connection for " + delayMs + "ms...");
try {
Thread.sleep(delayMs);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
this.log.info("Waking up after the delay of " + delayMs + "ms...");
}
}
}
/**
* Create a new JettySolrRunner.
*
* After construction, you must start the jetty with {@link #start()}
*
* @param solrHome the solr home directory to use
* @param context the context to run in
* @param port the port to run on
*/
public JettySolrRunner(String solrHome, String context, int port) {
this(solrHome, JettyConfig.builder().setContext(context).setPort(port).build());
}
/**
* Construct a JettySolrRunner
*
* After construction, you must start the jetty with {@link #start()}
*
* @param solrHome the base path to run from
* @param config the configuration
*/
public JettySolrRunner(String solrHome, JettyConfig config) {
this(solrHome, new Properties(), config);
}
/**
* Construct a JettySolrRunner
*
* After construction, you must start the jetty with {@link #start()}
*
* @param solrHome the solrHome to use
* @param nodeProperties the container properties
* @param config the configuration
*/
public JettySolrRunner(String solrHome, Properties nodeProperties, JettyConfig config) {
this(solrHome, nodeProperties, config, false);
}
/**
* Construct a JettySolrRunner
*
* After construction, you must start the jetty with {@link #start()}
*
* @param solrHome the solrHome to use
* @param nodeProperties the container properties
* @param config the configuration
* @param enableProxy enables proxy feature to disable connections
*/
public JettySolrRunner(String solrHome, Properties nodeProperties, JettyConfig config, boolean enableProxy) {
this.enableProxy = enableProxy;
this.solrHome = solrHome;
this.config = config;
this.nodeProperties = nodeProperties;
if (enableProxy) {
try {
proxy = new SocketProxy(0, config.sslConfig != null && config.sslConfig.isSSLMode());
} catch (Exception e) {
throw new RuntimeException(e);
}
setProxyPort(proxy.getListenPort());
}
this.init(this.config.port);
}
private void init(int port) {
QueuedThreadPool qtp = new QueuedThreadPool();
qtp.setMaxThreads(THREAD_POOL_MAX_THREADS);
qtp.setIdleTimeout(THREAD_POOL_MAX_IDLE_TIME_MS);
qtp.setReservedThreads(0);
server = new Server(qtp);
server.manage(qtp);
server.setStopAtShutdown(config.stopAtShutdown);
if (System.getProperty("jetty.testMode") != null) {
// if this property is true, then jetty will be configured to use SSL
// leveraging the same system properties as java to specify
// the keystore/truststore if they are set unless specific config
// is passed via the constructor.
//
// This means we will use the same truststore, keystore (and keys) for
// the server as well as any client actions taken by this JVM in
// talking to that server, but for the purposes of testing that should
// be good enough
final SslContextFactory sslcontext = SSLConfig.createContextFactory(config.sslConfig);
HttpConfiguration configuration = new HttpConfiguration();
ServerConnector connector;
if (sslcontext != null) {
configuration.setSecureScheme("https");
configuration.addCustomizer(new SecureRequestCustomizer());
HttpConnectionFactory http1ConnectionFactory = new HttpConnectionFactory(configuration);
if (config.onlyHttp1 || !Constants.JRE_IS_MINIMUM_JAVA9) {
connector = new ServerConnector(server, new SslConnectionFactory(sslcontext,
http1ConnectionFactory.getProtocol()),
http1ConnectionFactory);
} else {
sslcontext.setCipherComparator(HTTP2Cipher.COMPARATOR);
connector = new ServerConnector(server);
SslConnectionFactory sslConnectionFactory = new SslConnectionFactory(sslcontext, "alpn");
connector.addConnectionFactory(sslConnectionFactory);
connector.setDefaultProtocol(sslConnectionFactory.getProtocol());
HTTP2ServerConnectionFactory http2ConnectionFactory = new HTTP2ServerConnectionFactory(configuration);
ALPNServerConnectionFactory alpn = new ALPNServerConnectionFactory(
http2ConnectionFactory.getProtocol(),
http1ConnectionFactory.getProtocol());
alpn.setDefaultProtocol(http1ConnectionFactory.getProtocol());
connector.addConnectionFactory(alpn);
connector.addConnectionFactory(http1ConnectionFactory);
connector.addConnectionFactory(http2ConnectionFactory);
}
} else {
if (config.onlyHttp1) {
connector = new ServerConnector(server, new HttpConnectionFactory(configuration));
} else {
connector = new ServerConnector(server, new HttpConnectionFactory(configuration),
new HTTP2CServerConnectionFactory(configuration));
}
}
connector.setReuseAddress(true);
connector.setSoLingerTime(-1);
connector.setPort(port);
connector.setHost("127.0.0.1");
connector.setIdleTimeout(THREAD_POOL_MAX_IDLE_TIME_MS);
connector.setStopTimeout(0);
server.setConnectors(new Connector[] {connector});
server.setSessionIdManager(new DefaultSessionIdManager(server, new Random()));
} else {
HttpConfiguration configuration = new HttpConfiguration();
ServerConnector connector = new ServerConnector(server, new HttpConnectionFactory(configuration));
connector.setPort(port);
connector.setSoLingerTime(-1);
connector.setIdleTimeout(THREAD_POOL_MAX_IDLE_TIME_MS);
server.setConnectors(new Connector[] {connector});
}
HandlerWrapper chain;
{
// Initialize the servlets
final ServletContextHandler root = new ServletContextHandler(server, config.context, ServletContextHandler.SESSIONS);
server.addLifeCycleListener(new LifeCycle.Listener() {
@Override
public void lifeCycleStopping(LifeCycle arg0) {
}
@Override
public void lifeCycleStopped(LifeCycle arg0) {}
@Override
public void lifeCycleStarting(LifeCycle arg0) {
}
@Override
public void lifeCycleStarted(LifeCycle arg0) {
jettyPort = getFirstConnectorPort();
int port = jettyPort;
if (proxyPort != -1) port = proxyPort;
nodeProperties.setProperty("hostPort", Integer.toString(port));
nodeProperties.setProperty("hostContext", config.context);
root.getServletContext().setAttribute(SolrDispatchFilter.PROPERTIES_ATTRIBUTE, nodeProperties);
root.getServletContext().setAttribute(SolrDispatchFilter.SOLRHOME_ATTRIBUTE, solrHome);
log.info("Jetty properties: {}", nodeProperties);
debugFilter = root.addFilter(DebugFilter.class, "*", EnumSet.of(DispatcherType.REQUEST) );
extraFilters = new LinkedList<>();
for (Map.Entry<Class<? extends Filter>, String> entry : config.extraFilters.entrySet()) {
extraFilters.add(root.addFilter(entry.getKey(), entry.getValue(), EnumSet.of(DispatcherType.REQUEST)));
}
for (Map.Entry<ServletHolder, String> entry : config.extraServlets.entrySet()) {
root.addServlet(entry.getKey(), entry.getValue());
}
dispatchFilter = root.getServletHandler().newFilterHolder(Source.EMBEDDED);
dispatchFilter.setHeldClass(SolrDispatchFilter.class);
dispatchFilter.setInitParameter("excludePatterns", excludePatterns);
root.addFilter(dispatchFilter, "*", EnumSet.of(DispatcherType.REQUEST));
synchronized (JettySolrRunner.this) {
waitOnSolr = true;
JettySolrRunner.this.notify();
}
}
@Override
public void lifeCycleFailure(LifeCycle arg0, Throwable arg1) {
System.clearProperty("hostPort");
}
});
// for some reason, there must be a servlet for this to get applied
root.addServlet(Servlet404.class, "/*");
chain = root;
}
chain = injectJettyHandlers(chain);
if(config.enableV2) {
RewriteHandler rwh = new RewriteHandler();
rwh.setHandler(chain);
rwh.setRewriteRequestURI(true);
rwh.setRewritePathInfo(false);
rwh.setOriginalPathAttribute("requestedPath");
rwh.addRule(new RewritePatternRule("/api/*", "/solr/____v2"));
chain = rwh;
}
GzipHandler gzipHandler = new GzipHandler();
gzipHandler.setHandler(chain);
gzipHandler.setMinGzipSize(0);
gzipHandler.setCheckGzExists(false);
gzipHandler.setCompressionLevel(-1);
gzipHandler.setExcludedAgentPatterns(".*MSIE.6\\.0.*");
gzipHandler.setIncludedMethods("GET");
server.setHandler(gzipHandler);
}
/** descendants may inject own handler chaining it to the given root
* and then returning that own one*/
protected HandlerWrapper injectJettyHandlers(HandlerWrapper chain) {
return chain;
}
/**
* @return the {@link SolrDispatchFilter} for this node
*/
public SolrDispatchFilter getSolrDispatchFilter() { return (SolrDispatchFilter) dispatchFilter.getFilter(); }
/**
* @return the {@link CoreContainer} for this node
*/
public CoreContainer getCoreContainer() {
if (getSolrDispatchFilter() == null || getSolrDispatchFilter().getCores() == null) {
return null;
}
return getSolrDispatchFilter().getCores();
}
public String getNodeName() {
if (getCoreContainer() == null) {
return null;
}
return getCoreContainer().getZkController().getNodeName();
}
public boolean isRunning() {
return server.isRunning() && dispatchFilter != null && dispatchFilter.isRunning();
}
public boolean isStopped() {
return (server.isStopped() && dispatchFilter == null) || (server.isStopped() && dispatchFilter.isStopped()
&& ((QueuedThreadPool) server.getThreadPool()).isStopped());
}
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
/**
* Start the Jetty server
*
* If the server has been started before, it will restart using the same port
*
* @throws Exception if an error occurs on startup
*/
public void start() throws Exception {
start(true);
}
/**
* Start the Jetty server
*
* @param reusePort when true, will start up on the same port as used by any
* previous runs of this JettySolrRunner. If false, will use
* the port specified by the server's JettyConfig.
*
* @throws Exception if an error occurs on startup
*/
public void start(boolean reusePort) throws Exception {
// Do not let Jetty/Solr pollute the MDC for this thread
Map<String, String> prevContext = MDC.getCopyOfContextMap();
MDC.clear();
try {
int port = reusePort && jettyPort != -1 ? jettyPort : this.config.port;
log.info("Start Jetty (configured port={}, binding port={})", this.config.port, port);
// if started before, make a new server
if (startedBefore) {
waitOnSolr = false;
init(port);
} else {
startedBefore = true;
}
if (!server.isRunning()) {
if (config.portRetryTime > 0) {
retryOnPortBindFailure(config.portRetryTime, port);
} else {
server.start();
}
}
synchronized (JettySolrRunner.this) {
int cnt = 0;
while (!waitOnSolr || !dispatchFilter.isRunning() || getCoreContainer() == null) {
this.wait(100);
if (cnt++ == 15) {
throw new RuntimeException("Jetty/Solr unresponsive");
}
}
}
if (config.waitForLoadingCoresToFinishMs != null && config.waitForLoadingCoresToFinishMs > 0L) {
waitForLoadingCoresToFinish(config.waitForLoadingCoresToFinishMs);
}
setProtocolAndHost();
if (enableProxy) {
if (started) {
proxy.reopen();
} else {
proxy.open(getBaseUrl().toURI());
}
}
} finally {
started = true;
if (prevContext != null) {
MDC.setContextMap(prevContext);
} else {
MDC.clear();
}
}
}
private void setProtocolAndHost() {
String protocol = null;
Connector[] conns = server.getConnectors();
if (0 == conns.length) {
throw new IllegalStateException("Jetty Server has no Connectors");
}
ServerConnector c = (ServerConnector) conns[0];
protocol = c.getDefaultProtocol().toLowerCase(Locale.ROOT).startsWith("ssl") ? "https" : "http";
this.protocol = protocol;
this.host = c.getHost();
}
private void retryOnPortBindFailure(int portRetryTime, int port) throws Exception, InterruptedException {
TimeOut timeout = new TimeOut(portRetryTime, TimeUnit.SECONDS, TimeSource.NANO_TIME);
int tryCnt = 1;
while (true) {
try {
log.info("Trying to start Jetty on port {} try number {} ...", port, tryCnt++);
server.start();
break;
} catch (IOException ioe) {
Exception e = lookForBindException(ioe);
if (e instanceof BindException) {
log.info("Port is in use, will try again until timeout of " + timeout);
server.stop();
Thread.sleep(3000);
if (!timeout.hasTimedOut()) {
continue;
}
}
throw e;
}
}
}
/**
* Traverses the cause chain looking for a BindException. Returns either a bind exception
* that was found in the chain or the original argument.
*
* @param ioe An IOException that might wrap a BindException
* @return A bind exception if present otherwise ioe
*/
Exception lookForBindException(IOException ioe) {
Exception e = ioe;
while(e.getCause() != null && !(e == e.getCause()) && ! (e instanceof BindException)) {
if (e.getCause() instanceof Exception) {
e = (Exception) e.getCause();
if (e instanceof BindException) {
return e;
}
}
}
return ioe;
}
/**
* Stop the Jetty server
*
* @throws Exception if an error occurs on shutdown
*/
public void stop() throws Exception {
// Do not let Jetty/Solr pollute the MDC for this thread
Map<String,String> prevContext = MDC.getCopyOfContextMap();
MDC.clear();
try {
Filter filter = dispatchFilter.getFilter();
// we want to shutdown outside of jetty cutting us off
SolrDispatchFilter sdf = getSolrDispatchFilter();
ExecutorService customThreadPool = null;
if (sdf != null) {
customThreadPool = ExecutorUtil.newMDCAwareCachedThreadPool(new SolrjNamedThreadFactory("jettyShutDown"));
sdf.closeOnDestroy(false);
// customThreadPool.submit(() -> {
// try {
// sdf.close();
// } catch (Throwable t) {
// log.error("Error shutting down Solr", t);
// }
// });
try {
sdf.close();
} catch (Throwable t) {
log.error("Error shutting down Solr", t);
}
}
QueuedThreadPool qtp = (QueuedThreadPool) server.getThreadPool();
ReservedThreadExecutor rte = qtp.getBean(ReservedThreadExecutor.class);
server.stop();
if (server.getState().equals(Server.FAILED)) {
filter.destroy();
if (extraFilters != null) {
for (FilterHolder f : extraFilters) {
f.getFilter().destroy();
}
}
}
// stop timeout is 0, so we will interrupt right away
while(!qtp.isStopped()) {
qtp.stop();
if (qtp.isStopped()) {
Thread.sleep(50);
}
}
// we tried to kill everything, now we wait for executor to stop
qtp.setStopTimeout(Integer.MAX_VALUE);
qtp.stop();
qtp.join();
if (rte != null) {
// we try and wait for the reserved thread executor, but it doesn't always seem to work
// so we actually set 0 reserved threads at creation
rte.stop();
TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, TimeSource.NANO_TIME);
timeout.waitFor("Timeout waiting for reserved executor to stop.", ()
-> rte.isStopped());
}
if (customThreadPool != null) {
ExecutorUtil.shutdownAndAwaitTermination(customThreadPool);
}
do {
try {
server.join();
} catch (InterruptedException e) {
// ignore
}
} while (!server.isStopped());
} finally {
if (enableProxy) {
proxy.close();
}
if (prevContext != null) {
MDC.setContextMap(prevContext);
} else {
MDC.clear();
}
}
}
/**
* Returns the Local Port of the jetty Server.
*
* @exception RuntimeException if there is no Connector
*/
private int getFirstConnectorPort() {
Connector[] conns = server.getConnectors();
if (0 == conns.length) {
throw new RuntimeException("Jetty Server has no Connectors");
}
return ((ServerConnector) conns[0]).getLocalPort();
}
/**
* Returns the Local Port of the jetty Server.
*
* @exception RuntimeException if there is no Connector
*/
public int getLocalPort() {
return getLocalPort(false);
}
/**
* Returns the Local Port of the jetty Server.
*
* @param internalPort pass true to get the true jetty port rather than the proxy port if configured
*
* @exception RuntimeException if there is no Connector
*/
public int getLocalPort(boolean internalPort) {
if (jettyPort == -1) {
throw new IllegalStateException("You cannot get the port until this instance has started");
}
if (internalPort ) {
return jettyPort;
}
return (proxyPort != -1) ? proxyPort : jettyPort;
}
/**
   * Sets the port of a local socket proxy that sits in front of this server; if set
* then all client traffic will flow through the proxy, giving us the ability to
* simulate network partitions very easily.
*/
public void setProxyPort(int proxyPort) {
this.proxyPort = proxyPort;
}
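  // Illustrative use of the proxy (a sketch, not part of the original file): construct the
  // runner with enableProxy=true so a SocketProxy fronts the Jetty port, then cut and heal
  // traffic to mimic a network partition:
  //   JettySolrRunner jetty = new JettySolrRunner(solrHome, props, config, true);
  //   jetty.start();
  //   jetty.getProxy().close();   // drop all connections through the proxy
  //   jetty.getProxy().reopen();  // restore connectivity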
/**
* Returns a base URL consisting of the protocol, host, and port for a
* Connector in use by the Jetty Server contained in this runner.
*/
public URL getBaseUrl() {
try {
return new URL(protocol, host, jettyPort, config.context);
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
}
/**
* Returns a base URL consisting of the protocol, host, and port for a
* Connector in use by the Jetty Server contained in this runner.
*/
public URL getProxyBaseUrl() {
try {
return new URL(protocol, host, getLocalPort(), config.context);
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
}
public SolrClient newClient() {
return new HttpSolrClient.Builder(getBaseUrl().toString()).build();
}
public SolrClient newClient(int connectionTimeoutMillis, int socketTimeoutMillis) {
return new HttpSolrClient.Builder(getBaseUrl().toString())
.withConnectionTimeout(connectionTimeoutMillis)
.withSocketTimeout(socketTimeoutMillis)
.build();
}
public DebugFilter getDebugFilter() {
return (DebugFilter)debugFilter.getFilter();
}
// --------------------------------------------------------------
// --------------------------------------------------------------
/**
* This is a stupid hack to give jetty something to attach to
*/
public static class Servlet404 extends HttpServlet {
@Override
public void service(HttpServletRequest req, HttpServletResponse res)
throws IOException {
res.sendError(404, "Can not find: " + req.getRequestURI());
}
}
/**
* A main class that starts jetty+solr This is useful for debugging
*/
public static void main(String[] args) throws Exception {
JettySolrRunner jetty = new JettySolrRunner(".", "/solr", 8983);
jetty.start();
}
/**
* @return the Solr home directory of this JettySolrRunner
*/
public String getSolrHome() {
return solrHome;
}
/**
* @return this node's properties
*/
public Properties getNodeProperties() {
return nodeProperties;
}
private void waitForLoadingCoresToFinish(long timeoutMs) {
if (dispatchFilter != null) {
SolrDispatchFilter solrFilter = (SolrDispatchFilter) dispatchFilter.getFilter();
CoreContainer cores = solrFilter.getCores();
if (cores != null) {
cores.waitForLoadingCoresToFinish(timeoutMs);
} else {
throw new IllegalStateException("The CoreContainer is not set!");
}
} else {
throw new IllegalStateException("The dispatchFilter is not set!");
}
}
static class Delay {
final AtomicInteger counter;
final int delayValue;
final String reason;
public Delay(String reason, int counter, int delay) {
this.reason = reason;
this.counter = new AtomicInteger(counter);
this.delayValue = delay;
}
}
public SocketProxy getProxy() {
return proxy;
}
}
| 1 | 31,215 | Please do the same for the DebugFilter a few lines above. | apache-lucene-solr | java |
@@ -0,0 +1,2 @@
+export * from './act';
+export * from './setupRerender'; | 1 | 1 | 12,541 | Minor nit: Just inline both functions here. No need to make a file per function (that's a popular approach for some projects though, just not for us :) ). | preactjs-preact | js |
|
@@ -28,6 +28,18 @@ namespace Datadog.Trace.Configuration
{
}
+ /// <summary>
+ /// Initializes a new instance of the <see cref="TracerSettings"/> class with default values,
+ /// or using the default sources. Calling <c>new TracerSettings(true)</c> is equivalent to
+ /// calling <c>TracerSettings.FromDefaultSources()</c>
+ /// </summary>
+ /// <param name="useDefaultSources">If <c>true</c>, creates a <see cref="TracerSettings"/> populated from
+ /// the default sources such as environment variables etc. If <c>false</c>, uses the default values.</param>
+ public TracerSettings(bool useDefaultSources)
+ : this(useDefaultSources ? CreateDefaultConfigurationSource() : null)
+ {
+ }
+
/// <summary>
/// Initializes a new instance of the <see cref="TracerSettings"/> class
/// using the specified <see cref="IConfigurationSource"/> to initialize values. | 1 | // <copyright file="TracerSettings.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Text.RegularExpressions;
using Datadog.Trace.ExtensionMethods;
using Datadog.Trace.PlatformHelpers;
using Datadog.Trace.Util;
using Datadog.Trace.Vendors.Serilog;
namespace Datadog.Trace.Configuration
{
/// <summary>
/// Contains Tracer settings.
/// </summary>
public class TracerSettings
{
/// <summary>
/// Initializes a new instance of the <see cref="TracerSettings"/> class with default values.
/// </summary>
public TracerSettings()
: this(null)
{
}
/// <summary>
/// Initializes a new instance of the <see cref="TracerSettings"/> class
/// using the specified <see cref="IConfigurationSource"/> to initialize values.
/// </summary>
/// <param name="source">The <see cref="IConfigurationSource"/> to use when retrieving configuration values.</param>
public TracerSettings(IConfigurationSource source)
{
Environment = source?.GetString(ConfigurationKeys.Environment);
ServiceName = source?.GetString(ConfigurationKeys.ServiceName) ??
// backwards compatibility for names used in the past
source?.GetString("DD_SERVICE_NAME");
ServiceVersion = source?.GetString(ConfigurationKeys.ServiceVersion);
TraceEnabled = source?.GetBool(ConfigurationKeys.TraceEnabled) ??
// default value
true;
if (AzureAppServices.Metadata.IsRelevant && AzureAppServices.Metadata.IsUnsafeToTrace)
{
TraceEnabled = false;
}
var disabledIntegrationNames = source?.GetString(ConfigurationKeys.DisabledIntegrations)
?.Split(new[] { ';' }, StringSplitOptions.RemoveEmptyEntries) ??
Enumerable.Empty<string>();
DisabledIntegrationNames = new HashSet<string>(disabledIntegrationNames, StringComparer.OrdinalIgnoreCase);
Integrations = new IntegrationSettingsCollection(source);
Exporter = new ExporterSettings(source);
#pragma warning disable 618 // App analytics is deprecated, but still used
AnalyticsEnabled = source?.GetBool(ConfigurationKeys.GlobalAnalyticsEnabled) ??
// default value
false;
#pragma warning restore 618
LogsInjectionEnabled = source?.GetBool(ConfigurationKeys.LogsInjectionEnabled) ??
// default value
false;
MaxTracesSubmittedPerSecond = source?.GetInt32(ConfigurationKeys.MaxTracesSubmittedPerSecond) ??
// default value
100;
GlobalTags = source?.GetDictionary(ConfigurationKeys.GlobalTags) ??
// backwards compatibility for names used in the past
source?.GetDictionary("DD_TRACE_GLOBAL_TAGS") ??
// default value (empty)
new ConcurrentDictionary<string, string>();
// Filter out tags with empty keys or empty values, and trim whitespace
GlobalTags = GlobalTags.Where(kvp => !string.IsNullOrWhiteSpace(kvp.Key) && !string.IsNullOrWhiteSpace(kvp.Value))
.ToDictionary(kvp => kvp.Key.Trim(), kvp => kvp.Value.Trim());
var inputHeaderTags = source?.GetDictionary(ConfigurationKeys.HeaderTags, allowOptionalMappings: true) ??
// default value (empty)
new Dictionary<string, string>();
var headerTagsNormalizationFixEnabled = source?.GetBool(ConfigurationKeys.FeatureFlags.HeaderTagsNormalizationFixEnabled) ?? true;
// Filter out tags with empty keys or empty values, and trim whitespaces
HeaderTags = InitializeHeaderTags(inputHeaderTags, headerTagsNormalizationFixEnabled);
var serviceNameMappings = source?.GetDictionary(ConfigurationKeys.ServiceNameMappings)
?.Where(kvp => !string.IsNullOrWhiteSpace(kvp.Key) && !string.IsNullOrWhiteSpace(kvp.Value))
?.ToDictionary(kvp => kvp.Key.Trim(), kvp => kvp.Value.Trim());
ServiceNameMappings = new ServiceNames(serviceNameMappings);
TracerMetricsEnabled = source?.GetBool(ConfigurationKeys.TracerMetricsEnabled) ??
// default value
false;
RuntimeMetricsEnabled = source?.GetBool(ConfigurationKeys.RuntimeMetricsEnabled) ??
false;
CustomSamplingRules = source?.GetString(ConfigurationKeys.CustomSamplingRules);
GlobalSamplingRate = source?.GetDouble(ConfigurationKeys.GlobalSamplingRate);
StartupDiagnosticLogEnabled = source?.GetBool(ConfigurationKeys.StartupDiagnosticLogEnabled) ??
// default value
true;
var urlSubstringSkips = source?.GetString(ConfigurationKeys.HttpClientExcludedUrlSubstrings) ??
// default value
(AzureAppServices.Metadata.IsRelevant ? AzureAppServices.Metadata.DefaultHttpClientExclusions : null);
if (urlSubstringSkips != null)
{
HttpClientExcludedUrlSubstrings = TrimSplitString(urlSubstringSkips.ToUpperInvariant(), ',').ToArray();
}
var httpServerErrorStatusCodes = source?.GetString(ConfigurationKeys.HttpServerErrorStatusCodes) ??
// Default value
"500-599";
HttpServerErrorStatusCodes = ParseHttpCodesToArray(httpServerErrorStatusCodes);
var httpClientErrorStatusCodes = source?.GetString(ConfigurationKeys.HttpClientErrorStatusCodes) ??
// Default value
"400-499";
HttpClientErrorStatusCodes = ParseHttpCodesToArray(httpClientErrorStatusCodes);
TraceBufferSize = source?.GetInt32(ConfigurationKeys.BufferSize)
?? 1024 * 1024 * 10; // 10MB
TraceBatchInterval = source?.GetInt32(ConfigurationKeys.SerializationBatchInterval)
?? 100;
RouteTemplateResourceNamesEnabled = source?.GetBool(ConfigurationKeys.FeatureFlags.RouteTemplateResourceNamesEnabled)
?? true;
KafkaCreateConsumerScopeEnabled = source?.GetBool(ConfigurationKeys.KafkaCreateConsumerScopeEnabled)
?? true; // default
DelayWcfInstrumentationEnabled = source?.GetBool(ConfigurationKeys.FeatureFlags.DelayWcfInstrumentationEnabled)
?? false;
}
/// <summary>
/// Gets or sets the default environment name applied to all spans.
/// </summary>
/// <seealso cref="ConfigurationKeys.Environment"/>
public string Environment { get; set; }
/// <summary>
/// Gets or sets the service name applied to top-level spans and used to build derived service names.
/// </summary>
/// <seealso cref="ConfigurationKeys.ServiceName"/>
public string ServiceName { get; set; }
/// <summary>
/// Gets or sets the version tag applied to all spans.
/// </summary>
/// <seealso cref="ConfigurationKeys.ServiceVersion"/>
public string ServiceVersion { get; set; }
/// <summary>
/// Gets or sets a value indicating whether tracing is enabled.
/// Default is <c>true</c>.
/// </summary>
/// <seealso cref="ConfigurationKeys.TraceEnabled"/>
public bool TraceEnabled { get; set; }
/// <summary>
/// Gets or sets the names of disabled integrations.
/// </summary>
/// <seealso cref="ConfigurationKeys.DisabledIntegrations"/>
public HashSet<string> DisabledIntegrationNames { get; set; }
/// <summary>
/// Gets or sets the transport settings that dictate how the tracer connects to the agent.
/// </summary>
public ExporterSettings Exporter { get; set; }
/// <summary>
/// Gets or sets a value indicating whether default Analytics are enabled.
/// Setting this value is a shortcut for setting
/// <see cref="Configuration.IntegrationSettings.AnalyticsEnabled"/> on some predetermined integrations.
/// See the documentation for more details.
/// </summary>
/// <seealso cref="ConfigurationKeys.GlobalAnalyticsEnabled"/>
[Obsolete(DeprecationMessages.AppAnalytics)]
public bool AnalyticsEnabled { get; set; }
/// <summary>
/// Gets or sets a value indicating whether correlation identifiers are
/// automatically injected into the logging context.
/// Default is <c>false</c>.
/// </summary>
/// <seealso cref="ConfigurationKeys.LogsInjectionEnabled"/>
public bool LogsInjectionEnabled { get; set; }
/// <summary>
/// Gets or sets a value indicating the maximum number of traces set to AutoKeep (p1) per second.
/// Default is <c>100</c>.
/// </summary>
/// <seealso cref="ConfigurationKeys.MaxTracesSubmittedPerSecond"/>
public int MaxTracesSubmittedPerSecond { get; set; }
/// <summary>
/// Gets or sets a value indicating custom sampling rules.
/// </summary>
/// <seealso cref="ConfigurationKeys.CustomSamplingRules"/>
public string CustomSamplingRules { get; set; }
/// <summary>
/// Gets or sets a value indicating a global rate for sampling.
/// </summary>
/// <seealso cref="ConfigurationKeys.GlobalSamplingRate"/>
public double? GlobalSamplingRate { get; set; }
/// <summary>
/// Gets a collection of <see cref="Integrations"/> keyed by integration name.
/// </summary>
public IntegrationSettingsCollection Integrations { get; }
/// <summary>
/// Gets or sets the global tags, which are applied to all <see cref="Span"/>s.
/// </summary>
public IDictionary<string, string> GlobalTags { get; set; }
/// <summary>
/// Gets or sets the map of header keys to tag names, which are applied to the root <see cref="Span"/> of incoming requests.
/// </summary>
public IDictionary<string, string> HeaderTags { get; set; }
/// <summary>
/// Gets or sets a value indicating whether internal metrics
/// are enabled and sent to DogStatsd.
/// </summary>
public bool TracerMetricsEnabled { get; set; }
/// <summary>
/// Gets or sets a value indicating whether the use
/// of System.Diagnostics.DiagnosticSource is enabled.
/// Default is <c>true</c>.
/// </summary>
/// <remark>
/// This value cannot be set in code. Instead,
/// set it using the <c>DD_TRACE_DIAGNOSTIC_SOURCE_ENABLED</c>
/// environment variable or in configuration files.
/// </remark>
public bool DiagnosticSourceEnabled
{
get => GlobalSettings.Source.DiagnosticSourceEnabled;
set { }
}
/// <summary>
/// Gets or sets a value indicating whether a span context should be created on exiting a successful Kafka
/// Consumer.Consume() call, and closed on entering Consumer.Consume().
/// </summary>
/// <seealso cref="ConfigurationKeys.KafkaCreateConsumerScopeEnabled"/>
public bool KafkaCreateConsumerScopeEnabled { get; set; }
/// <summary>
/// Gets or sets a value indicating whether to enable the updated WCF instrumentation that delays execution
/// until later in the WCF pipeline when the WCF server exception handling is established.
/// </summary>
internal bool DelayWcfInstrumentationEnabled { get; set; }
/// <summary>
/// Gets or sets a value indicating whether the diagnostic log at startup is enabled
/// </summary>
public bool StartupDiagnosticLogEnabled { get; set; }
/// <summary>
/// Gets or sets a value indicating whether runtime metrics
/// are enabled and sent to DogStatsd.
/// </summary>
internal bool RuntimeMetricsEnabled { get; set; }
/// <summary>
/// Gets or sets the comma separated list of url patterns to skip tracing.
/// </summary>
/// <seealso cref="ConfigurationKeys.HttpClientExcludedUrlSubstrings"/>
internal string[] HttpClientExcludedUrlSubstrings { get; set; }
/// <summary>
/// Gets or sets the HTTP status code that should be marked as errors for server integrations.
/// </summary>
/// <seealso cref="ConfigurationKeys.HttpServerErrorStatusCodes"/>
internal bool[] HttpServerErrorStatusCodes { get; set; }
/// <summary>
/// Gets or sets the HTTP status code that should be marked as errors for client integrations.
/// </summary>
/// <seealso cref="ConfigurationKeys.HttpClientErrorStatusCodes"/>
internal bool[] HttpClientErrorStatusCodes { get; set; }
/// <summary>
/// Gets configuration values for changing service names based on configuration
/// </summary>
internal ServiceNames ServiceNameMappings { get; }
/// <summary>
/// Gets or sets a value indicating the size in bytes of the trace buffer
/// </summary>
internal int TraceBufferSize { get; set; }
/// <summary>
/// Gets or sets a value indicating the batch interval for the serialization queue, in milliseconds
/// </summary>
internal int TraceBatchInterval { get; set; }
/// <summary>
/// Gets a value indicating whether the feature flag to enable the updated ASP.NET resource names is enabled
/// </summary>
/// <seealso cref="ConfigurationKeys.FeatureFlags.RouteTemplateResourceNamesEnabled"/>
internal bool RouteTemplateResourceNamesEnabled { get; }
/// <summary>
/// Create a <see cref="TracerSettings"/> populated from the default sources
/// returned by <see cref="CreateDefaultConfigurationSource"/>.
/// </summary>
/// <returns>A <see cref="TracerSettings"/> populated from the default sources.</returns>
public static TracerSettings FromDefaultSources()
{
var source = CreateDefaultConfigurationSource();
return new TracerSettings(source);
}
/// <summary>
/// Creates a <see cref="IConfigurationSource"/> by combining environment variables,
/// AppSettings where available, and a local datadog.json file, if present.
/// </summary>
/// <returns>A new <see cref="IConfigurationSource"/> instance.</returns>
public static CompositeConfigurationSource CreateDefaultConfigurationSource()
{
return GlobalSettings.CreateDefaultConfigurationSource();
}
/// <summary>
/// Sets the HTTP status code that should be marked as errors for client integrations.
/// </summary>
/// <seealso cref="ConfigurationKeys.HttpClientErrorStatusCodes"/>
/// <param name="statusCodes">Status codes that should be marked as errors</param>
public void SetHttpClientErrorStatusCodes(IEnumerable<int> statusCodes)
{
HttpClientErrorStatusCodes = ParseHttpCodesToArray(string.Join(",", statusCodes));
}
/// <summary>
/// Sets the HTTP status code that should be marked as errors for server integrations.
/// </summary>
/// <seealso cref="ConfigurationKeys.HttpServerErrorStatusCodes"/>
/// <param name="statusCodes">Status codes that should be marked as errors</param>
public void SetHttpServerErrorStatusCodes(IEnumerable<int> statusCodes)
{
HttpServerErrorStatusCodes = ParseHttpCodesToArray(string.Join(",", statusCodes));
}
/// <summary>
/// Sets the mappings to use for service names within a <see cref="Span"/>
/// </summary>
/// <param name="mappings">Mappings to use from original service name (e.g. <code>sql-server</code> or <code>graphql</code>)
/// as the <see cref="KeyValuePair{TKey, TValue}.Key"/>) to replacement service names as <see cref="KeyValuePair{TKey, TValue}.Value"/>).</param>
public void SetServiceNameMappings(IEnumerable<KeyValuePair<string, string>> mappings)
{
ServiceNameMappings.SetServiceNameMappings(mappings);
}
/// <summary>
/// Create an instance of <see cref="ImmutableTracerSettings"/> that can be used to build a <see cref="Tracer"/>
/// </summary>
/// <returns>The <see cref="ImmutableTracerSettings"/> that can be passed to a <see cref="Tracer"/> instance</returns>
public ImmutableTracerSettings Build()
{
return new ImmutableTracerSettings(this);
}
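// Illustrative sketch (not used by the tracer): obtaining settings populated from the
// default configuration sources and freezing them into an immutable instance; the
// service name below is a hypothetical value for demonstration only.
private static ImmutableTracerSettings ExampleBuildFromDefaults()
{
    var settings = TracerSettings.FromDefaultSources();
    settings.ServiceName = "example-service"; // hypothetical service name
    return settings.Build();
}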
private static IDictionary<string, string> InitializeHeaderTags(IDictionary<string, string> configurationDictionary, bool headerTagsNormalizationFixEnabled)
{
var headerTags = new Dictionary<string, string>();
foreach (var kvp in configurationDictionary)
{
var headerName = kvp.Key;
var providedTagName = kvp.Value;
if (string.IsNullOrWhiteSpace(headerName))
{
continue;
}
// The user has not provided a tag name. The normalization will happen later, when adding the prefix.
if (string.IsNullOrEmpty(providedTagName))
{
headerTags.Add(headerName.Trim(), string.Empty);
}
else if (headerTagsNormalizationFixEnabled && providedTagName.TryConvertToNormalizedTagName(normalizePeriods: false, out var normalizedTagName))
{
// If the user has provided a tag name, then we don't normalize periods in the provided tag name
headerTags.Add(headerName.Trim(), normalizedTagName);
}
else if (!headerTagsNormalizationFixEnabled && providedTagName.TryConvertToNormalizedTagName(normalizePeriods: true, out var normalizedTagNameNoPeriods))
{
// Back to the previous behaviour if the flag is set
headerTags.Add(headerName.Trim(), normalizedTagNameNoPeriods);
}
}
return headerTags;
}
// internal for testing
internal static IEnumerable<string> TrimSplitString(string textValues, char separator)
{
var values = textValues.Split(separator);
for (var i = 0; i < values.Length; i++)
{
if (!string.IsNullOrWhiteSpace(values[i]))
{
yield return values[i].Trim();
}
}
}
internal static bool[] ParseHttpCodesToArray(string httpStatusErrorCodes)
{
bool[] httpErrorCodesArray = new bool[600];
void TrySetValue(int index)
{
if (index >= 0 && index < httpErrorCodesArray.Length)
{
httpErrorCodesArray[index] = true;
}
}
string[] configurationsArray = httpStatusErrorCodes.Replace(" ", string.Empty).Split(',');
foreach (string statusConfiguration in configurationsArray)
{
int startStatus;
// Checks that the value about to be used follows the `401-404` structure or single 3 digit number i.e. `401` else log the warning
if (!Regex.IsMatch(statusConfiguration, @"^\d{3}-\d{3}$|^\d{3}$"))
{
Log.Warning("Wrong format '{0}' for DD_HTTP_SERVER/CLIENT_ERROR_STATUSES configuration.", statusConfiguration);
}
// If statusConfiguration equals a single value i.e. `401` parse the value and save to the array
else if (int.TryParse(statusConfiguration, out startStatus))
{
TrySetValue(startStatus);
}
else
{
string[] statusCodeLimitsRange = statusConfiguration.Split('-');
startStatus = int.Parse(statusCodeLimitsRange[0]);
int endStatus = int.Parse(statusCodeLimitsRange[1]);
if (endStatus < startStatus)
{
startStatus = endStatus;
endStatus = int.Parse(statusCodeLimitsRange[0]);
}
for (int statusCode = startStatus; statusCode <= endStatus; statusCode++)
{
TrySetValue(statusCode);
}
}
}
return httpErrorCodesArray;
}
}
}
| 1 | 25,953 | There are several uses of the word "default" in here, perhaps it can be reworded slightly? | DataDog-dd-trace-dotnet | .cs |
@@ -50,7 +50,7 @@ class MediaType extends AbstractType
'new_on_update' => $options['new_on_update'],
)));
- $builder->addEventListener(FormEvents::BIND, function (FormEvent $event) {
+ $builder->addEventListener(FormEvents::SUBMIT, function(FormEvent $event) {
if ($event->getForm()->has('unlink') && $event->getForm()->get('unlink')->getData()) {
$event->setData(null);
} | 1 | <?php
/*
* This file is part of the Sonata project.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Form\Type;
use Sonata\MediaBundle\Form\DataTransformer\ProviderDataTransformer;
use Sonata\MediaBundle\Provider\Pool;
use Symfony\Component\Form\AbstractType;
use Symfony\Component\Form\FormBuilderInterface;
use Symfony\Component\Form\FormEvent;
use Symfony\Component\Form\FormEvents;
use Symfony\Component\Form\FormInterface;
use Symfony\Component\Form\FormView;
use Symfony\Component\OptionsResolver\OptionsResolver;
use Symfony\Component\OptionsResolver\OptionsResolverInterface;
class MediaType extends AbstractType
{
protected $pool;
protected $class;
/**
* @param Pool $pool
* @param string $class
*/
public function __construct(Pool $pool, $class)
{
$this->pool = $pool;
$this->class = $class;
}
/**
* {@inheritdoc}
*/
public function buildForm(FormBuilderInterface $builder, array $options)
{
$builder->addModelTransformer(new ProviderDataTransformer($this->pool, $this->class, array(
'provider' => $options['provider'],
'context' => $options['context'],
'empty_on_new' => $options['empty_on_new'],
'new_on_update' => $options['new_on_update'],
)));
$builder->addEventListener(FormEvents::BIND, function (FormEvent $event) {
if ($event->getForm()->has('unlink') && $event->getForm()->get('unlink')->getData()) {
$event->setData(null);
}
});
$this->pool->getProvider($options['provider'])->buildMediaType($builder);
$builder->add('unlink', 'checkbox', array(
'mapped' => false,
'data' => false,
'required' => false,
));
}
/**
* {@inheritdoc}
*/
public function buildView(FormView $view, FormInterface $form, array $options)
{
$view->vars['provider'] = $options['provider'];
$view->vars['context'] = $options['context'];
}
/**
* {@inheritdoc}
*
* @deprecated Remove it when bumping requirements to Symfony >=2.7
*/
public function setDefaultOptions(OptionsResolverInterface $resolver)
{
$this->configureOptions($resolver);
}
/**
* {@inheritdoc}
*/
public function configureOptions(OptionsResolver $resolver)
{
$resolver->setDefaults(array(
'data_class' => $this->class,
'provider' => null,
'context' => null,
'empty_on_new' => true,
'new_on_update' => true,
));
}
/**
* {@inheritdoc}
*/
public function getParent()
{
return 'form';
}
/**
* {@inheritdoc}
*/
public function getName()
{
return 'sonata_media_type';
}
}
| 1 | 6,735 | `function (` is correct. A space must be there. Please run `make cs` to have correct coding style. | sonata-project-SonataMediaBundle | php |
@@ -18,7 +18,7 @@ function pageNoDuplicateEvaluate(node, options, virtualNode) {
cache.set(key, true);
let elms = querySelectorAllFilter(axe._tree[0], options.selector, elm =>
- isVisible(elm.actualNode)
+ isVisible(elm.actualNode, true)
);
// Filter elements that, within certain contexts, don't map their role. | 1 | import cache from '../../core/base/cache';
import { querySelectorAllFilter } from '../../core/utils';
import { isVisible, findUpVirtual } from '../../commons/dom';
function pageNoDuplicateEvaluate(node, options, virtualNode) {
if (!options || !options.selector || typeof options.selector !== 'string') {
throw new TypeError(
'page-no-duplicate requires options.selector to be a string'
);
}
// only look at the first node and its related nodes
const key = 'page-no-duplicate;' + options.selector;
if (cache.get(key)) {
this.data('ignored');
return;
}
cache.set(key, true);
let elms = querySelectorAllFilter(axe._tree[0], options.selector, elm =>
isVisible(elm.actualNode)
);
// Filter elements that, within certain contexts, don't map their role.
// e.g. a <footer> inside a <main> is not a contentinfo, but in the <body> context it is
if (typeof options.nativeScopeFilter === 'string') {
elms = elms.filter(elm => {
return (
elm.actualNode.hasAttribute('role') ||
!findUpVirtual(elm, options.nativeScopeFilter)
);
});
}
this.relatedNodes(
elms.filter(elm => elm !== virtualNode).map(elm => elm.actualNode)
);
return elms.length <= 1;
}
export default pageNoDuplicateEvaluate;
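// Illustrative sketch (never called): how this evaluate function is typically
// invoked. `this` is axe's check context; the selector and nativeScopeFilter
// values below are assumptions for demonstration only.
function examplePageNoDuplicateCall(checkContext, node, virtualNode) {
	return pageNoDuplicateEvaluate.call(
		checkContext,
		node,
		{ selector: 'main', nativeScopeFilter: 'article, aside, nav, section' },
		virtualNode
	);
}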
| 1 | 16,753 | I think this is now going to fail the scenario where there is a `main` with aria-hidden="true". I don't think that should fail. | dequelabs-axe-core | js |
@@ -83,7 +83,7 @@ func NewSublist() *Sublist {
// Insert adds a subscription into the sublist
func (s *Sublist) Insert(sub *subscription) error {
// copy the subject since we hold this and this might be part of a large byte slice.
- subject := string(append([]byte(nil), sub.subject...))
+ subject := string(sub.subject)
tsa := [32]string{}
tokens := tsa[:0]
start := 0 | 1 | // Copyright 2016 Apcera Inc. All rights reserved.
// Package sublist is a routing mechanism to handle subject distribution
// and provides a facility to match subjects from published messages to
// interested subscribers. Subscribers can have wildcard subjects to match
// multiple published subjects.
package server
import (
"bytes"
"errors"
"strings"
"sync"
"sync/atomic"
)
// Common byte variables for wildcards and token separator.
const (
pwc = '*'
fwc = '>'
tsep = "."
btsep = '.'
)
// Sublist related errors
var (
ErrInvalidSubject = errors.New("sublist: Invalid Subject")
ErrNotFound = errors.New("sublist: No Matches Found")
)
// slCacheMax is used to bound the size of the frontend cache
const slCacheMax = 1024
// A result structure better optimized for queue subs.
type SublistResult struct {
psubs []*subscription
qsubs [][]*subscription // don't make this a map, too expensive to iterate
}
// A Sublist stores and efficiently retrieves subscriptions.
type Sublist struct {
sync.RWMutex
genid uint64
matches uint64
cacheHits uint64
inserts uint64
removes uint64
cache map[string]*SublistResult
root *level
count uint32
}
// A node contains subscriptions and a pointer to the next level.
type node struct {
next *level
psubs []*subscription
qsubs [][]*subscription
}
// A level represents a group of nodes and special pointers to
// wildcard nodes.
type level struct {
nodes map[string]*node
pwc, fwc *node
}
// Create a new default node.
func newNode() *node {
return &node{psubs: make([]*subscription, 0, 4)}
}
// Create a new default level. We use FNV1A as the hash
// algorithm for the tokens, which should be short.
func newLevel() *level {
return &level{nodes: make(map[string]*node)}
}
// New will create a default sublist
func NewSublist() *Sublist {
return &Sublist{root: newLevel(), cache: make(map[string]*SublistResult)}
}
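// exampleSublistUsage is an illustrative sketch of the basic Sublist workflow
// (never called by the server itself): insert a wildcard subscription, then
// match a literal published subject against it. The subscription type is
// defined elsewhere in this package; only its subject (and optional queue)
// fields matter to the sublist.
func exampleSublistUsage() *SublistResult {
	sl := NewSublist()
	sub := &subscription{subject: []byte("foo.*")}
	if err := sl.Insert(sub); err != nil {
		return nil
	}
	// The result contains sub in psubs, since "foo.*" matches "foo.bar".
	return sl.Match("foo.bar")
}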
// Insert adds a subscription into the sublist
func (s *Sublist) Insert(sub *subscription) error {
// copy the subject since we hold this and this might be part of a large byte slice.
subject := string(append([]byte(nil), sub.subject...))
tsa := [32]string{}
tokens := tsa[:0]
start := 0
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
s.Lock()
sfwc := false
l := s.root
var n *node
for _, t := range tokens {
if len(t) == 0 || sfwc {
s.Unlock()
return ErrInvalidSubject
}
switch t[0] {
case pwc:
n = l.pwc
case fwc:
n = l.fwc
sfwc = true
default:
n = l.nodes[t]
}
if n == nil {
n = newNode()
switch t[0] {
case pwc:
l.pwc = n
case fwc:
l.fwc = n
default:
l.nodes[t] = n
}
}
if n.next == nil {
n.next = newLevel()
}
l = n.next
}
if sub.queue == nil {
n.psubs = append(n.psubs, sub)
} else {
// This is a queue subscription
if i := findQSliceForSub(sub, n.qsubs); i >= 0 {
n.qsubs[i] = append(n.qsubs[i], sub)
} else {
n.qsubs = append(n.qsubs, []*subscription{sub})
}
}
s.count++
s.inserts++
s.addToCache(subject, sub)
atomic.AddUint64(&s.genid, 1)
s.Unlock()
return nil
}
// Deep copy
func copyResult(r *SublistResult) *SublistResult {
nr := &SublistResult{}
nr.psubs = append([]*subscription(nil), r.psubs...)
for _, qr := range r.qsubs {
nqr := append([]*subscription(nil), qr...)
nr.qsubs = append(nr.qsubs, nqr)
}
return nr
}
// addToCache will add the new entry to existing cache
// entries if needed. Assumes write lock is held.
func (s *Sublist) addToCache(subject string, sub *subscription) {
for k, r := range s.cache {
if matchLiteral(k, subject) {
// Copy since others may have a reference.
nr := copyResult(r)
if sub.queue == nil {
nr.psubs = append(nr.psubs, sub)
} else {
if i := findQSliceForSub(sub, nr.qsubs); i >= 0 {
nr.qsubs[i] = append(nr.qsubs[i], sub)
} else {
nr.qsubs = append(nr.qsubs, []*subscription{sub})
}
}
s.cache[k] = nr
}
}
}
// removeFromCache will remove the sub from any active cache entries.
// Assumes write lock is held.
func (s *Sublist) removeFromCache(subject string, sub *subscription) {
for k, _ := range s.cache {
if !matchLiteral(k, subject) {
continue
}
// Since someone else may be referencing it, we can't modify the list
// safely, just let it re-populate.
delete(s.cache, k)
}
}
// Match will match all entries to the literal subject.
// It will return a set of results for both normal and queue subscribers.
func (s *Sublist) Match(subject string) *SublistResult {
s.RLock()
atomic.AddUint64(&s.matches, 1)
rc, ok := s.cache[subject]
s.RUnlock()
if ok {
atomic.AddUint64(&s.cacheHits, 1)
return rc
}
tsa := [32]string{}
tokens := tsa[:0]
start := 0
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
// FIXME(dlc) - Make shared pool between sublist and client readLoop?
result := &SublistResult{}
s.Lock()
matchLevel(s.root, tokens, result)
// Add to our cache
s.cache[subject] = result
// Bound the number of entries to sublistMaxCache
if len(s.cache) > slCacheMax {
for k, _ := range s.cache {
delete(s.cache, k)
break
}
}
s.Unlock()
return result
}
// This will add in a node's results to the total results.
func addNodeToResults(n *node, results *SublistResult) {
results.psubs = append(results.psubs, n.psubs...)
for _, qr := range n.qsubs {
if len(qr) == 0 {
continue
}
// Need to find matching list in results
if i := findQSliceForSub(qr[0], results.qsubs); i >= 0 {
results.qsubs[i] = append(results.qsubs[i], qr...)
} else {
results.qsubs = append(results.qsubs, qr)
}
}
}
// We do not use a map here since we want iteration to be fast when
// processing publishes in L1 on client. So we need to walk sequentially
// for now. Keep an eye on this in case we start getting a large number of
// different queue subscribers for the same subject.
func findQSliceForSub(sub *subscription, qsl [][]*subscription) int {
if sub.queue == nil {
return -1
}
for i, qr := range qsl {
if len(qr) > 0 && bytes.Equal(sub.queue, qr[0].queue) {
return i
}
}
return -1
}
// matchLevel is used to recursively descend into the trie.
func matchLevel(l *level, toks []string, results *SublistResult) {
var pwc, n *node
for i, t := range toks {
if l == nil {
return
}
if l.fwc != nil {
addNodeToResults(l.fwc, results)
}
if pwc = l.pwc; pwc != nil {
matchLevel(pwc.next, toks[i+1:], results)
}
n = l.nodes[t]
if n != nil {
l = n.next
} else {
l = nil
}
}
if n != nil {
addNodeToResults(n, results)
}
if pwc != nil {
addNodeToResults(pwc, results)
}
}
// lnt is used to track descent into levels for a removal for pruning.
type lnt struct {
l *level
n *node
t string
}
// Remove will remove a subscription.
func (s *Sublist) Remove(sub *subscription) error {
subject := string(sub.subject)
tsa := [32]string{}
tokens := tsa[:0]
start := 0
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
s.Lock()
defer s.Unlock()
sfwc := false
l := s.root
var n *node
// Track levels for pruning
var lnts [32]lnt
levels := lnts[:0]
for _, t := range tokens {
if len(t) == 0 || sfwc {
return ErrInvalidSubject
}
if l == nil {
return ErrNotFound
}
switch t[0] {
case pwc:
n = l.pwc
case fwc:
n = l.fwc
sfwc = true
default:
n = l.nodes[t]
}
if n != nil {
levels = append(levels, lnt{l, n, t})
l = n.next
} else {
l = nil
}
}
if !s.removeFromNode(n, sub) {
return ErrNotFound
}
s.count--
s.removes++
for i := len(levels) - 1; i >= 0; i-- {
l, n, t := levels[i].l, levels[i].n, levels[i].t
if n.isEmpty() {
l.pruneNode(n, t)
}
}
s.removeFromCache(subject, sub)
atomic.AddUint64(&s.genid, 1)
return nil
}
// pruneNode is used to prune an empty node from the tree.
func (l *level) pruneNode(n *node, t string) {
if n == nil {
return
}
if n == l.fwc {
l.fwc = nil
} else if n == l.pwc {
l.pwc = nil
} else {
delete(l.nodes, t)
}
}
// isEmpty will test if the node has any entries. Used
// in pruning.
func (n *node) isEmpty() bool {
if len(n.psubs) == 0 && len(n.qsubs) == 0 {
if n.next == nil || n.next.numNodes() == 0 {
return true
}
}
return false
}
// Return the number of nodes for the given level.
func (l *level) numNodes() int {
num := len(l.nodes)
if l.pwc != nil {
num++
}
if l.fwc != nil {
num++
}
return num
}
// Removes a sub from a list.
func removeSubFromList(sub *subscription, sl []*subscription) ([]*subscription, bool) {
for i := 0; i < len(sl); i++ {
if sl[i] == sub {
last := len(sl) - 1
sl[i] = sl[last]
sl[last] = nil
sl = sl[:last]
return shrinkAsNeeded(sl), true
}
}
return sl, false
}
// Remove the sub for the given node.
func (s *Sublist) removeFromNode(n *node, sub *subscription) (found bool) {
if n == nil {
return false
}
if sub.queue == nil {
n.psubs, found = removeSubFromList(sub, n.psubs)
return found
}
// We have a queue group subscription here
if i := findQSliceForSub(sub, n.qsubs); i >= 0 {
n.qsubs[i], found = removeSubFromList(sub, n.qsubs[i])
if len(n.qsubs[i]) == 0 {
last := len(n.qsubs) - 1
n.qsubs[i] = n.qsubs[last]
n.qsubs[last] = nil
n.qsubs = n.qsubs[:last]
if len(n.qsubs) == 0 {
n.qsubs = nil
}
}
return found
}
return false
}
// Checks if we need to do a resize. This is for very large growth then
// subsequent return to a more normal size from unsubscribe.
func shrinkAsNeeded(sl []*subscription) []*subscription {
lsl := len(sl)
csl := cap(sl)
// Don't bother if list not too big
if csl <= 8 {
return sl
}
pFree := float32(csl-lsl) / float32(csl)
if pFree > 0.50 {
return append([]*subscription(nil), sl...)
}
return sl
}
// Count returns the number of subscriptions.
func (s *Sublist) Count() uint32 {
s.RLock()
defer s.RUnlock()
return s.count
}
// CacheCount returns the number of result sets in the cache.
func (s *Sublist) CacheCount() int {
s.RLock()
defer s.RUnlock()
return len(s.cache)
}
// Public stats for the sublist
type SublistStats struct {
NumSubs uint32 `json:"num_subscriptions"`
NumCache uint32 `json:"num_cache"`
NumInserts uint64 `json:"num_inserts"`
NumRemoves uint64 `json:"num_removes"`
NumMatches uint64 `json:"num_matches"`
CacheHitRate float64 `json:"cache_hit_rate"`
MaxFanout uint32 `json:"max_fanout"`
AvgFanout float64 `json:"avg_fanout"`
}
// Stats will return a stats structure for the current state.
func (s *Sublist) Stats() *SublistStats {
s.Lock()
defer s.Unlock()
st := &SublistStats{}
st.NumSubs = s.count
st.NumCache = uint32(len(s.cache))
st.NumInserts = s.inserts
st.NumRemoves = s.removes
st.NumMatches = s.matches
if s.matches > 0 {
st.CacheHitRate = float64(s.cacheHits) / float64(s.matches)
}
// whip through cache for fanout stats
tot, max := 0, 0
for _, r := range s.cache {
l := len(r.psubs) + len(r.qsubs)
tot += l
if l > max {
max = l
}
}
st.MaxFanout = uint32(max)
if tot > 0 {
st.AvgFanout = float64(tot) / float64(len(s.cache))
}
return st
}
// numLevels will return the maximum number of levels
// contained in the Sublist tree.
func (s *Sublist) numLevels() int {
return visitLevel(s.root, 0)
}
// visitLevel is used to descend the Sublist tree structure
// recursively.
func visitLevel(l *level, depth int) int {
if l == nil || l.numNodes() == 0 {
return depth
}
depth++
maxDepth := depth
for _, n := range l.nodes {
if n == nil {
continue
}
newDepth := visitLevel(n.next, depth)
if newDepth > maxDepth {
maxDepth = newDepth
}
}
if l.pwc != nil {
pwcDepth := visitLevel(l.pwc.next, depth)
if pwcDepth > maxDepth {
maxDepth = pwcDepth
}
}
if l.fwc != nil {
fwcDepth := visitLevel(l.fwc.next, depth)
if fwcDepth > maxDepth {
maxDepth = fwcDepth
}
}
return maxDepth
}
// IsValidLiteralSubject returns true if a subject is valid, false otherwise
func IsValidLiteralSubject(subject string) bool {
tokens := strings.Split(string(subject), tsep)
for _, t := range tokens {
if len(t) == 0 {
return false
}
if len(t) > 1 {
continue
}
switch t[0] {
case pwc, fwc:
return false
}
}
return true
}
// matchLiteral is used to test literal subjects, those that do not have any
// wildcards, with a target subject. This is used in the cache layer.
func matchLiteral(literal, subject string) bool {
li := 0
ll := len(literal)
for i := 0; i < len(subject); i++ {
if li >= ll {
return false
}
b := subject[i]
switch b {
case pwc:
// Skip token in literal
ll := len(literal)
for {
if li >= ll || literal[li] == btsep {
li--
break
}
li++
}
case fwc:
return true
default:
if b != literal[li] {
return false
}
}
li++
}
// Make sure we have processed all of the literal's chars.
if li < ll {
return false
}
return true
}
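// exampleMatchLiteral sketches how matchLiteral treats wildcard tokens in its
// subject argument; it is illustrative only and never called by the sublist.
func exampleMatchLiteral() []bool {
	return []bool{
		matchLiteral("foo.bar", "foo.*"),     // true: '*' consumes one token
		matchLiteral("foo.bar.baz", "foo.>"), // true: '>' matches the remainder
		matchLiteral("foo.bar", "foo.baz"),   // false: literal token mismatch
	}
}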
| 1 | 6,363 | Is this specifically that if we capture the cast in another variable, even if it passes escape analysis, that the runtime will copy? | nats-io-nats-server | go |
@@ -108,6 +108,10 @@ type EKSConfig struct {
// Fargate controls the configuration of the AWS IAM role for
// used by EKS managed machine pools.
Fargate *AWSIAMRoleSpec `json:"fargate,omitempty"`
+ // KMSAliasPrefix is prefix to use to restrict permission to KMS keys to only those that have an alias
+ // name that is prefixed by this.
+ // Defaults to cluster-api-provider-aws-*
+ KMSAliasPrefix string `json:"kmsAliasPrefix,omitempty"`
}
// EventBridgeConfig represents configuration for enabling experimental feature to consume | 1 | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
iamv1 "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/iam/v1alpha1"
)
// BootstrapUser contains a list of elements that is specific
// to the configuration and enablement of an IAM user.
type BootstrapUser struct {
// Enable controls whether or not a bootstrap AWS IAM user will be created.
// This can be used to scope down the initial credentials used to bootstrap the
// cluster.
// Defaults to false.
Enable bool `json:"enable"`
// UserName controls the username of the bootstrap user. Defaults to
// "bootstrapper.cluster-api-provider-aws.sigs.k8s.io"
UserName string `json:"userName,omitempty"`
// GroupName controls the group the user will belong to. Defaults to
// "bootstrapper.cluster-api-provider-aws.sigs.k8s.io"
GroupName string `json:"groupName,omitempty"`
// ExtraPolicyAttachments is a list of additional policies to be attached to the IAM user.
ExtraPolicyAttachments []string `json:"extraPolicyAttachments,omitempty"`
// ExtraGroups is a list of groups to add this user to.
ExtraGroups []string `json:"extraGroups,omitempty"`
// ExtraStatements are additional AWS IAM policy document statements to be included inline for the user.
ExtraStatements []iamv1.StatementEntry `json:"extraStatements,omitempty"`
// Tags is a map of tags to be applied to the AWS IAM user.
Tags infrav1.Tags `json:"tags,omitempty"`
}
// ControlPlane controls the configuration of the AWS IAM role for
// the control plane of provisioned Kubernetes clusters.
type ControlPlane struct {
AWSIAMRoleSpec `json:",inline"`
// DisableClusterAPIControllerPolicyAttachment, if set to true, will not attach the AWS IAM policy for Cluster
// API Provider AWS to the control plane role. Defaults to false.
DisableClusterAPIControllerPolicyAttachment bool `json:"disableClusterAPIControllerPolicyAttachment,omitempty"`
// DisableCloudProviderPolicy if set to true, will not generate and attach the AWS IAM policy for the AWS Cloud Provider.
DisableCloudProviderPolicy bool `json:"disableCloudProviderPolicy"`
// EnableCSIPolicy if set to true, will generate and attach the AWS IAM policy for the EBS CSI Driver.
EnableCSIPolicy bool `json:"enableCSIPolicy"`
}
// AWSIAMRoleSpec defines common configuration for AWS IAM roles created by
// Kubernetes Cluster API Provider AWS
type AWSIAMRoleSpec struct {
// Disable if set to true will not create the AWS IAM role. Defaults to false.
Disable bool `json:"disable"` // default: false
// ExtraPolicyAttachments is a list of additional policies to be attached to the IAM role.
ExtraPolicyAttachments []string `json:"extraPolicyAttachments,omitempty"`
// ExtraStatements are additional IAM statements to be included inline for the role.
ExtraStatements []iamv1.StatementEntry `json:"extraStatements,omitempty"`
// TrustStatements is an IAM PolicyDocument defining what identities are allowed to assume this role.
// See "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/iam/v1alpha1" for more documentation.
TrustStatements []iamv1.StatementEntry `json:"trustStatements,omitempty"`
// Tags is a map of tags to be applied to the AWS IAM role.
Tags infrav1.Tags `json:"tags,omitempty"`
}
// EKSConfig represents the EKS related configuration config
type EKSConfig struct {
// Enable controls whether EKS-related permissions are granted
Enable bool `json:"enable"`
// AllowIAMRoleCreation controls whether the EKS controllers have permissions for creating IAM
// roles per cluster
AllowIAMRoleCreation bool `json:"iamRoleCreation,omitempty"`
// DefaultControlPlaneRole controls the configuration of the AWS IAM role for
// the EKS control plane. This is the default role that will be used if
// no role is included in the spec and automatic creation of the role
// isn't enabled
DefaultControlPlaneRole AWSIAMRoleSpec `json:"defaultControlPlaneRole,omitempty"`
// ManagedMachinePool controls the configuration of the AWS IAM role
// used by EKS managed machine pools.
ManagedMachinePool *AWSIAMRoleSpec `json:"managedMachinePool,omitempty"`
// Fargate controls the configuration of the AWS IAM role
// used by EKS Fargate profiles.
Fargate *AWSIAMRoleSpec `json:"fargate,omitempty"`
}
// EventBridgeConfig represents configuration for enabling experimental feature to consume
// EventBridge EC2 events
type EventBridgeConfig struct {
// Enable controls whether permissions are granted to consume EC2 events
Enable bool `json:"enable,omitempty"`
}
// ClusterAPIControllers controls the configuration of the AWS IAM role for
// the Kubernetes Cluster API Provider AWS controller.
type ClusterAPIControllers struct {
AWSIAMRoleSpec `json:",inline"`
// AllowedEC2InstanceProfiles controls which EC2 roles are allowed to be
// consumed by Cluster API when creating an ec2 instance. Defaults to
// *.<suffix>, where suffix is defaulted to .cluster-api-provider-aws.sigs.k8s.io
AllowedEC2InstanceProfiles []string `json:"allowedEC2InstanceProfiles,omitempty"`
}
// Nodes controls the configuration of the AWS IAM role for worker nodes
// in a cluster created by Kubernetes Cluster API Provider AWS.
type Nodes struct {
AWSIAMRoleSpec `json:",inline"`
// DisableCloudProviderPolicy if set to true, will not generate and attach the policy for the AWS Cloud Provider.
// Defaults to false.
DisableCloudProviderPolicy bool `json:"disableCloudProviderPolicy"`
// EC2ContainerRegistryReadOnly controls whether the node has read-only access to the
// EC2 container registry
EC2ContainerRegistryReadOnly bool `json:"ec2ContainerRegistryReadOnly"`
}
// +kubebuilder:object:root=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AWSIAMConfiguration controls the creation of AWS Identity and Access Management (IAM) resources for use
// by Kubernetes clusters and Kubernetes Cluster API Provider AWS.
type AWSIAMConfiguration struct {
metav1.TypeMeta `json:",inline"`
Spec AWSIAMConfigurationSpec `json:"spec,omitempty"`
}
// AWSIAMConfigurationSpec defines the specification of the AWSIAMConfiguration.
type AWSIAMConfigurationSpec struct {
// NamePrefix will be prepended to every AWS IAM role, user and policy created by clusterawsadm. Defaults to "".
NamePrefix string `json:"namePrefix,omitempty"`
// NameSuffix will be appended to every AWS IAM role, user and policy created by clusterawsadm. Defaults to
// ".cluster-api-provider-aws.sigs.k8s.io".
NameSuffix *string `json:"nameSuffix,omitempty"`
// ControlPlane controls the configuration of the AWS IAM role for a Kubernetes cluster's control plane nodes.
ControlPlane ControlPlane `json:"controlPlane,omitempty"`
// ClusterAPIControllers controls the configuration of an IAM role and policy specifically for Kubernetes Cluster API Provider AWS.
ClusterAPIControllers ClusterAPIControllers `json:"clusterAPIControllers,omitempty"`
// Nodes controls the configuration of the AWS IAM role for all nodes in a Kubernetes cluster.
Nodes Nodes `json:"nodes,omitempty"`
// BootstrapUser contains a list of elements that is specific
// to the configuration and enablement of an IAM user.
BootstrapUser BootstrapUser `json:"bootstrapUser,omitempty"`
// StackName defines the name of the AWS CloudFormation stack.
StackName string `json:"stackName,omitempty"`
// Region controls which region the control-plane is created in if not specified on the command line or
// via environment variables.
Region string `json:"region,omitempty"`
// EKS controls the configuration related to EKS. Settings in here affect the control plane
// and nodes roles
EKS *EKSConfig `json:"eks,omitempty"`
// EventBridge controls configuration for consuming EventBridge events
EventBridge *EventBridgeConfig `json:"eventBridge,omitempty"`
// Partition is the AWS security partition being used. Defaults to "aws"
Partition string `json:"partition,omitempty"`
// SecureSecretsBackend, when set to parameter-store will create AWS Systems Manager
// Parameter Storage policies. By default or with the value of secrets-manager,
// will generate AWS Secrets Manager policies instead.
// +kubebuilder:validation:Enum=secrets-manager;ssm-parameter-store
SecureSecretsBackends []infrav1.SecretBackend `json:"secureSecretBackends,omitempty"`
}
func (obj *AWSIAMConfiguration) GetObjectKind() schema.ObjectKind {
return &obj.TypeMeta
}
func NewAWSIAMConfiguration() *AWSIAMConfiguration {
conf := &AWSIAMConfiguration{}
SetObjectDefaults_AWSIAMConfiguration(conf)
return conf
}
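// exampleAWSIAMConfiguration is an illustrative sketch (not used by clusterawsadm)
// of programmatically building a configuration with EKS support enabled; the
// stack name and region are hypothetical values for demonstration only.
func exampleAWSIAMConfiguration() *AWSIAMConfiguration {
	conf := NewAWSIAMConfiguration()
	conf.Spec.StackName = "example-stack" // hypothetical stack name
	conf.Spec.Region = "us-east-1"        // hypothetical region
	conf.Spec.EKS = &EKSConfig{Enable: true}
	return conf
}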
| 1 | 19,042 | Should this be optional for existing configs to work? | kubernetes-sigs-cluster-api-provider-aws | go |
@@ -119,12 +119,14 @@ func (s *Service) Pay(ctx context.Context, peer swarm.Address, amount uint64) er
}
return ErrUnknownBeneficary
}
- err = s.chequebook.Issue(ctx, beneficiary, big.NewInt(int64(amount)), func(signedCheque *chequebook.SignedCheque) error {
+ balance, err := s.chequebook.Issue(ctx, beneficiary, big.NewInt(int64(amount)), func(signedCheque *chequebook.SignedCheque) error {
return s.proto.EmitCheque(ctx, peer, signedCheque)
})
if err != nil {
return err
}
+
+ s.metrics.AvailableBalance.Set(float64(balance.Int64()))
s.metrics.TotalSent.Add(float64(amount))
return nil
} | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package swap
import (
"context"
"errors"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/settlement"
"github.com/ethersphere/bee/pkg/settlement/swap/chequebook"
"github.com/ethersphere/bee/pkg/settlement/swap/swapprotocol"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
)
var (
// ErrWrongChequebook is the error if a peer uses a different chequebook from before.
ErrWrongChequebook = errors.New("wrong chequebook")
// ErrWrongBeneficiary is the error if a peer uses a different beneficiary than expected.
ErrWrongBeneficiary = errors.New("wrong beneficiary")
// ErrUnknownBeneficary is the error if a peer has never announced a beneficiary.
ErrUnknownBeneficary = errors.New("unknown beneficiary for peer")
)
type ApiInterface interface {
// LastSentCheque returns the last sent cheque for the peer
LastSentCheque(peer swarm.Address) (*chequebook.SignedCheque, error)
// LastSentCheques returns the list of last sent cheques for all peers
LastSentCheques() (map[string]*chequebook.SignedCheque, error)
// LastReceivedCheque returns the last received cheque for the peer
LastReceivedCheque(peer swarm.Address) (*chequebook.SignedCheque, error)
// LastReceivedCheques returns the list of last received cheques for all peers
LastReceivedCheques() (map[string]*chequebook.SignedCheque, error)
// CashCheque sends a cashing transaction for the last cheque of the peer
CashCheque(ctx context.Context, peer swarm.Address) (common.Hash, error)
// CashoutStatus gets the status of the latest cashout transaction for the peers chequebook
CashoutStatus(ctx context.Context, peer swarm.Address) (*chequebook.CashoutStatus, error)
}
// Service is the implementation of the swap settlement layer.
type Service struct {
proto swapprotocol.Interface
logger logging.Logger
store storage.StateStorer
notifyPaymentFunc settlement.NotifyPaymentFunc
metrics metrics
chequebook chequebook.Service
chequeStore chequebook.ChequeStore
cashout chequebook.CashoutService
p2pService p2p.Service
addressbook Addressbook
networkID uint64
}
// New creates a new swap Service.
func New(proto swapprotocol.Interface, logger logging.Logger, store storage.StateStorer, chequebook chequebook.Service, chequeStore chequebook.ChequeStore, addressbook Addressbook, networkID uint64, cashout chequebook.CashoutService, p2pService p2p.Service) *Service {
return &Service{
proto: proto,
logger: logger,
store: store,
metrics: newMetrics(),
chequebook: chequebook,
chequeStore: chequeStore,
addressbook: addressbook,
networkID: networkID,
cashout: cashout,
p2pService: p2pService,
}
}
// ReceiveCheque is called by the swap protocol if a cheque is received.
func (s *Service) ReceiveCheque(ctx context.Context, peer swarm.Address, cheque *chequebook.SignedCheque) (err error) {
// check this is the same chequebook for this peer as previously
expectedChequebook, known, err := s.addressbook.Chequebook(peer)
if err != nil {
return err
}
if known && expectedChequebook != cheque.Chequebook {
return ErrWrongChequebook
}
amount, err := s.chequeStore.ReceiveCheque(ctx, cheque)
if err != nil {
s.metrics.ChequesRejected.Inc()
return fmt.Errorf("rejecting cheque: %w", err)
}
if !known {
err = s.addressbook.PutChequebook(peer, cheque.Chequebook)
if err != nil {
return err
}
}
s.metrics.TotalReceived.Add(float64(amount.Uint64()))
return s.notifyPaymentFunc(peer, amount.Uint64())
}
// Pay initiates a payment to the given peer
func (s *Service) Pay(ctx context.Context, peer swarm.Address, amount uint64) error {
beneficiary, known, err := s.addressbook.Beneficiary(peer)
if err != nil {
return err
}
if !known {
s.logger.Warningf("disconnecting non-swap peer %v", peer)
err = s.p2pService.Disconnect(peer)
if err != nil {
return err
}
return ErrUnknownBeneficary
}
err = s.chequebook.Issue(ctx, beneficiary, big.NewInt(int64(amount)), func(signedCheque *chequebook.SignedCheque) error {
return s.proto.EmitCheque(ctx, peer, signedCheque)
})
if err != nil {
return err
}
s.metrics.TotalSent.Add(float64(amount))
return nil
}
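// balanceToFloat64 is an illustrative sketch (not called by this service) of
// converting a *big.Int balance into a float64 metric value without an
// intermediate int64 step, which could overflow for large token amounts.
func balanceToFloat64(balance *big.Int) float64 {
	f, _ := big.NewFloat(0).SetInt(balance).Float64()
	return f
}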
// SetNotifyPaymentFunc sets the NotifyPaymentFunc to notify
func (s *Service) SetNotifyPaymentFunc(notifyPaymentFunc settlement.NotifyPaymentFunc) {
s.notifyPaymentFunc = notifyPaymentFunc
}
// TotalSent returns the total amount sent to a peer
func (s *Service) TotalSent(peer swarm.Address) (totalSent uint64, err error) {
beneficiary, known, err := s.addressbook.Beneficiary(peer)
if err != nil {
return 0, err
}
if !known {
return 0, settlement.ErrPeerNoSettlements
}
cheque, err := s.chequebook.LastCheque(beneficiary)
if err != nil {
if err == chequebook.ErrNoCheque {
return 0, settlement.ErrPeerNoSettlements
}
return 0, err
}
return cheque.CumulativePayout.Uint64(), nil
}
// TotalReceived returns the total amount received from a peer
func (s *Service) TotalReceived(peer swarm.Address) (totalReceived uint64, err error) {
chequebookAddress, known, err := s.addressbook.Chequebook(peer)
if err != nil {
return 0, err
}
if !known {
return 0, settlement.ErrPeerNoSettlements
}
cheque, err := s.chequeStore.LastCheque(chequebookAddress)
if err != nil {
if err == chequebook.ErrNoCheque {
return 0, settlement.ErrPeerNoSettlements
}
return 0, err
}
return cheque.CumulativePayout.Uint64(), nil
}
// SettlementsSent returns sent settlements for each individual known peer
func (s *Service) SettlementsSent() (map[string]uint64, error) {
result := make(map[string]uint64)
cheques, err := s.chequebook.LastCheques()
if err != nil {
return nil, err
}
for beneficiary, cheque := range cheques {
peer, known, err := s.addressbook.BeneficiaryPeer(beneficiary)
if err != nil {
return nil, err
}
if !known {
continue
}
result[peer.String()] = cheque.CumulativePayout.Uint64()
}
return result, nil
}
// SettlementsReceived returns received settlements for each individual known peer.
func (s *Service) SettlementsReceived() (map[string]uint64, error) {
result := make(map[string]uint64)
cheques, err := s.chequeStore.LastCheques()
if err != nil {
return nil, err
}
for chequebook, cheque := range cheques {
peer, known, err := s.addressbook.ChequebookPeer(chequebook)
if err != nil {
return nil, err
}
if !known {
continue
}
result[peer.String()] = cheque.CumulativePayout.Uint64()
}
return result, err
}
// Handshake is called by the swap protocol when a handshake is received.
func (s *Service) Handshake(peer swarm.Address, beneficiary common.Address) error {
// check that the overlay address was derived from the beneficiary (implying they have the same private key)
// while this is not strictly necessary for correct functionality we need to ensure no two peers use the same beneficiary
// as long as we enforce this we might not need the handshake message if the p2p layer exposed the overlay public key
expectedOverlay := crypto.NewOverlayFromEthereumAddress(beneficiary[:], s.networkID)
if !expectedOverlay.Equal(peer) {
return ErrWrongBeneficiary
}
storedBeneficiary, known, err := s.addressbook.Beneficiary(peer)
if err != nil {
return err
}
if !known {
s.logger.Tracef("initial swap handshake peer: %v beneficiary: %x", peer, beneficiary)
return s.addressbook.PutBeneficiary(peer, beneficiary)
}
if storedBeneficiary != beneficiary {
return ErrWrongBeneficiary
}
return nil
}
// LastSentCheque returns the last sent cheque for the peer
func (s *Service) LastSentCheque(peer swarm.Address) (*chequebook.SignedCheque, error) {
common, known, err := s.addressbook.Beneficiary(peer)
if err != nil {
return nil, err
}
if !known {
return nil, chequebook.ErrNoCheque
}
return s.chequebook.LastCheque(common)
}
// LastReceivedCheque returns the last received cheque for the peer
func (s *Service) LastReceivedCheque(peer swarm.Address) (*chequebook.SignedCheque, error) {
common, known, err := s.addressbook.Chequebook(peer)
if err != nil {
return nil, err
}
if !known {
return nil, chequebook.ErrNoCheque
}
return s.chequeStore.LastCheque(common)
}
// LastSentCheques returns the list of last sent cheques for all peers
func (s *Service) LastSentCheques() (map[string]*chequebook.SignedCheque, error) {
lastcheques, err := s.chequebook.LastCheques()
if err != nil {
return nil, err
}
resultmap := make(map[string]*chequebook.SignedCheque, len(lastcheques))
for i, j := range lastcheques {
addr, known, err := s.addressbook.BeneficiaryPeer(i)
if err == nil && known {
resultmap[addr.String()] = j
}
}
return resultmap, nil
}
// LastReceivedCheques returns the list of last received cheques for all peers
func (s *Service) LastReceivedCheques() (map[string]*chequebook.SignedCheque, error) {
lastcheques, err := s.chequeStore.LastCheques()
if err != nil {
return nil, err
}
resultmap := make(map[string]*chequebook.SignedCheque, len(lastcheques))
for i, j := range lastcheques {
addr, known, err := s.addressbook.ChequebookPeer(i)
if err == nil && known {
resultmap[addr.String()] = j
}
}
return resultmap, nil
}
// CashCheque sends a cashing transaction for the last cheque of the peer
func (s *Service) CashCheque(ctx context.Context, peer swarm.Address) (common.Hash, error) {
chequebookAddress, known, err := s.addressbook.Chequebook(peer)
if err != nil {
return common.Hash{}, err
}
if !known {
return common.Hash{}, chequebook.ErrNoCheque
}
return s.cashout.CashCheque(ctx, chequebookAddress, s.chequebook.Address())
}
// CashoutStatus gets the status of the latest cashout transaction for the peers chequebook
func (s *Service) CashoutStatus(ctx context.Context, peer swarm.Address) (*chequebook.CashoutStatus, error) {
chequebookAddress, known, err := s.addressbook.Chequebook(peer)
if err != nil {
return nil, err
}
if !known {
return nil, chequebook.ErrNoCheque
}
return s.cashout.CashoutStatus(ctx, chequebookAddress)
}
| 1 | 13,721 | note that `balance` does not necessarily fit into `int64`, especially with the new bzz token. so you might want to avoid the `int64` conversion step using `big.NewFloat(0).SetInt(balance).Float64()`. | ethersphere-bee | go |
@@ -96,7 +96,12 @@ int main(int argc, const char *argv[]) {
flatbuffers::IDLOptions::kPhp,
"Generate PHP files for tables/structs",
flatbuffers::GeneralMakeRule },
- };
+ { flatbuffers::GenerateJsonSchema, "-S", "--jsonschema", "JsonSchema", true,
+ nullptr,
+ flatbuffers::IDLOptions::kJsonSchema,
+ "Generate Json schema",
+ flatbuffers::GeneralMakeRule },
+ };
flatbuffers::FlatCompiler::InitParams params;
params.generators = generators; | 1 | /*
* Copyright 2017 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "flatbuffers/flatc.h"
static const char *g_program_name = nullptr;
static void Warn(const flatbuffers::FlatCompiler *flatc,
const std::string &warn,
bool show_exe_name) {
(void)flatc;
if (show_exe_name) {
printf("%s: ", g_program_name);
}
printf("warning: %s\n", warn.c_str());
}
static void Error(const flatbuffers::FlatCompiler *flatc,
const std::string &err,
bool usage,
bool show_exe_name) {
if (show_exe_name) {
printf("%s: ", g_program_name);
}
printf("error: %s\n", err.c_str());
if (usage) {
printf("%s", flatc->GetUsageString(g_program_name).c_str());
}
exit(1);
}
int main(int argc, const char *argv[]) {
g_program_name = argv[0];
const flatbuffers::FlatCompiler::Generator generators[] = {
{ flatbuffers::GenerateBinary, "-b", "--binary", "binary", false,
nullptr,
flatbuffers::IDLOptions::kBinary,
"Generate wire format binaries for any data definitions",
flatbuffers::BinaryMakeRule },
{ flatbuffers::GenerateTextFile, "-t", "--json", "text", false,
nullptr,
flatbuffers::IDLOptions::kJson,
"Generate text output for any data definitions",
flatbuffers::TextMakeRule },
{ flatbuffers::GenerateCPP, "-c", "--cpp", "C++", true,
flatbuffers::GenerateCppGRPC,
flatbuffers::IDLOptions::kCpp,
"Generate C++ headers for tables/structs",
flatbuffers::CPPMakeRule },
{ flatbuffers::GenerateGo, "-g", "--go", "Go", true,
flatbuffers::GenerateGoGRPC,
flatbuffers::IDLOptions::kGo,
"Generate Go files for tables/structs",
flatbuffers::GeneralMakeRule },
{ flatbuffers::GenerateGeneral, "-j", "--java", "Java", true,
nullptr,
flatbuffers::IDLOptions::kJava,
"Generate Java classes for tables/structs",
flatbuffers::GeneralMakeRule },
{ flatbuffers::GenerateJS, "-s", "--js", "JavaScript", true,
nullptr,
flatbuffers::IDLOptions::kJs,
"Generate JavaScript code for tables/structs",
flatbuffers::JSMakeRule },
{ flatbuffers::GenerateJS, "-T", "--ts", "TypeScript", true,
nullptr,
flatbuffers::IDLOptions::kTs,
"Generate TypeScript code for tables/structs",
flatbuffers::JSMakeRule },
{ flatbuffers::GenerateGeneral, "-n", "--csharp", "C#", true,
nullptr,
flatbuffers::IDLOptions::kCSharp,
"Generate C# classes for tables/structs",
flatbuffers::GeneralMakeRule },
{ flatbuffers::GeneratePython, "-p", "--python", "Python", true,
nullptr,
flatbuffers::IDLOptions::kPython,
"Generate Python files for tables/structs",
flatbuffers::GeneralMakeRule },
{ flatbuffers::GeneratePhp, nullptr, "--php", "PHP", true,
nullptr,
flatbuffers::IDLOptions::kPhp,
"Generate PHP files for tables/structs",
flatbuffers::GeneralMakeRule },
};
flatbuffers::FlatCompiler::InitParams params;
params.generators = generators;
params.num_generators = sizeof(generators) / sizeof(generators[0]);
params.warn_fn = Warn;
params.error_fn = Error;
flatbuffers::FlatCompiler flatc(params);
return flatc.Compile(argc - 1, argv + 1);
}
| 1 | 12,130 | lets leave out the `-S` ? | google-flatbuffers | java |
@@ -112,4 +112,9 @@ public class CommonRenderingUtil {
public static int toInt(String value) {
return Integer.valueOf(value);
}
+
+ /** Returns the lowercase version of the given text */
+ public static String lowercase(String value) {
+ return value.toLowerCase();
+ }
} | 1 | /* Copyright 2016 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.util;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/** Utility class to process text in the templates. */
public class CommonRenderingUtil {
private static Pattern singleQuoteStringPattern = Pattern.compile("'([^\\\']*)'");
private static Pattern doubleQuoteStringPattern = Pattern.compile("\"([^\\\"]*)\"");
/** Strips the surrounding quotes from the given string */
public static String stripQuotes(String value) {
Matcher singleQuoteMatcher = singleQuoteStringPattern.matcher(value);
Matcher doubleQuoteMatcher = doubleQuoteStringPattern.matcher(value);
if (singleQuoteMatcher.matches()) {
value = singleQuoteMatcher.group(1);
} else if (doubleQuoteMatcher.matches()) {
value = doubleQuoteMatcher.group(1);
}
return value;
}
/** Returns the input text split on newlines. */
public static List<String> getDocLines(String text) {
// TODO: Convert markdown to language-specific doc format.
// https://github.com/googleapis/toolkit/issues/331
List<String> result = Splitter.on(String.format("%n")).splitToList(text);
return result.size() == 1 && result.get(0).isEmpty() ? ImmutableList.<String>of() : result;
}
/**
* Returns the input text split on newlines and maxWidth.
*
* <p>maxWidth includes the ending newline.
*/
public static List<String> getDocLines(String text, int maxWidth) {
maxWidth = maxWidth - 1;
List<String> lines = new ArrayList<>();
for (String line : text.trim().split("\n")) {
line = line.trim();
while (line.length() > maxWidth) {
int split = lineWrapIndex(line, maxWidth);
lines.add(line.substring(0, split).trim());
line = line.substring(split).trim();
}
if (!line.isEmpty()) {
lines.add(line);
}
}
return lines;
}
/** Returns the index on which to insert a newline given maxWidth. */
private static int lineWrapIndex(String line, int maxWidth) {
for (int i = maxWidth; i > 0; i--) {
if (isLineWrapChar(line.charAt(i))) {
return i;
}
}
for (int i = maxWidth + 1; i < line.length(); i++) {
if (isLineWrapChar(line.charAt(i))) {
return i;
}
}
return line.length();
}
/**
* Returns true if c is a character that should be wrapped on.
*
* <p>The set includes whitespace characters, '(', and '['.
*/
private static boolean isLineWrapChar(char c) {
return Character.isWhitespace(c) || "([".indexOf(c) >= 0;
}
/**
* Creates a whitespace string of the specified width.
*
* @param width number of spaces
* @return padding whitespace
*/
public static String padding(int width) {
return Strings.repeat(" ", width);
}
/**
* Helper function for referencing integers from templates.
*
* @param value value
* @return int value
*/
public static int toInt(String value) {
return Integer.valueOf(value);
}
}
| 1 | 26,073 | Is this method needed? | googleapis-gapic-generator | java |
@@ -127,6 +127,7 @@ void PoolingLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
+ PoolingParameter pool_param = this->layer_param_.pooling_param();
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = top[0]->mutable_cpu_data();
const int top_count = top[0]->count(); | 1 | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/pooling_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
using std::min;
using std::max;
template <typename Dtype>
void PoolingLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
PoolingParameter pool_param = this->layer_param_.pooling_param();
if (pool_param.global_pooling()) {
CHECK(!(pool_param.has_kernel_size() ||
pool_param.has_kernel_h() || pool_param.has_kernel_w()))
<< "With Global_pooling: true Filter size cannot specified";
} else {
CHECK(!pool_param.has_kernel_size() !=
!(pool_param.has_kernel_h() && pool_param.has_kernel_w()))
<< "Filter size is kernel_size OR kernel_h and kernel_w; not both";
CHECK(pool_param.has_kernel_size() ||
(pool_param.has_kernel_h() && pool_param.has_kernel_w()))
<< "For non-square filters both kernel_h and kernel_w are required.";
}
CHECK((!pool_param.has_pad() && pool_param.has_pad_h()
&& pool_param.has_pad_w())
|| (!pool_param.has_pad_h() && !pool_param.has_pad_w()))
<< "pad is pad OR pad_h and pad_w are required.";
CHECK((!pool_param.has_stride() && pool_param.has_stride_h()
&& pool_param.has_stride_w())
|| (!pool_param.has_stride_h() && !pool_param.has_stride_w()))
<< "Stride is stride OR stride_h and stride_w are required.";
global_pooling_ = pool_param.global_pooling();
if (global_pooling_) {
kernel_h_ = bottom[0]->height();
kernel_w_ = bottom[0]->width();
} else {
if (pool_param.has_kernel_size()) {
kernel_h_ = kernel_w_ = pool_param.kernel_size();
} else {
kernel_h_ = pool_param.kernel_h();
kernel_w_ = pool_param.kernel_w();
}
}
CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero.";
CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero.";
if (!pool_param.has_pad_h()) {
pad_h_ = pad_w_ = pool_param.pad();
} else {
pad_h_ = pool_param.pad_h();
pad_w_ = pool_param.pad_w();
}
if (!pool_param.has_stride_h()) {
stride_h_ = stride_w_ = pool_param.stride();
} else {
stride_h_ = pool_param.stride_h();
stride_w_ = pool_param.stride_w();
}
if (global_pooling_) {
CHECK(pad_h_ == 0 && pad_w_ == 0 && stride_h_ == 1 && stride_w_ == 1)
<< "With Global_pooling: true; only pad = 0 and stride = 1";
}
if (pad_h_ != 0 || pad_w_ != 0) {
CHECK(this->layer_param_.pooling_param().pool()
== PoolingParameter_PoolMethod_AVE
|| this->layer_param_.pooling_param().pool()
== PoolingParameter_PoolMethod_MAX)
<< "Padding implemented only for average and max pooling.";
CHECK_LT(pad_h_, kernel_h_);
CHECK_LT(pad_w_, kernel_w_);
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, "
<< "corresponding to (num, channels, height, width)";
channels_ = bottom[0]->channels();
height_ = bottom[0]->height();
width_ = bottom[0]->width();
if (global_pooling_) {
kernel_h_ = bottom[0]->height();
kernel_w_ = bottom[0]->width();
}
pooled_height_ = static_cast<int>(ceil(static_cast<float>(
height_ + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1;
pooled_width_ = static_cast<int>(ceil(static_cast<float>(
width_ + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1;
if (pad_h_ || pad_w_) {
// If we have padding, ensure that the last pooling starts strictly
// inside the image (instead of at the padding); otherwise clip the last.
if ((pooled_height_ - 1) * stride_h_ >= height_ + pad_h_) {
--pooled_height_;
}
if ((pooled_width_ - 1) * stride_w_ >= width_ + pad_w_) {
--pooled_width_;
}
CHECK_LT((pooled_height_ - 1) * stride_h_, height_ + pad_h_);
CHECK_LT((pooled_width_ - 1) * stride_w_, width_ + pad_w_);
}
top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_,
pooled_width_);
if (top.size() > 1) {
top[1]->ReshapeLike(*top[0]);
}
// If max pooling, we will initialize the vector index part.
if (this->layer_param_.pooling_param().pool() ==
PoolingParameter_PoolMethod_MAX && top.size() == 1) {
max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_,
pooled_width_);
}
// If stochastic pooling, we will initialize the random index part.
if (this->layer_param_.pooling_param().pool() ==
PoolingParameter_PoolMethod_STOCHASTIC) {
rand_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_,
pooled_width_);
}
}
// TODO(Yangqing): Is there a faster way to do pooling in the channel-first
// case?
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = top[0]->mutable_cpu_data();
const int top_count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL; // suppress warnings about uninitalized variables
Dtype* top_mask = NULL;
// Different pooling methods. We explicitly do the switch outside the for
// loop to save time, although this results in more code.
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
// Initialize
if (use_top_mask) {
top_mask = top[1]->mutable_cpu_data();
caffe_set(top_count, Dtype(-1), top_mask);
} else {
mask = max_idx_.mutable_cpu_data();
caffe_set(top_count, -1, mask);
}
caffe_set(top_count, Dtype(-FLT_MAX), top_data);
// The main loop
for (int n = 0; n < bottom[0]->num(); ++n) {
for (int c = 0; c < channels_; ++c) {
for (int ph = 0; ph < pooled_height_; ++ph) {
for (int pw = 0; pw < pooled_width_; ++pw) {
int hstart = ph * stride_h_ - pad_h_;
int wstart = pw * stride_w_ - pad_w_;
int hend = min(hstart + kernel_h_, height_);
int wend = min(wstart + kernel_w_, width_);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
const int pool_index = ph * pooled_width_ + pw;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
const int index = h * width_ + w;
if (bottom_data[index] > top_data[pool_index]) {
top_data[pool_index] = bottom_data[index];
if (use_top_mask) {
top_mask[pool_index] = static_cast<Dtype>(index);
} else {
mask[pool_index] = index;
}
}
}
}
}
}
// compute offset
bottom_data += bottom[0]->offset(0, 1);
top_data += top[0]->offset(0, 1);
if (use_top_mask) {
top_mask += top[0]->offset(0, 1);
} else {
mask += top[0]->offset(0, 1);
}
}
}
break;
case PoolingParameter_PoolMethod_AVE:
for (int i = 0; i < top_count; ++i) {
top_data[i] = 0;
}
// The main loop
for (int n = 0; n < bottom[0]->num(); ++n) {
for (int c = 0; c < channels_; ++c) {
for (int ph = 0; ph < pooled_height_; ++ph) {
for (int pw = 0; pw < pooled_width_; ++pw) {
int hstart = ph * stride_h_ - pad_h_;
int wstart = pw * stride_w_ - pad_w_;
int hend = min(hstart + kernel_h_, height_ + pad_h_);
int wend = min(wstart + kernel_w_, width_ + pad_w_);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height_);
wend = min(wend, width_);
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
top_data[ph * pooled_width_ + pw] +=
bottom_data[h * width_ + w];
}
}
top_data[ph * pooled_width_ + pw] /= pool_size;
}
}
// compute offset
bottom_data += bottom[0]->offset(0, 1);
top_data += top[0]->offset(0, 1);
}
}
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
NOT_IMPLEMENTED;
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->cpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
// Different pooling methods. We explicitly do the switch outside the for
// loop to save time, although this results in more codes.
caffe_set(bottom[0]->count(), Dtype(0), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL; // suppress warnings about uninitialized variables
const Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
// The main loop
if (use_top_mask) {
top_mask = top[1]->cpu_data();
} else {
mask = max_idx_.cpu_data();
}
for (int n = 0; n < top[0]->num(); ++n) {
for (int c = 0; c < channels_; ++c) {
for (int ph = 0; ph < pooled_height_; ++ph) {
for (int pw = 0; pw < pooled_width_; ++pw) {
const int index = ph * pooled_width_ + pw;
const int bottom_index =
use_top_mask ? top_mask[index] : mask[index];
bottom_diff[bottom_index] += top_diff[index];
}
}
bottom_diff += bottom[0]->offset(0, 1);
top_diff += top[0]->offset(0, 1);
if (use_top_mask) {
top_mask += top[0]->offset(0, 1);
} else {
mask += top[0]->offset(0, 1);
}
}
}
break;
case PoolingParameter_PoolMethod_AVE:
// The main loop
for (int n = 0; n < top[0]->num(); ++n) {
for (int c = 0; c < channels_; ++c) {
for (int ph = 0; ph < pooled_height_; ++ph) {
for (int pw = 0; pw < pooled_width_; ++pw) {
int hstart = ph * stride_h_ - pad_h_;
int wstart = pw * stride_w_ - pad_w_;
int hend = min(hstart + kernel_h_, height_ + pad_h_);
int wend = min(wstart + kernel_w_, width_ + pad_w_);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height_);
wend = min(wend, width_);
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
bottom_diff[h * width_ + w] +=
top_diff[ph * pooled_width_ + pw] / pool_size;
}
}
}
}
// offset
bottom_diff += bottom[0]->offset(0, 1);
top_diff += top[0]->offset(0, 1);
}
}
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
NOT_IMPLEMENTED;
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
}
#ifdef CPU_ONLY
STUB_GPU(PoolingLayer);
#endif
INSTANTIATE_CLASS(PoolingLayer);
} // namespace caffe
| 1 | 38,657 | How about, instead of reading the param every time, just add a class member variable to hold the value of the flag, and just set it once during `LayerSetUp`? | BVLC-caffe | cpp |
@@ -99,7 +99,7 @@ func (e *ECS) Service(clusterName, serviceName string) (*Service, error) {
return nil, fmt.Errorf("cannot find service %s", serviceName)
}
-// ServiceTasks calls ECS API and returns ECS tasks running by a service.
+// ServiceTasks calls ECS API and returns ECS tasks desired to be running by a service.
func (e *ECS) ServiceTasks(cluster, service string) ([]*Task, error) {
return e.listTasks(cluster, withService(service))
} | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package ecs provides a client to make API requests to Amazon Elastic Container Service.
package ecs
import (
"errors"
"fmt"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ecs"
"github.com/aws/copilot-cli/internal/pkg/exec"
)
const clusterStatusActive = "ACTIVE"
type api interface {
DescribeClusters(input *ecs.DescribeClustersInput) (*ecs.DescribeClustersOutput, error)
DescribeServices(input *ecs.DescribeServicesInput) (*ecs.DescribeServicesOutput, error)
DescribeTasks(input *ecs.DescribeTasksInput) (*ecs.DescribeTasksOutput, error)
DescribeTaskDefinition(input *ecs.DescribeTaskDefinitionInput) (*ecs.DescribeTaskDefinitionOutput, error)
ExecuteCommand(input *ecs.ExecuteCommandInput) (*ecs.ExecuteCommandOutput, error)
ListTasks(input *ecs.ListTasksInput) (*ecs.ListTasksOutput, error)
RunTask(input *ecs.RunTaskInput) (*ecs.RunTaskOutput, error)
StopTask(input *ecs.StopTaskInput) (*ecs.StopTaskOutput, error)
WaitUntilTasksRunning(input *ecs.DescribeTasksInput) error
}
type ssmSessionStarter interface {
StartSession(ssmSession *ecs.Session) error
}
// ECS wraps an AWS ECS client.
type ECS struct {
client api
newSessStarter func() ssmSessionStarter
}
// RunTaskInput holds the fields needed to run tasks.
type RunTaskInput struct {
Cluster string
Count int
Subnets []string
SecurityGroups []string
TaskFamilyName string
StartedBy string
}
// ExecuteCommandInput holds the fields needed to execute commands in a running container.
type ExecuteCommandInput struct {
Cluster string
Command string
Task string
Container string
}
// New returns a Service configured against the input session.
func New(s *session.Session) *ECS {
return &ECS{
client: ecs.New(s),
newSessStarter: func() ssmSessionStarter {
return exec.NewSSMPluginCommand(s)
},
}
}
// TaskDefinition calls ECS API and returns the task definition.
func (e *ECS) TaskDefinition(taskDefName string) (*TaskDefinition, error) {
resp, err := e.client.DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{
TaskDefinition: aws.String(taskDefName),
})
if err != nil {
return nil, fmt.Errorf("describe task definition %s: %w", taskDefName, err)
}
td := TaskDefinition(*resp.TaskDefinition)
return &td, nil
}
// Service calls ECS API and returns the specified service running in the cluster.
func (e *ECS) Service(clusterName, serviceName string) (*Service, error) {
resp, err := e.client.DescribeServices(&ecs.DescribeServicesInput{
Cluster: aws.String(clusterName),
Services: aws.StringSlice([]string{serviceName}),
})
if err != nil {
return nil, fmt.Errorf("describe service %s: %w", serviceName, err)
}
for _, service := range resp.Services {
if aws.StringValue(service.ServiceName) == serviceName {
svc := Service(*service)
return &svc, nil
}
}
return nil, fmt.Errorf("cannot find service %s", serviceName)
}
// ServiceTasks calls ECS API and returns ECS tasks running by a service.
func (e *ECS) ServiceTasks(cluster, service string) ([]*Task, error) {
return e.listTasks(cluster, withService(service))
}
// StoppedServiceTasks calls ECS API and returns stopped ECS tasks in a service.
func (e *ECS) StoppedServiceTasks(cluster, service string) ([]*Task, error) {
return e.listTasks(cluster, withService(service), withStoppedTasks())
}
// RunningTasksInFamily calls ECS API and returns ECS tasks with the desired status to be RUNNING
// within the same task definition family.
func (e *ECS) RunningTasksInFamily(cluster, family string) ([]*Task, error) {
return e.listTasks(cluster, withFamily(family), withRunningTasks())
}
// RunningTasks calls ECS API and returns ECS tasks with the desired status to be RUNNING.
func (e *ECS) RunningTasks(cluster string) ([]*Task, error) {
return e.listTasks(cluster, withRunningTasks())
}
type listTasksOpts func(*ecs.ListTasksInput)
func withService(svcName string) listTasksOpts {
return func(in *ecs.ListTasksInput) {
in.ServiceName = aws.String(svcName)
}
}
func withFamily(family string) listTasksOpts {
return func(in *ecs.ListTasksInput) {
in.Family = aws.String(family)
}
}
func withRunningTasks() listTasksOpts {
return func(in *ecs.ListTasksInput) {
in.DesiredStatus = aws.String(ecs.DesiredStatusRunning)
}
}
func withStoppedTasks() listTasksOpts {
return func(in *ecs.ListTasksInput) {
in.DesiredStatus = aws.String(ecs.DesiredStatusStopped)
}
}
func (e *ECS) listTasks(cluster string, opts ...listTasksOpts) ([]*Task, error) {
var tasks []*Task
in := &ecs.ListTasksInput{
Cluster: aws.String(cluster),
}
for _, opt := range opts {
opt(in)
}
for {
listTaskResp, err := e.client.ListTasks(in)
if err != nil {
return nil, fmt.Errorf("list running tasks: %w", err)
}
if len(listTaskResp.TaskArns) == 0 {
return tasks, nil
}
descTaskResp, err := e.client.DescribeTasks(&ecs.DescribeTasksInput{
Cluster: aws.String(cluster),
Tasks: listTaskResp.TaskArns,
Include: aws.StringSlice([]string{ecs.TaskFieldTags}),
})
if err != nil {
return nil, fmt.Errorf("describe running tasks in cluster %s: %w", cluster, err)
}
for _, task := range descTaskResp.Tasks {
t := Task(*task)
tasks = append(tasks, &t)
}
if listTaskResp.NextToken == nil {
break
}
in.NextToken = listTaskResp.NextToken
}
return tasks, nil
}
// StopTasksOpts sets the optional parameter for StopTasks.
type StopTasksOpts func(*ecs.StopTaskInput)
// WithStopTaskReason sets an optional message specified when a task is stopped.
func WithStopTaskReason(reason string) StopTasksOpts {
return func(in *ecs.StopTaskInput) {
in.Reason = aws.String(reason)
}
}
// WithStopTaskCluster sets the cluster that hosts the task to stop.
func WithStopTaskCluster(cluster string) StopTasksOpts {
return func(in *ecs.StopTaskInput) {
in.Cluster = aws.String(cluster)
}
}
// StopTasks stops multiple running tasks given their IDs or ARNs.
func (e *ECS) StopTasks(tasks []string, opts ...StopTasksOpts) error {
in := &ecs.StopTaskInput{}
for _, opt := range opts {
opt(in)
}
for _, task := range tasks {
in.Task = aws.String(task)
if _, err := e.client.StopTask(in); err != nil {
return fmt.Errorf("stop task %s: %w", task, err)
}
}
return nil
}
// DefaultCluster returns the default cluster ARN in the account and region.
func (e *ECS) DefaultCluster() (string, error) {
resp, err := e.client.DescribeClusters(&ecs.DescribeClustersInput{})
if err != nil {
return "", fmt.Errorf("get default cluster: %w", err)
}
if len(resp.Clusters) == 0 {
return "", ErrNoDefaultCluster
}
// NOTE: right now at most 1 default cluster is possible, so cluster[0] must be the default cluster
cluster := resp.Clusters[0]
if aws.StringValue(cluster.Status) != clusterStatusActive {
return "", ErrNoDefaultCluster
}
return aws.StringValue(cluster.ClusterArn), nil
}
// HasDefaultCluster tries to find the default cluster and returns true if there is one.
func (e *ECS) HasDefaultCluster() (bool, error) {
if _, err := e.DefaultCluster(); err != nil {
if errors.Is(err, ErrNoDefaultCluster) {
return false, nil
}
return false, err
}
return true, nil
}
// RunTask runs a number of tasks with the task definition and network configurations in a cluster, and returns after
// the task(s) is running or fails to run, along with task ARNs if possible.
func (e *ECS) RunTask(input RunTaskInput) ([]*Task, error) {
resp, err := e.client.RunTask(&ecs.RunTaskInput{
Cluster: aws.String(input.Cluster),
Count: aws.Int64(int64(input.Count)),
LaunchType: aws.String(ecs.LaunchTypeFargate),
StartedBy: aws.String(input.StartedBy),
TaskDefinition: aws.String(input.TaskFamilyName),
NetworkConfiguration: &ecs.NetworkConfiguration{
AwsvpcConfiguration: &ecs.AwsVpcConfiguration{
AssignPublicIp: aws.String(ecs.AssignPublicIpEnabled),
Subnets: aws.StringSlice(input.Subnets),
SecurityGroups: aws.StringSlice(input.SecurityGroups),
},
},
EnableExecuteCommand: aws.Bool(true),
PlatformVersion: aws.String("1.4.0"),
PropagateTags: aws.String(ecs.PropagateTagsTaskDefinition),
})
if err != nil {
return nil, fmt.Errorf("run task(s) %s: %w", input.TaskFamilyName, err)
}
taskARNs := make([]string, len(resp.Tasks))
for idx, task := range resp.Tasks {
taskARNs[idx] = aws.StringValue(task.TaskArn)
}
waitErr := e.client.WaitUntilTasksRunning(&ecs.DescribeTasksInput{
Cluster: aws.String(input.Cluster),
Tasks: aws.StringSlice(taskARNs),
Include: aws.StringSlice([]string{ecs.TaskFieldTags}),
})
if waitErr != nil && !isRequestTimeoutErr(waitErr) {
return nil, fmt.Errorf("wait for tasks to be running: %w", waitErr)
}
tasks, describeErr := e.DescribeTasks(input.Cluster, taskARNs)
if describeErr != nil {
return nil, describeErr
}
if waitErr != nil {
return nil, &ErrWaiterResourceNotReadyForTasks{tasks: tasks, awsErrResourceNotReady: waitErr}
}
return tasks, nil
}
// DescribeTasks returns the tasks with the taskARNs in the cluster.
func (e *ECS) DescribeTasks(cluster string, taskARNs []string) ([]*Task, error) {
resp, err := e.client.DescribeTasks(&ecs.DescribeTasksInput{
Cluster: aws.String(cluster),
Tasks: aws.StringSlice(taskARNs),
Include: aws.StringSlice([]string{ecs.TaskFieldTags}),
})
if err != nil {
return nil, fmt.Errorf("describe tasks: %w", err)
}
tasks := make([]*Task, len(resp.Tasks))
for idx, task := range resp.Tasks {
t := Task(*task)
tasks[idx] = &t
}
return tasks, nil
}
// ExecuteCommand executes commands in a running container, and then terminate the session.
func (e *ECS) ExecuteCommand(in ExecuteCommandInput) (err error) {
execCmdresp, err := e.client.ExecuteCommand(&ecs.ExecuteCommandInput{
Cluster: aws.String(in.Cluster),
Command: aws.String(in.Command),
Container: aws.String(in.Container),
Interactive: aws.Bool(true),
Task: aws.String(in.Task),
})
if err != nil {
return &ErrExecuteCommand{err: err}
}
sessID := aws.StringValue(execCmdresp.Session.SessionId)
if err = e.newSessStarter().StartSession(execCmdresp.Session); err != nil {
err = fmt.Errorf("start session %s using ssm plugin: %w", sessID, err)
}
return err
}
// NetworkConfiguration returns the network configuration of a service.
func (e *ECS) NetworkConfiguration(cluster, serviceName string) (*NetworkConfiguration, error) {
service, err := e.service(cluster, serviceName)
if err != nil {
return nil, err
}
networkConfig := service.NetworkConfiguration
if networkConfig == nil || networkConfig.AwsvpcConfiguration == nil {
return nil, fmt.Errorf("cannot find the awsvpc configuration for service %s", serviceName)
}
return &NetworkConfiguration{
AssignPublicIp: aws.StringValue(networkConfig.AwsvpcConfiguration.AssignPublicIp),
SecurityGroups: aws.StringValueSlice(networkConfig.AwsvpcConfiguration.SecurityGroups),
Subnets: aws.StringValueSlice(networkConfig.AwsvpcConfiguration.Subnets),
}, nil
}
func (e *ECS) service(clusterName, serviceName string) (*Service, error) {
resp, err := e.client.DescribeServices(&ecs.DescribeServicesInput{
Cluster: aws.String(clusterName),
Services: aws.StringSlice([]string{serviceName}),
})
if err != nil {
return nil, fmt.Errorf("describe service %s: %w", serviceName, err)
}
for _, service := range resp.Services {
if aws.StringValue(service.ServiceName) == serviceName {
svc := Service(*service)
return &svc, nil
}
}
return nil, fmt.Errorf("cannot find service %s", serviceName)
}
func isRequestTimeoutErr(err error) bool {
if aerr, ok := err.(awserr.Error); ok {
return aerr.Code() == request.WaiterResourceNotReadyErrorCode
}
return false
}
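The review question attached to this record turns on tasks "desired to be running" versus tasks actually running. As an illustrative sketch only (not copilot code), a caller could filter described tasks by their last reported status; this assumes the aws-sdk-go ecs.Task LastStatus field and compares against the literal "RUNNING" status string:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// runningOnly keeps only tasks whose last reported status is RUNNING, a
// stricter notion than the "desired to be running" wording on ServiceTasks.
func runningOnly(tasks []*ecs.Task) []*ecs.Task {
	var running []*ecs.Task
	for _, t := range tasks {
		if aws.StringValue(t.LastStatus) == "RUNNING" {
			running = append(running, t)
		}
	}
	return running
}

func main() {
	tasks := []*ecs.Task{
		{LastStatus: aws.String("RUNNING")},
		{LastStatus: aws.String("PROVISIONING")},
	}
	fmt.Println(len(runningOnly(tasks))) // 1
}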
| 1 | 17,754 | Does this mean it's only returning tasks associated with the primary deployment? or something else | aws-copilot-cli | go |
@@ -1101,6 +1101,11 @@ func TestRequestNoConnection(t *testing.T) {
t.Fatal("expect error, but got none")
}
+ t.Log(err)
+ awsError := err.(awserr.Error)
+ origError := awsError.OrigErr()
+ t.Logf("Orig Error: %#v of type %T", origError, origError)
+
if e, a := 10, r.RetryCount; e != a {
t.Errorf("expect %v retry count, got %v", e, a)
} | 1 | package request_test
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"net/url"
"reflect"
"runtime"
"strconv"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/awstesting"
"github.com/aws/aws-sdk-go/awstesting/unit"
"github.com/aws/aws-sdk-go/private/protocol/rest"
)
type tempNetworkError struct {
op string
msg string
isTemp bool
}
func (e *tempNetworkError) Temporary() bool { return e.isTemp }
func (e *tempNetworkError) Error() string {
return fmt.Sprintf("%s: %s", e.op, e.msg)
}
var (
// net.OpError accept, are always temporary
errAcceptConnectionResetStub = &tempNetworkError{
isTemp: true, op: "accept", msg: "connection reset",
}
// net.OpError read for ECONNRESET is not temporary.
errReadConnectionResetStub = &tempNetworkError{
isTemp: false, op: "read", msg: "connection reset",
}
	// net.OpError write for ECONNRESET may not be temporary, but is treated as
// temporary by the SDK.
errWriteConnectionResetStub = &tempNetworkError{
isTemp: false, op: "write", msg: "connection reset",
}
	// net.OpError write for broken pipe may not be temporary, but is treated as
// temporary by the SDK.
errWriteBrokenPipeStub = &tempNetworkError{
isTemp: false, op: "write", msg: "broken pipe",
}
// Generic connection reset error
errConnectionResetStub = errors.New("connection reset")
)
type testData struct {
Data string
}
func body(str string) io.ReadCloser {
return ioutil.NopCloser(bytes.NewReader([]byte(str)))
}
func unmarshal(req *request.Request) {
defer req.HTTPResponse.Body.Close()
if req.Data != nil {
json.NewDecoder(req.HTTPResponse.Body).Decode(req.Data)
}
return
}
func unmarshalError(req *request.Request) {
bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body)
if err != nil {
req.Error = awserr.New("UnmarshaleError", req.HTTPResponse.Status, err)
return
}
if len(bodyBytes) == 0 {
req.Error = awserr.NewRequestFailure(
awserr.New("UnmarshaleError", req.HTTPResponse.Status, fmt.Errorf("empty body")),
req.HTTPResponse.StatusCode,
"",
)
return
}
var jsonErr jsonErrorResponse
if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil {
req.Error = awserr.New("UnmarshaleError", "JSON unmarshal", err)
return
}
req.Error = awserr.NewRequestFailure(
awserr.New(jsonErr.Code, jsonErr.Message, nil),
req.HTTPResponse.StatusCode,
"",
)
}
type jsonErrorResponse struct {
Code string `json:"__type"`
Message string `json:"message"`
}
// test that retries occur for 5xx status codes
func TestRequestRecoverRetry5xx(t *testing.T) {
reqNum := 0
reqs := []http.Response{
{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
{StatusCode: 502, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
{StatusCode: 200, Body: body(`{"data":"valid"}`)},
}
s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10))
s.Handlers.Validate.Clear()
s.Handlers.Unmarshal.PushBack(unmarshal)
s.Handlers.UnmarshalError.PushBack(unmarshalError)
s.Handlers.Send.Clear() // mock sending
s.Handlers.Send.PushBack(func(r *request.Request) {
r.HTTPResponse = &reqs[reqNum]
reqNum++
})
out := &testData{}
r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
err := r.Send()
if err != nil {
t.Fatalf("expect no error, but got %v", err)
}
if e, a := 2, int(r.RetryCount); e != a {
t.Errorf("expect %d retry count, got %d", e, a)
}
if e, a := "valid", out.Data; e != a {
t.Errorf("expect %q output got %q", e, a)
}
}
// test that retries occur for 4xx status codes with a response type that can be retried - see `shouldRetry`
func TestRequestRecoverRetry4xxRetryable(t *testing.T) {
reqNum := 0
reqs := []http.Response{
{StatusCode: 400, Body: body(`{"__type":"Throttling","message":"Rate exceeded."}`)},
{StatusCode: 400, Body: body(`{"__type":"ProvisionedThroughputExceededException","message":"Rate exceeded."}`)},
{StatusCode: 429, Body: body(`{"__type":"FooException","message":"Rate exceeded."}`)},
{StatusCode: 200, Body: body(`{"data":"valid"}`)},
}
s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10))
s.Handlers.Validate.Clear()
s.Handlers.Unmarshal.PushBack(unmarshal)
s.Handlers.UnmarshalError.PushBack(unmarshalError)
s.Handlers.Send.Clear() // mock sending
s.Handlers.Send.PushBack(func(r *request.Request) {
r.HTTPResponse = &reqs[reqNum]
reqNum++
})
out := &testData{}
r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
err := r.Send()
if err != nil {
t.Fatalf("expect no error, but got %v", err)
}
if e, a := 3, int(r.RetryCount); e != a {
t.Errorf("expect %d retry count, got %d", e, a)
}
if e, a := "valid", out.Data; e != a {
t.Errorf("expect %q output got %q", e, a)
}
}
// test that retries don't occur for 4xx status codes with a response type that can't be retried
func TestRequest4xxUnretryable(t *testing.T) {
s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10))
s.Handlers.Validate.Clear()
s.Handlers.Unmarshal.PushBack(unmarshal)
s.Handlers.UnmarshalError.PushBack(unmarshalError)
s.Handlers.Send.Clear() // mock sending
s.Handlers.Send.PushBack(func(r *request.Request) {
r.HTTPResponse = &http.Response{StatusCode: 401, Body: body(`{"__type":"SignatureDoesNotMatch","message":"Signature does not match."}`)}
})
out := &testData{}
r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
err := r.Send()
if err == nil {
t.Fatalf("expect error, but did not get one")
}
aerr := err.(awserr.RequestFailure)
if e, a := 401, aerr.StatusCode(); e != a {
t.Errorf("expect %d status code, got %d", e, a)
}
if e, a := "SignatureDoesNotMatch", aerr.Code(); e != a {
t.Errorf("expect %q error code, got %q", e, a)
}
if e, a := "Signature does not match.", aerr.Message(); e != a {
t.Errorf("expect %q error message, got %q", e, a)
}
if e, a := 0, int(r.RetryCount); e != a {
t.Errorf("expect %d retry count, got %d", e, a)
}
}
func TestRequestExhaustRetries(t *testing.T) {
delays := []time.Duration{}
sleepDelay := func(delay time.Duration) {
delays = append(delays, delay)
}
reqNum := 0
reqs := []http.Response{
{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
}
s := awstesting.NewClient(aws.NewConfig().WithSleepDelay(sleepDelay))
s.Handlers.Validate.Clear()
s.Handlers.Unmarshal.PushBack(unmarshal)
s.Handlers.UnmarshalError.PushBack(unmarshalError)
s.Handlers.Send.Clear() // mock sending
s.Handlers.Send.PushBack(func(r *request.Request) {
r.HTTPResponse = &reqs[reqNum]
reqNum++
})
r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
err := r.Send()
if err == nil {
t.Fatalf("expect error, but did not get one")
}
aerr := err.(awserr.RequestFailure)
if e, a := 500, aerr.StatusCode(); e != a {
t.Errorf("expect %d status code, got %d", e, a)
}
if e, a := "UnknownError", aerr.Code(); e != a {
t.Errorf("expect %q error code, got %q", e, a)
}
if e, a := "An error occurred.", aerr.Message(); e != a {
t.Errorf("expect %q error message, got %q", e, a)
}
if e, a := 3, int(r.RetryCount); e != a {
t.Errorf("expect %d retry count, got %d", e, a)
}
expectDelays := []struct{ min, max time.Duration }{{30, 59}, {60, 118}, {120, 236}}
for i, v := range delays {
min := expectDelays[i].min * time.Millisecond
max := expectDelays[i].max * time.Millisecond
if !(min <= v && v <= max) {
t.Errorf("Expect delay to be within range, i:%d, v:%s, min:%s, max:%s",
i, v, min, max)
}
}
}
// test that the request is retried after the credentials are expired.
func TestRequestRecoverExpiredCreds(t *testing.T) {
reqNum := 0
reqs := []http.Response{
{StatusCode: 400, Body: body(`{"__type":"ExpiredTokenException","message":"expired token"}`)},
{StatusCode: 200, Body: body(`{"data":"valid"}`)},
}
s := awstesting.NewClient(&aws.Config{MaxRetries: aws.Int(10), Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "")})
s.Handlers.Validate.Clear()
s.Handlers.Unmarshal.PushBack(unmarshal)
s.Handlers.UnmarshalError.PushBack(unmarshalError)
credExpiredBeforeRetry := false
credExpiredAfterRetry := false
s.Handlers.AfterRetry.PushBack(func(r *request.Request) {
credExpiredAfterRetry = r.Config.Credentials.IsExpired()
})
s.Handlers.Sign.Clear()
s.Handlers.Sign.PushBack(func(r *request.Request) {
r.Config.Credentials.Get()
})
s.Handlers.Send.Clear() // mock sending
s.Handlers.Send.PushBack(func(r *request.Request) {
r.HTTPResponse = &reqs[reqNum]
reqNum++
})
out := &testData{}
r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
err := r.Send()
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
if credExpiredBeforeRetry {
t.Errorf("Expect valid creds before retry check")
}
if !credExpiredAfterRetry {
t.Errorf("Expect expired creds after retry check")
}
if s.Config.Credentials.IsExpired() {
t.Errorf("Expect valid creds after cred expired recovery")
}
if e, a := 1, int(r.RetryCount); e != a {
t.Errorf("expect %d retry count, got %d", e, a)
}
if e, a := "valid", out.Data; e != a {
t.Errorf("expect %q output got %q", e, a)
}
}
func TestMakeAddtoUserAgentHandler(t *testing.T) {
fn := request.MakeAddToUserAgentHandler("name", "version", "extra1", "extra2")
r := &request.Request{HTTPRequest: &http.Request{Header: http.Header{}}}
r.HTTPRequest.Header.Set("User-Agent", "foo/bar")
fn(r)
if e, a := "foo/bar name/version (extra1; extra2)", r.HTTPRequest.Header.Get("User-Agent"); !strings.HasPrefix(a, e) {
t.Errorf("expect %q user agent, got %q", e, a)
}
}
func TestMakeAddtoUserAgentFreeFormHandler(t *testing.T) {
fn := request.MakeAddToUserAgentFreeFormHandler("name/version (extra1; extra2)")
r := &request.Request{HTTPRequest: &http.Request{Header: http.Header{}}}
r.HTTPRequest.Header.Set("User-Agent", "foo/bar")
fn(r)
if e, a := "foo/bar name/version (extra1; extra2)", r.HTTPRequest.Header.Get("User-Agent"); !strings.HasPrefix(a, e) {
t.Errorf("expect %q user agent, got %q", e, a)
}
}
func TestRequestUserAgent(t *testing.T) {
s := awstesting.NewClient(&aws.Config{Region: aws.String("us-east-1")})
req := s.NewRequest(&request.Operation{Name: "Operation"}, nil, &testData{})
req.HTTPRequest.Header.Set("User-Agent", "foo/bar")
if err := req.Build(); err != nil {
t.Fatalf("expect no error, got %v", err)
}
expectUA := fmt.Sprintf("foo/bar %s/%s (%s; %s; %s)",
aws.SDKName, aws.SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH)
if e, a := expectUA, req.HTTPRequest.Header.Get("User-Agent"); !strings.HasPrefix(a, e) {
t.Errorf("expect %q user agent, got %q", e, a)
}
}
func TestRequestThrottleRetries(t *testing.T) {
delays := []time.Duration{}
sleepDelay := func(delay time.Duration) {
delays = append(delays, delay)
}
reqNum := 0
reqs := []http.Response{
{StatusCode: 500, Body: body(`{"__type":"Throttling","message":"An error occurred."}`)},
{StatusCode: 500, Body: body(`{"__type":"Throttling","message":"An error occurred."}`)},
{StatusCode: 500, Body: body(`{"__type":"Throttling","message":"An error occurred."}`)},
{StatusCode: 500, Body: body(`{"__type":"Throttling","message":"An error occurred."}`)},
}
s := awstesting.NewClient(aws.NewConfig().WithSleepDelay(sleepDelay))
s.Handlers.Validate.Clear()
s.Handlers.Unmarshal.PushBack(unmarshal)
s.Handlers.UnmarshalError.PushBack(unmarshalError)
s.Handlers.Send.Clear() // mock sending
s.Handlers.Send.PushBack(func(r *request.Request) {
r.HTTPResponse = &reqs[reqNum]
reqNum++
})
r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
err := r.Send()
if err == nil {
t.Fatalf("expect error, but did not get one")
}
aerr := err.(awserr.RequestFailure)
if e, a := 500, aerr.StatusCode(); e != a {
t.Errorf("expect %d status code, got %d", e, a)
}
if e, a := "Throttling", aerr.Code(); e != a {
t.Errorf("expect %q error code, got %q", e, a)
}
if e, a := "An error occurred.", aerr.Message(); e != a {
t.Errorf("expect %q error message, got %q", e, a)
}
if e, a := 3, int(r.RetryCount); e != a {
t.Errorf("expect %d retry count, got %d", e, a)
}
expectDelays := []struct{ min, max time.Duration }{{500, 999}, {1000, 1998}, {2000, 3996}}
for i, v := range delays {
min := expectDelays[i].min * time.Millisecond
max := expectDelays[i].max * time.Millisecond
if !(min <= v && v <= max) {
t.Errorf("Expect delay to be within range, i:%d, v:%s, min:%s, max:%s",
i, v, min, max)
}
}
}
// test that retries occur for request timeouts when response.Body can be nil
func TestRequestRecoverTimeoutWithNilBody(t *testing.T) {
reqNum := 0
reqs := []*http.Response{
{StatusCode: 0, Body: nil}, // body can be nil when requests time out
{StatusCode: 200, Body: body(`{"data":"valid"}`)},
}
errors := []error{
errTimeout, nil,
}
s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10))
s.Handlers.Validate.Clear()
s.Handlers.Unmarshal.PushBack(unmarshal)
s.Handlers.UnmarshalError.PushBack(unmarshalError)
s.Handlers.AfterRetry.Clear() // force retry on all errors
s.Handlers.AfterRetry.PushBack(func(r *request.Request) {
if r.Error != nil {
r.Error = nil
r.Retryable = aws.Bool(true)
r.RetryCount++
}
})
s.Handlers.Send.Clear() // mock sending
s.Handlers.Send.PushBack(func(r *request.Request) {
r.HTTPResponse = reqs[reqNum]
r.Error = errors[reqNum]
reqNum++
})
out := &testData{}
r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
err := r.Send()
if err != nil {
t.Fatalf("expect no error, but got %v", err)
}
if e, a := 1, int(r.RetryCount); e != a {
t.Errorf("expect %d retry count, got %d", e, a)
}
if e, a := "valid", out.Data; e != a {
t.Errorf("expect %q output got %q", e, a)
}
}
func TestRequestRecoverTimeoutWithNilResponse(t *testing.T) {
reqNum := 0
reqs := []*http.Response{
nil,
{StatusCode: 200, Body: body(`{"data":"valid"}`)},
}
errors := []error{
errTimeout,
nil,
}
s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10))
s.Handlers.Validate.Clear()
s.Handlers.Unmarshal.PushBack(unmarshal)
s.Handlers.UnmarshalError.PushBack(unmarshalError)
s.Handlers.AfterRetry.Clear() // force retry on all errors
s.Handlers.AfterRetry.PushBack(func(r *request.Request) {
if r.Error != nil {
r.Error = nil
r.Retryable = aws.Bool(true)
r.RetryCount++
}
})
s.Handlers.Send.Clear() // mock sending
s.Handlers.Send.PushBack(func(r *request.Request) {
r.HTTPResponse = reqs[reqNum]
r.Error = errors[reqNum]
reqNum++
})
out := &testData{}
r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
err := r.Send()
if err != nil {
t.Fatalf("expect no error, but got %v", err)
}
if e, a := 1, int(r.RetryCount); e != a {
t.Errorf("expect %d retry count, got %d", e, a)
}
if e, a := "valid", out.Data; e != a {
t.Errorf("expect %q output got %q", e, a)
}
}
func TestRequest_NoBody(t *testing.T) {
cases := []string{
"GET", "HEAD", "DELETE",
"PUT", "POST", "PATCH",
}
for i, c := range cases {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if v := r.TransferEncoding; len(v) > 0 {
t.Errorf("%d, expect no body sent with Transfer-Encoding, %v", i, v)
}
outMsg := []byte(`{"Value": "abc"}`)
if b, err := ioutil.ReadAll(r.Body); err != nil {
t.Fatalf("%d, expect no error reading request body, got %v", i, err)
} else if n := len(b); n > 0 {
t.Errorf("%d, expect no request body, got %d bytes", i, n)
}
w.Header().Set("Content-Length", strconv.Itoa(len(outMsg)))
if _, err := w.Write(outMsg); err != nil {
t.Fatalf("%d, expect no error writing server response, got %v", i, err)
}
}))
s := awstesting.NewClient(&aws.Config{
Region: aws.String("mock-region"),
MaxRetries: aws.Int(0),
Endpoint: aws.String(server.URL),
DisableSSL: aws.Bool(true),
})
s.Handlers.Build.PushBack(rest.Build)
s.Handlers.Validate.Clear()
s.Handlers.Unmarshal.PushBack(unmarshal)
s.Handlers.UnmarshalError.PushBack(unmarshalError)
in := struct {
Bucket *string `location:"uri" locationName:"bucket"`
Key *string `location:"uri" locationName:"key"`
}{
Bucket: aws.String("mybucket"), Key: aws.String("myKey"),
}
out := struct {
Value *string
}{}
r := s.NewRequest(&request.Operation{
Name: "OpName", HTTPMethod: c, HTTPPath: "/{bucket}/{key+}",
}, &in, &out)
if err := r.Send(); err != nil {
t.Fatalf("%d, expect no error sending request, got %v", i, err)
}
}
}
func TestIsSerializationErrorRetryable(t *testing.T) {
testCases := []struct {
err error
expected bool
}{
{
err: awserr.New(request.ErrCodeSerialization, "foo error", nil),
expected: false,
},
{
err: awserr.New("ErrFoo", "foo error", nil),
expected: false,
},
{
err: nil,
expected: false,
},
{
err: awserr.New(request.ErrCodeSerialization, "foo error", errAcceptConnectionResetStub),
expected: true,
},
}
for i, c := range testCases {
r := &request.Request{
Error: c.err,
}
if r.IsErrorRetryable() != c.expected {
t.Errorf("Case %d: Expected %v, but received %v", i+1, c.expected, !c.expected)
}
}
}
func TestWithLogLevel(t *testing.T) {
r := &request.Request{}
opt := request.WithLogLevel(aws.LogDebugWithHTTPBody)
r.ApplyOptions(opt)
if !r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) {
t.Errorf("expect log level to be set, but was not, %v",
r.Config.LogLevel.Value())
}
}
func TestWithGetResponseHeader(t *testing.T) {
r := &request.Request{}
var val, val2 string
r.ApplyOptions(
request.WithGetResponseHeader("x-a-header", &val),
request.WithGetResponseHeader("x-second-header", &val2),
)
r.HTTPResponse = &http.Response{
Header: func() http.Header {
h := http.Header{}
h.Set("x-a-header", "first")
h.Set("x-second-header", "second")
return h
}(),
}
r.Handlers.Complete.Run(r)
if e, a := "first", val; e != a {
t.Errorf("expect %q header value got %q", e, a)
}
if e, a := "second", val2; e != a {
t.Errorf("expect %q header value got %q", e, a)
}
}
func TestWithGetResponseHeaders(t *testing.T) {
r := &request.Request{}
var headers http.Header
opt := request.WithGetResponseHeaders(&headers)
r.ApplyOptions(opt)
r.HTTPResponse = &http.Response{
Header: func() http.Header {
h := http.Header{}
h.Set("x-a-header", "headerValue")
return h
}(),
}
r.Handlers.Complete.Run(r)
if e, a := "headerValue", headers.Get("x-a-header"); e != a {
t.Errorf("expect %q header value got %q", e, a)
}
}
type testRetryer struct {
shouldRetry bool
}
func (d *testRetryer) MaxRetries() int {
return 3
}
// RetryRules returns the delay duration before retrying this request again
func (d *testRetryer) RetryRules(r *request.Request) time.Duration {
return time.Duration(time.Millisecond)
}
func (d *testRetryer) ShouldRetry(r *request.Request) bool {
d.shouldRetry = true
if r.Retryable != nil {
return *r.Retryable
}
if r.HTTPResponse.StatusCode >= 500 {
return true
}
return r.IsErrorRetryable()
}
func TestEnforceShouldRetryCheck(t *testing.T) {
tp := &http.Transport{
Proxy: http.ProxyFromEnvironment,
ResponseHeaderTimeout: 1 * time.Millisecond,
}
client := &http.Client{Transport: tp}
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// This server should wait forever. Requests will timeout and the SDK should
// attempt to retry.
select {}
}))
retryer := &testRetryer{}
s := awstesting.NewClient(&aws.Config{
Region: aws.String("mock-region"),
MaxRetries: aws.Int(0),
Endpoint: aws.String(server.URL),
DisableSSL: aws.Bool(true),
Retryer: retryer,
HTTPClient: client,
EnforceShouldRetryCheck: aws.Bool(true),
})
s.Handlers.Validate.Clear()
s.Handlers.Unmarshal.PushBack(unmarshal)
s.Handlers.UnmarshalError.PushBack(unmarshalError)
out := &testData{}
r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
err := r.Send()
if err == nil {
t.Fatalf("expect error, but got nil")
}
if e, a := 3, int(r.RetryCount); e != a {
t.Errorf("expect %d retry count, got %d", e, a)
}
if !retryer.shouldRetry {
t.Errorf("expect 'true' for ShouldRetry, but got %v", retryer.shouldRetry)
}
}
type errReader struct {
err error
}
func (reader *errReader) Read(b []byte) (int, error) {
return 0, reader.err
}
func (reader *errReader) Close() error {
return nil
}
func TestIsNoBodyReader(t *testing.T) {
cases := []struct {
reader io.ReadCloser
expect bool
}{
{ioutil.NopCloser(bytes.NewReader([]byte("abc"))), false},
{ioutil.NopCloser(bytes.NewReader(nil)), false},
{nil, false},
{request.NoBody, true},
}
for i, c := range cases {
if e, a := c.expect, request.NoBody == c.reader; e != a {
t.Errorf("%d, expect %t match, but was %t", i, e, a)
}
}
}
func TestRequest_TemporaryRetry(t *testing.T) {
done := make(chan struct{})
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Length", "1024")
w.WriteHeader(http.StatusOK)
w.Write(make([]byte, 100))
f := w.(http.Flusher)
f.Flush()
<-done
}))
client := &http.Client{
Timeout: 100 * time.Millisecond,
}
svc := awstesting.NewClient(&aws.Config{
Region: unit.Session.Config.Region,
MaxRetries: aws.Int(1),
HTTPClient: client,
DisableSSL: aws.Bool(true),
Endpoint: aws.String(server.URL),
})
req := svc.NewRequest(&request.Operation{
Name: "name", HTTPMethod: "GET", HTTPPath: "/path",
}, &struct{}{}, &struct{}{})
req.Handlers.Unmarshal.PushBack(func(r *request.Request) {
defer req.HTTPResponse.Body.Close()
_, err := io.Copy(ioutil.Discard, req.HTTPResponse.Body)
r.Error = awserr.New(request.ErrCodeSerialization, "error", err)
})
err := req.Send()
if err == nil {
t.Errorf("expect error, got none")
}
close(done)
aerr := err.(awserr.Error)
if e, a := request.ErrCodeSerialization, aerr.Code(); e != a {
t.Errorf("expect %q error code, got %q", e, a)
}
if e, a := 1, req.RetryCount; e != a {
t.Errorf("expect %d retries, got %d", e, a)
}
type temporary interface {
Temporary() bool
}
terr := aerr.OrigErr().(temporary)
if !terr.Temporary() {
t.Errorf("expect temporary error, was not")
}
}
func TestRequest_Presign(t *testing.T) {
presign := func(r *request.Request, expire time.Duration) (string, http.Header, error) {
u, err := r.Presign(expire)
return u, nil, err
}
presignRequest := func(r *request.Request, expire time.Duration) (string, http.Header, error) {
return r.PresignRequest(expire)
}
mustParseURL := func(v string) *url.URL {
u, err := url.Parse(v)
if err != nil {
panic(err)
}
return u
}
cases := []struct {
Expire time.Duration
PresignFn func(*request.Request, time.Duration) (string, http.Header, error)
SignerFn func(*request.Request)
URL string
Header http.Header
Err string
}{
{
PresignFn: presign,
Err: request.ErrCodeInvalidPresignExpire,
},
{
PresignFn: presignRequest,
Err: request.ErrCodeInvalidPresignExpire,
},
{
Expire: -1,
PresignFn: presign,
Err: request.ErrCodeInvalidPresignExpire,
},
{
// Presign clear NotHoist
Expire: 1 * time.Minute,
PresignFn: func(r *request.Request, dur time.Duration) (string, http.Header, error) {
r.NotHoist = true
return presign(r, dur)
},
SignerFn: func(r *request.Request) {
r.HTTPRequest.URL = mustParseURL("https://endpoint/presignedURL")
if r.NotHoist {
r.Error = fmt.Errorf("expect NotHoist to be cleared")
}
},
URL: "https://endpoint/presignedURL",
},
{
// PresignRequest does not clear NotHoist
Expire: 1 * time.Minute,
PresignFn: func(r *request.Request, dur time.Duration) (string, http.Header, error) {
r.NotHoist = true
return presignRequest(r, dur)
},
SignerFn: func(r *request.Request) {
r.HTTPRequest.URL = mustParseURL("https://endpoint/presignedURL")
if !r.NotHoist {
r.Error = fmt.Errorf("expect NotHoist not to be cleared")
}
},
URL: "https://endpoint/presignedURL",
},
{
// PresignRequest returns signed headers
Expire: 1 * time.Minute,
PresignFn: presignRequest,
SignerFn: func(r *request.Request) {
r.HTTPRequest.URL = mustParseURL("https://endpoint/presignedURL")
r.HTTPRequest.Header.Set("UnsigndHeader", "abc")
r.SignedHeaderVals = http.Header{
"X-Amzn-Header": []string{"abc", "123"},
"X-Amzn-Header2": []string{"efg", "456"},
}
},
URL: "https://endpoint/presignedURL",
Header: http.Header{
"X-Amzn-Header": []string{"abc", "123"},
"X-Amzn-Header2": []string{"efg", "456"},
},
},
}
svc := awstesting.NewClient()
svc.Handlers.Clear()
for i, c := range cases {
req := svc.NewRequest(&request.Operation{
Name: "name", HTTPMethod: "GET", HTTPPath: "/path",
}, &struct{}{}, &struct{}{})
req.Handlers.Sign.PushBack(c.SignerFn)
u, h, err := c.PresignFn(req, c.Expire)
if len(c.Err) != 0 {
if e, a := c.Err, err.Error(); !strings.Contains(a, e) {
t.Errorf("%d, expect %v to be in %v", i, e, a)
}
continue
} else if err != nil {
t.Errorf("%d, expect no error, got %v", i, err)
continue
}
if e, a := c.URL, u; e != a {
t.Errorf("%d, expect %v URL, got %v", i, e, a)
}
if e, a := c.Header, h; !reflect.DeepEqual(e, a) {
t.Errorf("%d, expect %v header got %v", i, e, a)
}
}
}
func TestNew_EndpointWithDefaultPort(t *testing.T) {
endpoint := "https://estest.us-east-1.es.amazonaws.com:443"
expectedRequestHost := "estest.us-east-1.es.amazonaws.com"
r := request.New(
aws.Config{},
metadata.ClientInfo{Endpoint: endpoint},
defaults.Handlers(),
client.DefaultRetryer{},
&request.Operation{},
nil,
nil,
)
if h := r.HTTPRequest.Host; h != expectedRequestHost {
t.Errorf("expect %v host, got %q", expectedRequestHost, h)
}
}
func TestSanitizeHostForHeader(t *testing.T) {
cases := []struct {
url string
expectedRequestHost string
}{
{"https://estest.us-east-1.es.amazonaws.com:443", "estest.us-east-1.es.amazonaws.com"},
{"https://estest.us-east-1.es.amazonaws.com", "estest.us-east-1.es.amazonaws.com"},
{"https://localhost:9200", "localhost:9200"},
{"http://localhost:80", "localhost"},
{"http://localhost:8080", "localhost:8080"},
}
for _, c := range cases {
r, _ := http.NewRequest("GET", c.url, nil)
request.SanitizeHostForHeader(r)
if h := r.Host; h != c.expectedRequestHost {
t.Errorf("expect %v host, got %q", c.expectedRequestHost, h)
}
}
}
func TestRequestWillRetry_ByBody(t *testing.T) {
svc := awstesting.NewClient()
cases := []struct {
WillRetry bool
HTTPMethod string
Body io.ReadSeeker
IsReqNoBody bool
}{
{
WillRetry: true,
HTTPMethod: "GET",
Body: bytes.NewReader([]byte{}),
IsReqNoBody: true,
},
{
WillRetry: true,
HTTPMethod: "GET",
Body: bytes.NewReader(nil),
IsReqNoBody: true,
},
{
WillRetry: true,
HTTPMethod: "POST",
Body: bytes.NewReader([]byte("abc123")),
},
{
WillRetry: true,
HTTPMethod: "POST",
Body: aws.ReadSeekCloser(bytes.NewReader([]byte("abc123"))),
},
{
WillRetry: true,
HTTPMethod: "GET",
Body: aws.ReadSeekCloser(bytes.NewBuffer(nil)),
IsReqNoBody: true,
},
{
WillRetry: true,
HTTPMethod: "POST",
Body: aws.ReadSeekCloser(bytes.NewBuffer(nil)),
IsReqNoBody: true,
},
{
WillRetry: false,
HTTPMethod: "POST",
Body: aws.ReadSeekCloser(bytes.NewBuffer([]byte("abc123"))),
},
}
for i, c := range cases {
req := svc.NewRequest(&request.Operation{
Name: "Operation",
HTTPMethod: c.HTTPMethod,
HTTPPath: "/",
}, nil, nil)
req.SetReaderBody(c.Body)
req.Build()
req.Error = fmt.Errorf("some error")
req.Retryable = aws.Bool(true)
req.HTTPResponse = &http.Response{
StatusCode: 500,
}
if e, a := c.IsReqNoBody, request.NoBody == req.HTTPRequest.Body; e != a {
t.Errorf("%d, expect request to be no body, %t, got %t, %T", i, e, a, req.HTTPRequest.Body)
}
if e, a := c.WillRetry, req.WillRetry(); e != a {
t.Errorf("%d, expect %t willRetry, got %t", i, e, a)
}
if req.Error == nil {
t.Fatalf("%d, expect error, got none", i)
}
if e, a := "some error", req.Error.Error(); !strings.Contains(a, e) {
t.Errorf("%d, expect %q error in %q", i, e, a)
}
if e, a := 0, req.RetryCount; e != a {
t.Errorf("%d, expect retry count to be %d, got %d", i, e, a)
}
}
}
func Test501NotRetrying(t *testing.T) {
reqNum := 0
reqs := []http.Response{
{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
{StatusCode: 501, Body: body(`{"__type":"NotImplemented","message":"An error occurred."}`)},
{StatusCode: 200, Body: body(`{"data":"valid"}`)},
}
s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10))
s.Handlers.Validate.Clear()
s.Handlers.Unmarshal.PushBack(unmarshal)
s.Handlers.UnmarshalError.PushBack(unmarshalError)
s.Handlers.Send.Clear() // mock sending
s.Handlers.Send.PushBack(func(r *request.Request) {
r.HTTPResponse = &reqs[reqNum]
reqNum++
})
out := &testData{}
r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
err := r.Send()
if err == nil {
t.Fatal("expect error, but got none")
}
aerr := err.(awserr.Error)
if e, a := "NotImplemented", aerr.Code(); e != a {
t.Errorf("expected error code %q, but received %q", e, a)
}
if e, a := 1, int(r.RetryCount); e != a {
t.Errorf("expect %d retry count, got %d", e, a)
}
}
func TestRequestNoConnection(t *testing.T) {
port, err := getFreePort()
if err != nil {
t.Fatalf("failed to get free port for test")
}
s := awstesting.NewClient(aws.NewConfig().
WithMaxRetries(10).
WithEndpoint("https://localhost:" + strconv.Itoa(port)).
WithSleepDelay(func(time.Duration) {}),
)
s.Handlers.Validate.Clear()
s.Handlers.Unmarshal.PushBack(unmarshal)
s.Handlers.UnmarshalError.PushBack(unmarshalError)
out := &testData{}
r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
if err = r.Send(); err == nil {
t.Fatal("expect error, but got none")
}
if e, a := 10, r.RetryCount; e != a {
t.Errorf("expect %v retry count, got %v", e, a)
}
}
func getFreePort() (int, error) {
l, err := net.Listen("tcp", ":0")
if err != nil {
return 0, err
}
defer l.Close()
strAddr := l.Addr().String()
parts := strings.Split(strAddr, ":")
strPort := parts[len(parts)-1]
port, err := strconv.ParseInt(strPort, 10, 32)
if err != nil {
return 0, err
}
return int(port), nil
}
 | 1 | 9,760 | nit, would be good to include a prefix for this log statement. | aws-aws-sdk-go | go |
@@ -57,4 +57,12 @@ describe('aria-required-attr', function () {
axe.commons.aria.requiredAttr = orig;
});
+ describe('options', function () {
+ it('should require provided attribute names', function () {
+ fixture.innerHTML = '<div role="slider" id="target"></div>';
+ var target = fixture.children[0];
+ assert.isFalse(checks['aria-required-attr'].evaluate.call(checkContext, target, ['aria-valuemax', 'aria-bats']));
+ assert.deepEqual(checkContext._data, ['aria-valuenow', 'aria-valuemax', 'aria-valuemin', 'aria-bats']);
+ });
+ });
}); | 1 | describe('aria-required-attr', function () {
'use strict';
var fixture = document.getElementById('fixture');
var checkContext = {
_data: null,
data: function (d) {
this._data = d;
}
};
afterEach(function () {
fixture.innerHTML = '';
checkContext._data = null;
});
it('should detect missing attributes', function () {
var node = document.createElement('div');
node.setAttribute('role', 'slider');
node.id = 'test';
node.tabIndex = 1;
fixture.appendChild(node);
assert.isFalse(checks['aria-required-attr'].evaluate.call(checkContext, node));
assert.deepEqual(checkContext._data, ['aria-valuenow', 'aria-valuemax', 'aria-valuemin']);
});
it('should return true if there is no role', function () {
var node = document.createElement('div');
fixture.appendChild(node);
assert.isTrue(checks['aria-required-attr'].evaluate.call(checkContext, node));
assert.isNull(checkContext._data);
});
it('should determine attribute validity by calling axe.commons.aria.requiredAttr', function () {
var node = document.createElement('div');
node.id = 'test';
node.tabIndex = 1;
node.setAttribute('role', 'cats');
node.setAttribute('aria-cats', 'maybe');
fixture.appendChild(node);
var orig = axe.commons.aria.requiredAttr;
var called = 0;
axe.commons.aria.requiredAttr = function (role) {
assert.equal(role, 'cats');
called++;
return ['aria-cats', 'aria-bats'];
};
assert.isFalse(checks['aria-required-attr'].evaluate.call(checkContext, node));
assert.deepEqual(checkContext._data, ['aria-bats']);
assert.equal(called, 1);
axe.commons.aria.requiredAttr = orig;
});
}); | 1 | 11,982 | Same here, you should use a made up role. May I suggest `role="McCheddarton"`? :) | dequelabs-axe-core | js |
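A minimal sketch of the reviewer's suggestion — an invented role so the expectation no longer depends on the real `slider` definition. The expected `_data` below is an assumption: it presumes the option-supplied names are treated as required regardless of the role.

```js
describe('options', function () {
  it('should require provided attribute names', function () {
    // made-up role: no required attributes come from the role itself, so only
    // the names passed through options should be reported as missing
    fixture.innerHTML = '<div role="mccheddarton" id="target"></div>';
    var target = fixture.children[0];
    assert.isFalse(checks['aria-required-attr'].evaluate.call(
      checkContext, target, ['aria-valuemax', 'aria-bats']));
    assert.deepEqual(checkContext._data, ['aria-valuemax', 'aria-bats']);
  });
});
```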
@@ -372,7 +372,9 @@ class SparkWrite {
}
Expression conflictDetectionFilter = conflictDetectionFilter();
- overwriteFiles.validateNoConflictingAppends(conflictDetectionFilter);
+ overwriteFiles.conflictDetectionFilter(conflictDetectionFilter);
+ overwriteFiles.validateNoConflictingData();
+ overwriteFiles.validateNoConflictingDeletes();
String commitMsg = String.format(
"overwrite of %d data files with %d new data files, scanSnapshotId: %d, conflictDetectionFilter: %s", | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark.source;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.iceberg.AppendFiles;
import org.apache.iceberg.ContentFile;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.FileScanTask;
import org.apache.iceberg.IsolationLevel;
import org.apache.iceberg.OverwriteFiles;
import org.apache.iceberg.PartitionKey;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.ReplacePartitions;
import org.apache.iceberg.Schema;
import org.apache.iceberg.SerializableTable;
import org.apache.iceberg.Snapshot;
import org.apache.iceberg.SnapshotSummary;
import org.apache.iceberg.SnapshotUpdate;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.encryption.EncryptedOutputFile;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.io.ClusteredDataWriter;
import org.apache.iceberg.io.DataWriteResult;
import org.apache.iceberg.io.FanoutDataWriter;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.FileWriter;
import org.apache.iceberg.io.OutputFileFactory;
import org.apache.iceberg.io.PartitioningWriter;
import org.apache.iceberg.io.RollingDataWriter;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.spark.FileRewriteCoordinator;
import org.apache.iceberg.spark.SparkWriteOptions;
import org.apache.iceberg.util.PropertyUtil;
import org.apache.iceberg.util.Tasks;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.connector.write.BatchWrite;
import org.apache.spark.sql.connector.write.DataWriter;
import org.apache.spark.sql.connector.write.DataWriterFactory;
import org.apache.spark.sql.connector.write.LogicalWriteInfo;
import org.apache.spark.sql.connector.write.PhysicalWriteInfo;
import org.apache.spark.sql.connector.write.WriterCommitMessage;
import org.apache.spark.sql.connector.write.streaming.StreamingDataWriterFactory;
import org.apache.spark.sql.connector.write.streaming.StreamingWrite;
import org.apache.spark.sql.types.StructType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.iceberg.IsolationLevel.SERIALIZABLE;
import static org.apache.iceberg.IsolationLevel.SNAPSHOT;
import static org.apache.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS;
import static org.apache.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS_DEFAULT;
import static org.apache.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS;
import static org.apache.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS_DEFAULT;
import static org.apache.iceberg.TableProperties.COMMIT_NUM_RETRIES;
import static org.apache.iceberg.TableProperties.COMMIT_NUM_RETRIES_DEFAULT;
import static org.apache.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS;
import static org.apache.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT;
import static org.apache.iceberg.TableProperties.DEFAULT_FILE_FORMAT;
import static org.apache.iceberg.TableProperties.DEFAULT_FILE_FORMAT_DEFAULT;
import static org.apache.iceberg.TableProperties.SPARK_WRITE_PARTITIONED_FANOUT_ENABLED;
import static org.apache.iceberg.TableProperties.SPARK_WRITE_PARTITIONED_FANOUT_ENABLED_DEFAULT;
import static org.apache.iceberg.TableProperties.WRITE_TARGET_FILE_SIZE_BYTES;
import static org.apache.iceberg.TableProperties.WRITE_TARGET_FILE_SIZE_BYTES_DEFAULT;
class SparkWrite {
private static final Logger LOG = LoggerFactory.getLogger(SparkWrite.class);
private final JavaSparkContext sparkContext;
private final Table table;
private final String queryId;
private final FileFormat format;
private final String applicationId;
private final String wapId;
private final long targetFileSize;
private final Schema writeSchema;
private final StructType dsSchema;
private final Map<String, String> extraSnapshotMetadata;
private final boolean partitionedFanoutEnabled;
SparkWrite(SparkSession spark, Table table, LogicalWriteInfo writeInfo,
String applicationId, String wapId,
Schema writeSchema, StructType dsSchema) {
this.sparkContext = JavaSparkContext.fromSparkContext(spark.sparkContext());
this.table = table;
this.queryId = writeInfo.queryId();
this.format = getFileFormat(table.properties(), writeInfo.options());
this.applicationId = applicationId;
this.wapId = wapId;
this.writeSchema = writeSchema;
this.dsSchema = dsSchema;
this.extraSnapshotMetadata = Maps.newHashMap();
writeInfo.options().forEach((key, value) -> {
if (key.startsWith(SnapshotSummary.EXTRA_METADATA_PREFIX)) {
extraSnapshotMetadata.put(key.substring(SnapshotSummary.EXTRA_METADATA_PREFIX.length()), value);
}
});
long tableTargetFileSize = PropertyUtil.propertyAsLong(
table.properties(), WRITE_TARGET_FILE_SIZE_BYTES, WRITE_TARGET_FILE_SIZE_BYTES_DEFAULT);
this.targetFileSize = writeInfo.options().getLong(SparkWriteOptions.TARGET_FILE_SIZE_BYTES, tableTargetFileSize);
boolean tablePartitionedFanoutEnabled = PropertyUtil.propertyAsBoolean(
table.properties(), SPARK_WRITE_PARTITIONED_FANOUT_ENABLED, SPARK_WRITE_PARTITIONED_FANOUT_ENABLED_DEFAULT);
this.partitionedFanoutEnabled = writeInfo.options()
.getBoolean(SparkWriteOptions.FANOUT_ENABLED, tablePartitionedFanoutEnabled);
}
BatchWrite asBatchAppend() {
return new BatchAppend();
}
BatchWrite asDynamicOverwrite() {
return new DynamicOverwrite();
}
BatchWrite asOverwriteByFilter(Expression overwriteExpr) {
return new OverwriteByFilter(overwriteExpr);
}
BatchWrite asCopyOnWriteMergeWrite(SparkMergeScan scan, IsolationLevel isolationLevel) {
return new CopyOnWriteMergeWrite(scan, isolationLevel);
}
BatchWrite asRewrite(String fileSetID) {
return new RewriteFiles(fileSetID);
}
StreamingWrite asStreamingAppend() {
return new StreamingAppend();
}
StreamingWrite asStreamingOverwrite() {
return new StreamingOverwrite();
}
private FileFormat getFileFormat(Map<String, String> tableProperties, Map<String, String> options) {
Optional<String> formatOption = Optional.ofNullable(options.get(SparkWriteOptions.WRITE_FORMAT));
String formatString = formatOption
.orElseGet(() -> tableProperties.getOrDefault(DEFAULT_FILE_FORMAT, DEFAULT_FILE_FORMAT_DEFAULT));
return FileFormat.valueOf(formatString.toUpperCase(Locale.ENGLISH));
}
private boolean isWapTable() {
return Boolean.parseBoolean(table.properties().getOrDefault(
TableProperties.WRITE_AUDIT_PUBLISH_ENABLED, TableProperties.WRITE_AUDIT_PUBLISH_ENABLED_DEFAULT));
}
// the writer factory works for both batch and streaming
private WriterFactory createWriterFactory() {
// broadcast the table metadata as the writer factory will be sent to executors
Broadcast<Table> tableBroadcast = sparkContext.broadcast(SerializableTable.copyOf(table));
return new WriterFactory(tableBroadcast, format, targetFileSize, writeSchema, dsSchema, partitionedFanoutEnabled);
}
private void commitOperation(SnapshotUpdate<?> operation, String description) {
LOG.info("Committing {} to table {}", description, table);
if (applicationId != null) {
operation.set("spark.app.id", applicationId);
}
if (!extraSnapshotMetadata.isEmpty()) {
extraSnapshotMetadata.forEach(operation::set);
}
if (isWapTable() && wapId != null) {
// write-audit-publish is enabled for this table and job
// stage the changes without changing the current snapshot
operation.set(SnapshotSummary.STAGED_WAP_ID_PROP, wapId);
operation.stageOnly();
}
long start = System.currentTimeMillis();
operation.commit(); // abort is automatically called if this fails
long duration = System.currentTimeMillis() - start;
LOG.info("Committed in {} ms", duration);
}
private void abort(WriterCommitMessage[] messages) {
Map<String, String> props = table.properties();
Tasks.foreach(files(messages))
.retry(PropertyUtil.propertyAsInt(props, COMMIT_NUM_RETRIES, COMMIT_NUM_RETRIES_DEFAULT))
.exponentialBackoff(
PropertyUtil.propertyAsInt(props, COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT),
PropertyUtil.propertyAsInt(props, COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT),
PropertyUtil.propertyAsInt(props, COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT),
2.0 /* exponential */)
.throwFailureWhenFinished()
.run(file -> {
table.io().deleteFile(file.path().toString());
});
}
private Iterable<DataFile> files(WriterCommitMessage[] messages) {
if (messages.length > 0) {
return Iterables.concat(Iterables.transform(Arrays.asList(messages), message -> message != null ?
ImmutableList.copyOf(((TaskCommit) message).files()) :
ImmutableList.of()));
}
return ImmutableList.of();
}
private abstract class BaseBatchWrite implements BatchWrite {
@Override
public DataWriterFactory createBatchWriterFactory(PhysicalWriteInfo info) {
return createWriterFactory();
}
@Override
public void abort(WriterCommitMessage[] messages) {
SparkWrite.this.abort(messages);
}
@Override
public String toString() {
return String.format("IcebergBatchWrite(table=%s, format=%s)", table, format);
}
}
private class BatchAppend extends BaseBatchWrite {
@Override
public void commit(WriterCommitMessage[] messages) {
AppendFiles append = table.newAppend();
int numFiles = 0;
for (DataFile file : files(messages)) {
numFiles += 1;
append.appendFile(file);
}
commitOperation(append, String.format("append with %d new data files", numFiles));
}
}
private class DynamicOverwrite extends BaseBatchWrite {
@Override
public void commit(WriterCommitMessage[] messages) {
Iterable<DataFile> files = files(messages);
if (!files.iterator().hasNext()) {
LOG.info("Dynamic overwrite is empty, skipping commit");
return;
}
ReplacePartitions dynamicOverwrite = table.newReplacePartitions();
int numFiles = 0;
for (DataFile file : files) {
numFiles += 1;
dynamicOverwrite.addFile(file);
}
commitOperation(dynamicOverwrite, String.format("dynamic partition overwrite with %d new data files", numFiles));
}
}
private class OverwriteByFilter extends BaseBatchWrite {
private final Expression overwriteExpr;
private OverwriteByFilter(Expression overwriteExpr) {
this.overwriteExpr = overwriteExpr;
}
@Override
public void commit(WriterCommitMessage[] messages) {
OverwriteFiles overwriteFiles = table.newOverwrite();
overwriteFiles.overwriteByRowFilter(overwriteExpr);
int numFiles = 0;
for (DataFile file : files(messages)) {
numFiles += 1;
overwriteFiles.addFile(file);
}
String commitMsg = String.format("overwrite by filter %s with %d new data files", overwriteExpr, numFiles);
commitOperation(overwriteFiles, commitMsg);
}
}
private class CopyOnWriteMergeWrite extends BaseBatchWrite {
private final SparkMergeScan scan;
private final IsolationLevel isolationLevel;
private CopyOnWriteMergeWrite(SparkMergeScan scan, IsolationLevel isolationLevel) {
this.scan = scan;
this.isolationLevel = isolationLevel;
}
private List<DataFile> overwrittenFiles() {
return scan.files().stream().map(FileScanTask::file).collect(Collectors.toList());
}
private Expression conflictDetectionFilter() {
// the list of filter expressions may be empty but is never null
List<Expression> scanFilterExpressions = scan.filterExpressions();
Expression filter = Expressions.alwaysTrue();
for (Expression expr : scanFilterExpressions) {
filter = Expressions.and(filter, expr);
}
return filter;
}
@Override
public void commit(WriterCommitMessage[] messages) {
OverwriteFiles overwriteFiles = table.newOverwrite();
List<DataFile> overwrittenFiles = overwrittenFiles();
int numOverwrittenFiles = overwrittenFiles.size();
for (DataFile overwrittenFile : overwrittenFiles) {
overwriteFiles.deleteFile(overwrittenFile);
}
int numAddedFiles = 0;
for (DataFile file : files(messages)) {
numAddedFiles += 1;
overwriteFiles.addFile(file);
}
if (isolationLevel == SERIALIZABLE) {
commitWithSerializableIsolation(overwriteFiles, numOverwrittenFiles, numAddedFiles);
} else if (isolationLevel == SNAPSHOT) {
commitWithSnapshotIsolation(overwriteFiles, numOverwrittenFiles, numAddedFiles);
} else {
throw new IllegalArgumentException("Unsupported isolation level: " + isolationLevel);
}
}
private void commitWithSerializableIsolation(OverwriteFiles overwriteFiles,
int numOverwrittenFiles,
int numAddedFiles) {
Long scanSnapshotId = scan.snapshotId();
if (scanSnapshotId != null) {
overwriteFiles.validateFromSnapshot(scanSnapshotId);
}
Expression conflictDetectionFilter = conflictDetectionFilter();
overwriteFiles.validateNoConflictingAppends(conflictDetectionFilter);
String commitMsg = String.format(
"overwrite of %d data files with %d new data files, scanSnapshotId: %d, conflictDetectionFilter: %s",
numOverwrittenFiles, numAddedFiles, scanSnapshotId, conflictDetectionFilter);
commitOperation(overwriteFiles, commitMsg);
}
private void commitWithSnapshotIsolation(OverwriteFiles overwriteFiles,
int numOverwrittenFiles,
int numAddedFiles) {
String commitMsg = String.format(
"overwrite of %d data files with %d new data files",
numOverwrittenFiles, numAddedFiles);
commitOperation(overwriteFiles, commitMsg);
}
}
private class RewriteFiles extends BaseBatchWrite {
private final String fileSetID;
private RewriteFiles(String fileSetID) {
this.fileSetID = fileSetID;
}
@Override
public void commit(WriterCommitMessage[] messages) {
FileRewriteCoordinator coordinator = FileRewriteCoordinator.get();
Set<DataFile> newDataFiles = Sets.newHashSetWithExpectedSize(messages.length);
for (DataFile file : files(messages)) {
newDataFiles.add(file);
}
coordinator.stageRewrite(table, fileSetID, Collections.unmodifiableSet(newDataFiles));
}
}
private abstract class BaseStreamingWrite implements StreamingWrite {
private static final String QUERY_ID_PROPERTY = "spark.sql.streaming.queryId";
private static final String EPOCH_ID_PROPERTY = "spark.sql.streaming.epochId";
protected abstract String mode();
@Override
public StreamingDataWriterFactory createStreamingWriterFactory(PhysicalWriteInfo info) {
return createWriterFactory();
}
@Override
public final void commit(long epochId, WriterCommitMessage[] messages) {
LOG.info("Committing epoch {} for query {} in {} mode", epochId, queryId, mode());
table.refresh();
Long lastCommittedEpochId = findLastCommittedEpochId();
if (lastCommittedEpochId != null && epochId <= lastCommittedEpochId) {
LOG.info("Skipping epoch {} for query {} as it was already committed", epochId, queryId);
return;
}
doCommit(epochId, messages);
}
protected abstract void doCommit(long epochId, WriterCommitMessage[] messages);
protected <T> void commit(SnapshotUpdate<T> snapshotUpdate, long epochId, String description) {
snapshotUpdate.set(QUERY_ID_PROPERTY, queryId);
snapshotUpdate.set(EPOCH_ID_PROPERTY, Long.toString(epochId));
commitOperation(snapshotUpdate, description);
}
private Long findLastCommittedEpochId() {
Snapshot snapshot = table.currentSnapshot();
Long lastCommittedEpochId = null;
while (snapshot != null) {
Map<String, String> summary = snapshot.summary();
String snapshotQueryId = summary.get(QUERY_ID_PROPERTY);
if (queryId.equals(snapshotQueryId)) {
lastCommittedEpochId = Long.valueOf(summary.get(EPOCH_ID_PROPERTY));
break;
}
Long parentSnapshotId = snapshot.parentId();
snapshot = parentSnapshotId != null ? table.snapshot(parentSnapshotId) : null;
}
return lastCommittedEpochId;
}
@Override
public void abort(long epochId, WriterCommitMessage[] messages) {
SparkWrite.this.abort(messages);
}
@Override
public String toString() {
return String.format("IcebergStreamingWrite(table=%s, format=%s)", table, format);
}
}
private class StreamingAppend extends BaseStreamingWrite {
@Override
protected String mode() {
return "append";
}
@Override
protected void doCommit(long epochId, WriterCommitMessage[] messages) {
AppendFiles append = table.newFastAppend();
int numFiles = 0;
for (DataFile file : files(messages)) {
append.appendFile(file);
numFiles++;
}
commit(append, epochId, String.format("streaming append with %d new data files", numFiles));
}
}
private class StreamingOverwrite extends BaseStreamingWrite {
@Override
protected String mode() {
return "complete";
}
@Override
public void doCommit(long epochId, WriterCommitMessage[] messages) {
OverwriteFiles overwriteFiles = table.newOverwrite();
overwriteFiles.overwriteByRowFilter(Expressions.alwaysTrue());
int numFiles = 0;
for (DataFile file : files(messages)) {
overwriteFiles.addFile(file);
numFiles++;
}
commit(overwriteFiles, epochId, String.format("streaming complete overwrite with %d new data files", numFiles));
}
}
public static class TaskCommit implements WriterCommitMessage {
private final DataFile[] taskFiles;
TaskCommit(DataFile[] taskFiles) {
this.taskFiles = taskFiles;
}
DataFile[] files() {
return taskFiles;
}
}
private static class WriterFactory implements DataWriterFactory, StreamingDataWriterFactory {
private final Broadcast<Table> tableBroadcast;
private final FileFormat format;
private final long targetFileSize;
private final Schema writeSchema;
private final StructType dsSchema;
private final boolean partitionedFanoutEnabled;
protected WriterFactory(Broadcast<Table> tableBroadcast, FileFormat format, long targetFileSize,
Schema writeSchema, StructType dsSchema, boolean partitionedFanoutEnabled) {
this.tableBroadcast = tableBroadcast;
this.format = format;
this.targetFileSize = targetFileSize;
this.writeSchema = writeSchema;
this.dsSchema = dsSchema;
this.partitionedFanoutEnabled = partitionedFanoutEnabled;
}
@Override
public DataWriter<InternalRow> createWriter(int partitionId, long taskId) {
return createWriter(partitionId, taskId, 0);
}
@Override
public DataWriter<InternalRow> createWriter(int partitionId, long taskId, long epochId) {
Table table = tableBroadcast.value();
PartitionSpec spec = table.spec();
FileIO io = table.io();
OutputFileFactory fileFactory = OutputFileFactory.builderFor(table, partitionId, taskId)
.format(format)
.build();
SparkFileWriterFactory writerFactory = SparkFileWriterFactory.builderFor(table)
.dataFileFormat(format)
.dataSchema(writeSchema)
.dataSparkType(dsSchema)
.build();
if (spec.isUnpartitioned()) {
return new UnpartitionedDataWriter(writerFactory, fileFactory, io, spec, format, targetFileSize);
} else {
return new PartitionedDataWriter(
writerFactory, fileFactory, io, spec, writeSchema, dsSchema,
format, targetFileSize, partitionedFanoutEnabled);
}
}
}
private static <T extends ContentFile<T>> void deleteFiles(FileIO io, List<T> files) {
Tasks.foreach(files)
.throwFailureWhenFinished()
.noRetry()
.run(file -> io.deleteFile(file.path().toString()));
}
private static class UnpartitionedDataWriter implements DataWriter<InternalRow> {
private final FileWriter<InternalRow, DataWriteResult> delegate;
private final FileIO io;
private UnpartitionedDataWriter(SparkFileWriterFactory writerFactory, OutputFileFactory fileFactory,
FileIO io, PartitionSpec spec, FileFormat format, long targetFileSize) {
// TODO: support ORC rolling writers
if (format == FileFormat.ORC) {
EncryptedOutputFile outputFile = fileFactory.newOutputFile();
delegate = writerFactory.newDataWriter(outputFile, spec, null);
} else {
delegate = new RollingDataWriter<>(writerFactory, fileFactory, io, targetFileSize, spec, null);
}
this.io = io;
}
@Override
public void write(InternalRow record) throws IOException {
delegate.write(record);
}
@Override
public WriterCommitMessage commit() throws IOException {
close();
DataWriteResult result = delegate.result();
return new TaskCommit(result.dataFiles().toArray(new DataFile[0]));
}
@Override
public void abort() throws IOException {
close();
DataWriteResult result = delegate.result();
deleteFiles(io, result.dataFiles());
}
@Override
public void close() throws IOException {
delegate.close();
}
}
private static class PartitionedDataWriter implements DataWriter<InternalRow> {
private final PartitioningWriter<InternalRow, DataWriteResult> delegate;
private final FileIO io;
private final PartitionSpec spec;
private final PartitionKey partitionKey;
private final InternalRowWrapper internalRowWrapper;
private PartitionedDataWriter(SparkFileWriterFactory writerFactory, OutputFileFactory fileFactory,
FileIO io, PartitionSpec spec, Schema dataSchema,
StructType dataSparkType, FileFormat format,
long targetFileSize, boolean fanoutEnabled) {
if (fanoutEnabled) {
this.delegate = new FanoutDataWriter<>(writerFactory, fileFactory, io, format, targetFileSize);
} else {
this.delegate = new ClusteredDataWriter<>(writerFactory, fileFactory, io, format, targetFileSize);
}
this.io = io;
this.spec = spec;
this.partitionKey = new PartitionKey(spec, dataSchema);
this.internalRowWrapper = new InternalRowWrapper(dataSparkType);
}
@Override
public void write(InternalRow row) throws IOException {
partitionKey.partition(internalRowWrapper.wrap(row));
delegate.write(row, spec, partitionKey);
}
@Override
public WriterCommitMessage commit() throws IOException {
close();
DataWriteResult result = delegate.result();
return new TaskCommit(result.dataFiles().toArray(new DataFile[0]));
}
@Override
public void abort() throws IOException {
close();
DataWriteResult result = delegate.result();
deleteFiles(io, result.dataFiles());
}
@Override
public void close() throws IOException {
delegate.close();
}
}
}
| 1 | 42,619 | @aokolnychyi, shouldn't this check whether the operation is a delete? If this is invoked by `DELETE FROM` then we don't need to validate conflicting deletes. | apache-iceberg | java |
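One way the question could be acted on — a sketch only, where `RowLevelCommand` and the `command` field are invented stand-ins for however the write would learn which row-level operation it is serving; the `OverwriteFiles` calls themselves are the ones used in the patch above.

```java
private void commitWithSerializableIsolation(OverwriteFiles overwriteFiles,
                                             int numOverwrittenFiles,
                                             int numAddedFiles) {
  Long scanSnapshotId = scan.snapshotId();
  if (scanSnapshotId != null) {
    overwriteFiles.validateFromSnapshot(scanSnapshotId);
  }

  Expression conflictDetectionFilter = conflictDetectionFilter();
  overwriteFiles.conflictDetectionFilter(conflictDetectionFilter);
  overwriteFiles.validateNoConflictingData();

  // Hypothetical: only validate conflicting deletes for UPDATE/MERGE. A DELETE
  // that races with another DELETE of the same rows does not produce incorrect
  // results, so the extra validation could be skipped there.
  if (command != RowLevelCommand.DELETE) {
    overwriteFiles.validateNoConflictingDeletes();
  }

  String commitMsg = String.format(
      "overwrite of %d data files with %d new data files, scanSnapshotId: %d, conflictDetectionFilter: %s",
      numOverwrittenFiles, numAddedFiles, scanSnapshotId, conflictDetectionFilter);
  commitOperation(overwriteFiles, commitMsg);
}
```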
@@ -11,8 +11,9 @@ import (
"reflect"
"sync"
- "github.com/keybase/kbfs/kbfscodec"
"golang.org/x/net/context"
+
+ "github.com/keybase/kbfs/kbfscodec"
)
const ( | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"container/heap"
"errors"
"io"
"reflect"
"sync"
"github.com/keybase/kbfs/kbfscodec"
"golang.org/x/net/context"
)
const (
defaultBlockRetrievalWorkerQueueSize int = 100
testBlockRetrievalWorkerQueueSize int = 5
defaultOnDemandRequestPriority int = 100
)
// blockRetrievalRequest represents one consumer's request for a block.
type blockRetrievalRequest struct {
block Block
doneCh chan error
}
// blockRetrieval contains the metadata for a given block retrieval. May
// represent many requests, all of which will be handled at once.
type blockRetrieval struct {
//// Retrieval Metadata
// the block pointer to retrieve
blockPtr BlockPointer
// the key metadata for the request
kmd KeyMetadata
// the context encapsulating all request contexts
ctx *CoalescingContext
// cancel function for the context
cancelFunc context.CancelFunc
// protects requests
reqMtx sync.RWMutex
// the individual requests for this block pointer: they must be notified
// once the block is returned
requests []*blockRetrievalRequest
//// Queueing Metadata
// the index of the retrieval in the heap
index int
// the priority of the retrieval: larger priorities are processed first
priority int
// state of global request counter when this retrieval was created;
// maintains FIFO
insertionOrder uint64
}
// blockPtrLookup is used to uniquely identify block retrieval requests. The
// reflect.Type is needed because sometimes a request is placed concurrently
// for a specific block type and a generic block type. The requests will both
// cause a retrieval, but branching on type allows us to avoid special casing
// the code.
type blockPtrLookup struct {
bp BlockPointer
t reflect.Type
}
// blockRetrievalQueue manages block retrieval requests. Higher priority
// requests are executed first. Requests are executed in FIFO order within a
// given priority level.
type blockRetrievalQueue struct {
// protects ptrs, insertionCount, and the heap
mtx sync.RWMutex
// queued or in progress retrievals
ptrs map[blockPtrLookup]*blockRetrieval
// global counter of insertions to queue
// capacity: ~584 years at 1 billion requests/sec
insertionCount uint64
heap *blockRetrievalHeap
// This is a channel of channels to maximize the time that each request is
// in the heap, allowing preemption as long as possible. This way, a
// request only exits the heap once a worker is ready.
workerQueue chan chan *blockRetrieval
// channel to be closed when we're done accepting requests
doneCh chan struct{}
codec kbfscodec.Codec
}
// newBlockRetrievalQueue creates a new block retrieval queue. The numWorkers
// parameter determines how many workers can concurrently call WorkOnRequest
// (more than numWorkers will block).
func newBlockRetrievalQueue(numWorkers int, codec kbfscodec.Codec) *blockRetrievalQueue {
return &blockRetrievalQueue{
ptrs: make(map[blockPtrLookup]*blockRetrieval),
heap: &blockRetrievalHeap{},
workerQueue: make(chan chan *blockRetrieval, numWorkers),
doneCh: make(chan struct{}),
codec: codec,
}
}
func (brq *blockRetrievalQueue) popIfNotEmpty() *blockRetrieval {
brq.mtx.Lock()
defer brq.mtx.Unlock()
if brq.heap.Len() > 0 {
return heap.Pop(brq.heap).(*blockRetrieval)
}
return nil
}
// notifyWorker notifies workers that there is a new request for processing.
func (brq *blockRetrievalQueue) notifyWorker() {
go func() {
select {
case <-brq.doneCh:
retrieval := brq.popIfNotEmpty()
if retrieval != nil {
brq.FinalizeRequest(retrieval, nil, io.EOF)
}
// Get the next queued worker
case ch := <-brq.workerQueue:
ch <- brq.popIfNotEmpty()
}
}()
}
// Request submits a block request to the queue.
func (brq *blockRetrievalQueue) Request(ctx context.Context, priority int, kmd KeyMetadata, ptr BlockPointer, block Block) <-chan error {
// Only continue if we haven't been shut down
ch := make(chan error, 1)
select {
case <-brq.doneCh:
ch <- io.EOF
return ch
default:
}
if block == nil {
ch <- errors.New("nil block passed to blockRetrievalQueue.Request")
return ch
}
bpLookup := blockPtrLookup{ptr, reflect.TypeOf(block)}
brq.mtx.Lock()
defer brq.mtx.Unlock()
// Might have to retry if the context has been canceled.
// This loop will iterate a maximum of 2 times. It either hits the `return`
// statement at the bottom on the first iteration, or the `continue`
// statement first which causes it to `return` on the next iteration.
for {
br, exists := brq.ptrs[bpLookup]
if !exists {
// Add to the heap
br = &blockRetrieval{
blockPtr: ptr,
kmd: kmd,
index: -1,
priority: priority,
insertionOrder: brq.insertionCount,
}
br.ctx, br.cancelFunc = NewCoalescingContext(ctx)
brq.insertionCount++
brq.ptrs[bpLookup] = br
heap.Push(brq.heap, br)
defer brq.notifyWorker()
} else {
err := br.ctx.AddContext(ctx)
if err == context.Canceled {
// We need to delete the request pointer, but we'll still let the
// existing request be processed by a worker.
delete(brq.ptrs, bpLookup)
continue
}
}
br.reqMtx.Lock()
br.requests = append(br.requests, &blockRetrievalRequest{
block: block,
doneCh: ch,
})
br.reqMtx.Unlock()
// If the new request priority is higher, elevate the retrieval in the
// queue. Skip this if the request is no longer in the queue (which means
// it's actively being processed).
if br.index != -1 && priority > br.priority {
br.priority = priority
heap.Fix(brq.heap, br.index)
}
return ch
}
}
// WorkOnRequest returns a new channel for a worker to obtain a blockRetrieval.
func (brq *blockRetrievalQueue) WorkOnRequest() <-chan *blockRetrieval {
ch := make(chan *blockRetrieval, 1)
brq.workerQueue <- ch
return ch
}
// FinalizeRequest is the last step of a retrieval request once a block has
// been obtained. It removes the request from the blockRetrievalQueue,
// preventing more requests from mutating the retrieval, then notifies all
// subscribed requests.
func (brq *blockRetrievalQueue) FinalizeRequest(retrieval *blockRetrieval, block Block, err error) {
brq.mtx.Lock()
// This might have already been removed if the context has been canceled.
// That's okay, because this will then be a no-op.
bpLookup := blockPtrLookup{retrieval.blockPtr, reflect.TypeOf(block)}
delete(brq.ptrs, bpLookup)
brq.mtx.Unlock()
defer retrieval.cancelFunc()
// This is a symbolic lock, since there shouldn't be any other goroutines
// accessing requests at this point. But requests had contentious access
// earlier, so we'll lock it here as well to maintain the integrity of the
// lock.
retrieval.reqMtx.Lock()
defer retrieval.reqMtx.Unlock()
for _, r := range retrieval.requests {
req := r
if block != nil {
// Copy the decrypted block to the caller
req.block.Set(block, brq.codec)
}
// Since we created this channel with a buffer size of 1, this won't block.
req.doneCh <- err
}
}
// Shutdown is called when we are no longer accepting requests
func (brq *blockRetrievalQueue) Shutdown() {
select {
case <-brq.doneCh:
default:
close(brq.doneCh)
}
}
| 1 | 14,731 | Any reason to split up this import block? I think most files have them together in one block, right? | keybase-kbfs | go |
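For reference, the grouping the reviewer is asking to keep — standard library first, then a single block for all third-party imports:

```go
import (
	"container/heap"
	"errors"
	"io"
	"reflect"
	"sync"

	"github.com/keybase/kbfs/kbfscodec"
	"golang.org/x/net/context"
)
```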
@@ -109,6 +109,18 @@ namespace Datadog.Trace.AppSec
internal SecuritySettings Settings => _settings;
+ private static void TagSpan(Span span)
+ {
+ // we should only tag service entry span, the first span opened for a
+ // service. For WAF it's safe to assume we always have service entry spans
+ // we'll need to revisit this for RASP.
+ if (span != null)
+ {
+ span.SetMetric(Tags.AppSecEnabled, 1.0);
+ span.SetTag(Tags.RuntimeFamily, TracerConstants.Language);
+ }
+ }
+
/// <summary>
        /// Frees resources
/// </summary> | 1 | // <copyright file="Security.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using Datadog.Trace.AppSec.Agent;
using Datadog.Trace.AppSec.EventModel;
using Datadog.Trace.AppSec.Transport;
using Datadog.Trace.AppSec.Waf;
using Datadog.Trace.ExtensionMethods;
using Datadog.Trace.Logging;
using Datadog.Trace.Vendors.Serilog.Events;
namespace Datadog.Trace.AppSec
{
/// <summary>
    /// The Security class is responsible for coordinating app sec
/// </summary>
internal class Security : IDatadogSecurity, IDisposable
{
private static readonly IDatadogLogger Log = DatadogLogging.GetLoggerFor<Security>();
private static Security _instance;
private static bool _globalInstanceInitialized;
private static object _globalInstanceLock = new();
private readonly IWaf _powerWaf;
private readonly IAppSecAgentWriter _agentWriter;
private readonly InstrumentationGateway _instrumentationGateway;
private readonly SecuritySettings _settings;
private readonly ConcurrentDictionary<Guid, Action> toExecute = new();
/// <summary>
/// Initializes a new instance of the <see cref="Security"/> class with default settings.
/// </summary>
public Security()
: this(null, null)
{
}
private Security(SecuritySettings settings = null, InstrumentationGateway instrumentationGateway = null, IWaf powerWaf = null, IAppSecAgentWriter agentWriter = null)
{
try
{
_settings = settings ?? SecuritySettings.FromDefaultSources();
_instrumentationGateway = instrumentationGateway ?? new InstrumentationGateway();
_settings.Enabled = _settings.Enabled && AreArchitectureAndOsSupported();
if (_settings.Enabled)
{
_powerWaf = powerWaf ?? Waf.Waf.Initialize(_settings.Rules);
if (_powerWaf != null)
{
_agentWriter = agentWriter ?? new AppSecAgentWriter();
_instrumentationGateway.InstrumentationGatewayEvent += InstrumentationGatewayInstrumentationGatewayEvent;
}
else
{
_settings.Enabled = false;
}
LifetimeManager.Instance.AddShutdownTask(RunShutdown);
}
}
catch (Exception ex)
{
_settings.Enabled = false;
Log.Error(ex, "Datadog AppSec failed to initialize, your application is NOT protected");
}
}
/// <summary>
/// Gets or sets the global <see cref="Security"/> instance.
/// </summary>
public static Security Instance
{
get
{
return LazyInitializer.EnsureInitialized(ref _instance, ref _globalInstanceInitialized, ref _globalInstanceLock);
}
set
{
lock (_globalInstanceLock)
{
_instance = value;
_globalInstanceInitialized = true;
}
}
}
/// <summary>
/// Gets <see cref="InstrumentationGateway"/> instance
/// </summary>
InstrumentationGateway IDatadogSecurity.InstrumentationGateway => _instrumentationGateway;
internal InstrumentationGateway InstrumentationGateway => _instrumentationGateway;
/// <summary>
/// Gets <see cref="SecuritySettings"/> instance
/// </summary>
SecuritySettings IDatadogSecurity.Settings => _settings;
internal SecuritySettings Settings => _settings;
/// <summary>
        /// Frees resources
/// </summary>
public void Dispose() => _powerWaf?.Dispose();
internal void Execute(Guid guid)
{
if (toExecute.TryRemove(guid, out var value))
{
value();
}
}
private void Report(ITransport transport, Span span, Waf.ReturnTypes.Managed.Return result)
{
if (span != null)
{
span.SetTag(Tags.AppSecEvent, "true");
span.SetTraceSamplingPriority(SamplingPriority.AppSecKeep);
}
transport.OnCompleted(() =>
{
var attack = Attack.From(result, span, transport, _settings.CustomIpHeader, _settings.ExtraHeaders);
_agentWriter.AddEvent(attack);
});
}
private void TagSpan(Span span)
{
// we should only tag service entry span, the first span opened for a
// service. For WAF it's safe to assume we always have service entry spans
// we'll need to revisit this for RASP.
if (span != null)
{
span.SetTag(Tags.AppSecEnabled, "1");
span.SetTag(Tags.RuntimeFamily, TracerConstants.Language);
}
}
private void RunWafAndReact(IDictionary<string, object> args, ITransport transport, Span span)
{
TagSpan(span);
var additiveContext = transport.GetAdditiveContext();
if (additiveContext == null)
{
additiveContext = _powerWaf.CreateContext();
transport.SetAdditiveContext(additiveContext);
}
// run the WAF and execute the results
using var wafResult = additiveContext.Run(args);
if (wafResult.ReturnCode == ReturnCode.Monitor || wafResult.ReturnCode == ReturnCode.Block)
{
Log.Information("AppSec: Attack detected! Action: {ReturnCode}, Blocking enabled : {BlockingEnabled}", wafResult.ReturnCode, _settings.BlockingEnabled);
if (Log.IsEnabled(LogEventLevel.Debug))
{
Log.Information("AppSec: Attack arguments {Args}", Encoder.FormatArgs(args));
}
var managedWafResult = Waf.ReturnTypes.Managed.Return.From(wafResult);
if (_settings.BlockingEnabled && wafResult.ReturnCode == ReturnCode.Block)
{
transport.Block();
#if !NETFRAMEWORK
var guid = Guid.NewGuid();
toExecute.TryAdd(guid, () => Report(transport, span, managedWafResult));
transport.AddRequestScope(guid);
#else
Report(transport, span, managedWafResult);
#endif
}
else
{
Report(transport, span, managedWafResult);
}
}
}
private void InstrumentationGatewayInstrumentationGatewayEvent(object sender, InstrumentationGatewayEventArgs e)
{
try
{
RunWafAndReact(e.EventData, e.Transport, e.RelatedSpan);
}
catch (Exception ex)
{
Log.Error(ex, "Call into the security module failed");
}
}
private bool AreArchitectureAndOsSupported()
{
var frameworkDescription = FrameworkDescription.Instance;
var osSupported = false;
var supportedOs = new[] { OSPlatform.Linux, OSPlatform.MacOS, OSPlatform.Windows };
if (supportedOs.Contains(frameworkDescription.OSPlatform))
{
osSupported = true;
}
var archSupported = false;
var supportedArchs = new[] { ProcessArchitecture.Arm, ProcessArchitecture.X64, ProcessArchitecture.X86 };
if (supportedArchs.Contains(frameworkDescription.ProcessArchitecture))
{
archSupported = true;
}
if (!osSupported || !archSupported)
{
Log.Warning(
"AppSec could not start because the current environment is not supported. No security activities will be collected. Please contact support at https://docs.datadoghq.com/help/ for help. Host information: {{ operating_system:{frameworkDescription.OSPlatform} }}, arch:{{ {frameworkDescription.ProcessArchitecture} }}, runtime_infos: {{ {frameworkDescription.ProductVersion} }}",
frameworkDescription.OSPlatform,
frameworkDescription.ProcessArchitecture,
frameworkDescription.ProductVersion);
}
return osSupported && archSupported;
}
private void RunShutdown()
{
_agentWriter?.Shutdown();
if (_instrumentationGateway != null)
{
_instrumentationGateway.InstrumentationGatewayEvent -= InstrumentationGatewayInstrumentationGatewayEvent;
}
Dispose();
}
}
}
| 1 | 23,000 | `Tags.AppSecEnabled` should probably move to `Metrics.AppSecEnabled` I guess? | DataDog-dd-trace-dotnet | .cs |
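A sketch of that move — the constant's home class and the key string here are assumptions, not taken from the tracer source:

```csharp
// Hypothetical: once the flag is written with SetMetric, its key lives with the
// other metric names rather than in Tags.
internal static class Metrics
{
    public const string AppSecEnabled = "_dd.appsec.enabled"; // assumed key string
}

// TagSpan would then set:
span.SetMetric(Metrics.AppSecEnabled, 1.0);
span.SetTag(Tags.RuntimeFamily, TracerConstants.Language);
```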
@@ -35,6 +35,9 @@ class atomic_file(AtomicLocalFile):
Also cleans up the temp file if close is not invoked
"""
+ def __init__(self, path, mode):
+ super(AtomicLocalFile, self).__init__(path, mode)
+
def move_to_final_destination(self):
os.rename(self.tmp_path, self.path)
| 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
:class:`LocalTarget` provides a concrete implementation of a :py:class:`~luigi.target.Target` class that uses files on the local file system
"""
import os
import random
import shutil
import tempfile
import io
import warnings
from luigi.format import FileWrapper, get_default_format
from luigi.target import FileAlreadyExists, MissingParentDirectory, NotADirectory, FileSystem, FileSystemTarget, AtomicLocalFile
class atomic_file(AtomicLocalFile):
"""Simple class that writes to a temp file and moves it on close()
Also cleans up the temp file if close is not invoked
"""
def move_to_final_destination(self):
os.rename(self.tmp_path, self.path)
def generate_tmp_path(self, path):
return path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)
class LocalFileSystem(FileSystem):
"""
Wrapper for access to file system operations.
Work in progress - add things as needed.
"""
def exists(self, path):
return os.path.exists(path)
def mkdir(self, path, parents=True, raise_if_exists=False):
if self.exists(path):
if raise_if_exists:
raise FileAlreadyExists()
elif not self.isdir(path):
raise NotADirectory()
else:
return
if parents:
os.makedirs(path)
else:
if not os.path.exists(os.path.dirname(path)):
raise MissingParentDirectory()
os.mkdir(path)
def isdir(self, path):
return os.path.isdir(path)
def listdir(self, path):
for dir_, _, files in os.walk(path):
assert dir_.startswith(path)
for name in files:
yield os.path.join(dir_, name)
def remove(self, path, recursive=True):
if recursive and self.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
def move(self, old_path, new_path, raise_if_exists=False):
if raise_if_exists and os.path.exists(new_path):
raise RuntimeError('Destination exists: %s' % new_path)
d = os.path.dirname(new_path)
if d and not os.path.exists(d):
self.mkdir(d)
os.rename(old_path, new_path)
class LocalTarget(FileSystemTarget):
fs = LocalFileSystem()
def __init__(self, path=None, format=None, is_tmp=False):
if format is None:
format = get_default_format()
if not path:
if not is_tmp:
raise Exception('path or is_tmp must be set')
path = os.path.join(tempfile.gettempdir(), 'luigi-tmp-%09d' % random.randint(0, 999999999))
super(LocalTarget, self).__init__(path)
self.format = format
self.is_tmp = is_tmp
def makedirs(self):
"""
Create all parent folders if they do not exist.
"""
normpath = os.path.normpath(self.path)
parentfolder = os.path.dirname(normpath)
if parentfolder:
try:
os.makedirs(parentfolder)
except OSError:
pass
def open(self, mode='r'):
rwmode = mode.replace('b', '').replace('t', '')
if rwmode == 'w':
self.makedirs()
return self.format.pipe_writer(atomic_file(self.path))
elif rwmode == 'r':
fileobj = FileWrapper(io.BufferedReader(io.FileIO(self.path, mode)))
return self.format.pipe_reader(fileobj)
else:
raise Exception('mode must be r/w (got:%s)' % mode)
def move(self, new_path, raise_if_exists=False):
self.fs.move(self.path, new_path, raise_if_exists=raise_if_exists)
def move_dir(self, new_path):
self.move(new_path)
def remove(self):
self.fs.remove(self.path)
def copy(self, new_path, raise_if_exists=False):
if raise_if_exists and os.path.exists(new_path):
raise RuntimeError('Destination exists: %s' % new_path)
tmp = LocalTarget(new_path + '-luigi-tmp-%09d' % random.randrange(0, 1e10), is_tmp=True)
tmp.makedirs()
shutil.copy(self.path, tmp.fn)
tmp.move(new_path)
@property
def fn(self):
return self.path
def __del__(self):
if self.is_tmp and self.exists():
self.remove()
class File(LocalTarget):
def __init__(self, *args, **kwargs):
warnings.warn("File has been renamed LocalTarget", DeprecationWarning, stacklevel=2)
super(File, self).__init__(*args, **kwargs)
| 1 | 14,761 | What happens if you remove this? Can't you still initialize this atomic_file class, since it's just AtomicLocalFile with an additional method (move_to_final_destination)? | spotify-luigi | py |
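A sketch of what the reviewer describes — dropping the override entirely (assuming the base class already accepts the same arguments). If an override were ever kept, `super()` should name the subclass: the added line passes `AtomicLocalFile` as the first argument, which skips that class's own `__init__`.

```python
class atomic_file(AtomicLocalFile):
    """Simple class that writes to a temp file and moves it on close()

    Also cleans up the temp file if close is not invoked
    """

    # No __init__ override needed: AtomicLocalFile.__init__ runs unchanged.
    # If one were required, the correct form would be:
    #     super(atomic_file, self).__init__(path, mode)

    def move_to_final_destination(self):
        os.rename(self.tmp_path, self.path)

    def generate_tmp_path(self, path):
        return path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)
```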
@@ -34,7 +34,6 @@
<h2><div class='icon partners'></div><%= t ".partners_title" %></h2>
<p><%= t 'layouts.partners_html',
:ucl => link_to(t('layouts.partners_ucl'), "https://www.ucl.ac.uk"),
- :ic => link_to(t('layouts.partners_ic'), "https://www.imperial.ac.uk/"),
:bytemark => link_to(t('layouts.partners_bytemark'), "https://www.bytemark.co.uk"),
:partners => link_to(t('layouts.partners_partners'), "https://hardware.openstreetmap.org/thanks/") %>
</p> | 1 | <div class='attr'>
<div class='byosm'>
<%= t ".copyright_html" %>
</div>
<div class='user-image'></div>
<h1><%= raw t ".used_by", :name => "<span class='user-name'>OpenStreetMap</span>" %></h1>
</div>
<div class='text'>
<div class='section'>
<p><strong><%= t ".lede_text" %></strong></p>
<h2><div class='icon local'></div><%= t ".local_knowledge_title" %></h2>
<p><%= t ".local_knowledge_html" %></p>
</div>
<div class='section'>
<h2><div class='icon community'></div><%= t ".community_driven_title" %></h2>
<p><%= t ".community_driven_html", :diary_path => diary_path %></p>
</div>
<div class='section' id='open-data'>
<h2><div class='icon open'></div><%= t ".open_data_title" %></h2>
<p><%= t ".open_data_html", :copyright_path => copyright_path %></p>
</div>
<div class='section' id='legal'>
<h2><div class='icon legal'></div><%= t ".legal_title" %></h2>
<p><%= t ".legal_html" %></p>
</div>
<div class='section' id='partners'>
<h2><div class='icon partners'></div><%= t ".partners_title" %></h2>
<p><%= t 'layouts.partners_html',
:ucl => link_to(t('layouts.partners_ucl'), "https://www.ucl.ac.uk"),
:ic => link_to(t('layouts.partners_ic'), "https://www.imperial.ac.uk/"),
:bytemark => link_to(t('layouts.partners_bytemark'), "https://www.bytemark.co.uk"),
:partners => link_to(t('layouts.partners_partners'), "https://hardware.openstreetmap.org/thanks/") %>
</p>
</div>
</div>
| 1 | 11,509 | As previously discussed in #1944 it's not safe to remove IC like this as it will break every language that still has a `%{ic}` marker in the translation. | openstreetmap-openstreetmap-website | rb |
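The breakage the reviewer is warning about, shown with an illustrative (made-up) locale entry: as long as any translation still interpolates `%{ic}`, rendering it without passing `:ic` raises `I18n::MissingInterpolationArgument`.

```yaml
# illustrative only, not an actual locale file from the repository
xx:
  layouts:
    partners_html: "Hosting is supported by %{ucl}, %{ic}, %{bytemark}, and other %{partners}."
```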
@@ -293,6 +293,19 @@ func (r *DefaultRuleRenderer) endpointIptablesChain(
},
})
+ rules = append(rules, Rule{
+ Match: Match().ProtocolNum(ProtoUDP).
+ DestPorts(uint16(r.Config.VXLANPort)).
+ VXLANVNI(uint32(r.Config.VXLANVNI)),
+ Action: DropAction{},
+ Comment: "Drop VXLAN encapped packets originating in pods",
+ })
+ rules = append(rules, Rule{
+ Match: Match().ProtocolNum(ProtoIPIP),
+ Action: DropAction{},
+ Comment: "Drop IPinIP encapped packets originating in pods",
+ })
+
if len(policyNames) > 0 {
// Clear the "pass" mark. If a policy sets that mark, we'll skip the rest of the policies and
// continue processing the profiles, if there are any. | 1 | // Copyright (c) 2016-2018 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
log "github.com/sirupsen/logrus"
"github.com/projectcalico/felix/hashutils"
. "github.com/projectcalico/felix/iptables"
"github.com/projectcalico/felix/proto"
)
func (r *DefaultRuleRenderer) WorkloadEndpointToIptablesChains(
ifaceName string,
epMarkMapper EndpointMarkMapper,
adminUp bool,
ingressPolicies []string,
egressPolicies []string,
profileIDs []string,
) []*Chain {
result := []*Chain{}
result = append(result,
// Chain for traffic _to_ the endpoint.
r.endpointIptablesChain(
ingressPolicies,
profileIDs,
ifaceName,
PolicyInboundPfx,
ProfileInboundPfx,
WorkloadToEndpointPfx,
"", // No fail-safe chains for workloads.
chainTypeNormal,
adminUp,
r.filterAllowAction, // Workload endpoint chains are only used in the filter table
),
// Chain for traffic _from_ the endpoint.
r.endpointIptablesChain(
egressPolicies,
profileIDs,
ifaceName,
PolicyOutboundPfx,
ProfileOutboundPfx,
WorkloadFromEndpointPfx,
"", // No fail-safe chains for workloads.
chainTypeNormal,
adminUp,
r.filterAllowAction, // Workload endpoint chains are only used in the filter table
),
)
if r.KubeIPVSSupportEnabled {
// Chain for setting endpoint mark of an endpoint.
result = append(result,
r.endpointSetMarkChain(
ifaceName,
epMarkMapper,
SetEndPointMarkPfx,
),
)
}
return result
}
func (r *DefaultRuleRenderer) HostEndpointToFilterChains(
ifaceName string,
epMarkMapper EndpointMarkMapper,
ingressPolicyNames []string,
egressPolicyNames []string,
ingressForwardPolicyNames []string,
egressForwardPolicyNames []string,
profileIDs []string,
) []*Chain {
log.WithField("ifaceName", ifaceName).Debug("Rendering filter host endpoint chain.")
result := []*Chain{}
result = append(result,
// Chain for output traffic _to_ the endpoint.
r.endpointIptablesChain(
egressPolicyNames,
profileIDs,
ifaceName,
PolicyOutboundPfx,
ProfileOutboundPfx,
HostToEndpointPfx,
ChainFailsafeOut,
chainTypeNormal,
true, // Host endpoints are always admin up.
r.filterAllowAction,
),
// Chain for input traffic _from_ the endpoint.
r.endpointIptablesChain(
ingressPolicyNames,
profileIDs,
ifaceName,
PolicyInboundPfx,
ProfileInboundPfx,
HostFromEndpointPfx,
ChainFailsafeIn,
chainTypeNormal,
true, // Host endpoints are always admin up.
r.filterAllowAction,
),
// Chain for forward traffic _to_ the endpoint.
r.endpointIptablesChain(
egressForwardPolicyNames,
profileIDs,
ifaceName,
PolicyOutboundPfx,
ProfileOutboundPfx,
HostToEndpointForwardPfx,
"", // No fail-safe chains for forward traffic.
chainTypeForward,
true, // Host endpoints are always admin up.
r.filterAllowAction,
),
// Chain for forward traffic _from_ the endpoint.
r.endpointIptablesChain(
ingressForwardPolicyNames,
profileIDs,
ifaceName,
PolicyInboundPfx,
ProfileInboundPfx,
HostFromEndpointForwardPfx,
"", // No fail-safe chains for forward traffic.
chainTypeForward,
true, // Host endpoints are always admin up.
r.filterAllowAction,
),
)
if r.KubeIPVSSupportEnabled {
// Chain for setting endpoint mark of an endpoint.
result = append(result,
r.endpointSetMarkChain(
ifaceName,
epMarkMapper,
SetEndPointMarkPfx,
),
)
}
return result
}
func (r *DefaultRuleRenderer) HostEndpointToRawChains(
ifaceName string,
ingressPolicyNames []string,
egressPolicyNames []string,
) []*Chain {
log.WithField("ifaceName", ifaceName).Debug("Rendering raw (untracked) host endpoint chain.")
return []*Chain{
// Chain for traffic _to_ the endpoint.
r.endpointIptablesChain(
egressPolicyNames,
nil, // We don't render profiles into the raw table.
ifaceName,
PolicyOutboundPfx,
ProfileOutboundPfx,
HostToEndpointPfx,
ChainFailsafeOut,
chainTypeUntracked,
true, // Host endpoints are always admin up.
AcceptAction{},
),
// Chain for traffic _from_ the endpoint.
r.endpointIptablesChain(
ingressPolicyNames,
nil, // We don't render profiles into the raw table.
ifaceName,
PolicyInboundPfx,
ProfileInboundPfx,
HostFromEndpointPfx,
ChainFailsafeIn,
chainTypeUntracked,
true, // Host endpoints are always admin up.
AcceptAction{},
),
}
}
func (r *DefaultRuleRenderer) HostEndpointToMangleChains(
ifaceName string,
preDNATPolicyNames []string,
) []*Chain {
log.WithField("ifaceName", ifaceName).Debug("Rendering pre-DNAT host endpoint chain.")
return []*Chain{
// Chain for traffic _from_ the endpoint. Pre-DNAT policy does not apply to
// outgoing traffic through a host endpoint.
r.endpointIptablesChain(
preDNATPolicyNames,
nil, // We don't render profiles into the raw table.
ifaceName,
PolicyInboundPfx,
ProfileInboundPfx,
HostFromEndpointPfx,
ChainFailsafeIn,
chainTypePreDNAT,
true, // Host endpoints are always admin up.
r.mangleAllowAction,
),
}
}
type endpointChainType int
const (
chainTypeNormal endpointChainType = iota
chainTypeUntracked
chainTypePreDNAT
chainTypeForward
)
func (r *DefaultRuleRenderer) endpointSetMarkChain(
name string,
epMarkMapper EndpointMarkMapper,
endpointPrefix string,
) *Chain {
rules := []Rule{}
chainName := EndpointChainName(endpointPrefix, name)
if endPointMark, err := epMarkMapper.GetEndpointMark(name); err == nil {
// Set endpoint mark.
rules = append(rules, Rule{
Action: SetMaskedMarkAction{
Mark: endPointMark,
Mask: epMarkMapper.GetMask()},
})
}
return &Chain{
Name: chainName,
Rules: rules,
}
}
func (r *DefaultRuleRenderer) endpointIptablesChain(
policyNames []string,
profileIds []string,
name string,
policyPrefix PolicyChainNamePrefix,
profilePrefix ProfileChainNamePrefix,
endpointPrefix string,
failsafeChain string,
chainType endpointChainType,
adminUp bool,
allowAction Action,
) *Chain {
rules := []Rule{}
chainName := EndpointChainName(endpointPrefix, name)
if !adminUp {
// Endpoint is admin-down, drop all traffic to/from it.
rules = append(rules, Rule{
Match: Match(),
Action: DropAction{},
Comment: "Endpoint admin disabled",
})
return &Chain{
Name: chainName,
Rules: rules,
}
}
if chainType != chainTypeUntracked {
// Tracked chain: install conntrack rules, which implement our stateful connections.
// This allows return traffic associated with a previously-permitted request.
rules = r.appendConntrackRules(rules, allowAction)
}
// First set up failsafes.
if failsafeChain != "" {
rules = append(rules, Rule{
Action: JumpAction{Target: failsafeChain},
})
}
// Start by ensuring that the accept mark bit is clear, policies set that bit to indicate
// that they accepted the packet.
rules = append(rules, Rule{
Action: ClearMarkAction{
Mark: r.IptablesMarkAccept,
},
})
if len(policyNames) > 0 {
// Clear the "pass" mark. If a policy sets that mark, we'll skip the rest of the policies and
// continue processing the profiles, if there are any.
rules = append(rules, Rule{
Comment: "Start of policies",
Action: ClearMarkAction{
Mark: r.IptablesMarkPass,
},
})
// Then, jump to each policy in turn.
for _, polID := range policyNames {
polChainName := PolicyChainName(
policyPrefix,
&proto.PolicyID{Name: polID},
)
// If a previous policy didn't set the "pass" mark, jump to the policy.
rules = append(rules, Rule{
Match: Match().MarkClear(r.IptablesMarkPass),
Action: JumpAction{Target: polChainName},
})
// If policy marked packet as accepted, it returns, setting the accept
// mark bit.
if chainType == chainTypeUntracked {
// For an untracked policy, map allow to "NOTRACK and ALLOW".
rules = append(rules, Rule{
Match: Match().MarkSingleBitSet(r.IptablesMarkAccept),
Action: NoTrackAction{},
})
}
// If accept bit is set, return from this chain. We don't immediately
// accept because there may be other policy still to apply.
rules = append(rules, Rule{
Match: Match().MarkSingleBitSet(r.IptablesMarkAccept),
Action: ReturnAction{},
Comment: "Return if policy accepted",
})
}
if chainType == chainTypeNormal || chainType == chainTypeForward {
// When rendering normal and forward rules, if no policy marked the packet as "pass", drop the
// packet.
//
// For untracked and pre-DNAT rules, we don't do that because there may be
// normal rules still to be applied to the packet in the filter table.
rules = append(rules, Rule{
Match: Match().MarkClear(r.IptablesMarkPass),
Action: DropAction{},
Comment: "Drop if no policies passed packet",
})
}
} else if chainType == chainTypeForward {
// Forwarded traffic is allowed when there are no policies with
// applyOnForward that apply to this endpoint (and in this direction).
rules = append(rules, Rule{
Action: SetMarkAction{Mark: r.IptablesMarkAccept},
Comment: "Allow forwarded traffic by default",
})
rules = append(rules, Rule{
Action: ReturnAction{},
Comment: "Return for accepted forward traffic",
})
}
if chainType == chainTypeNormal {
// Then, jump to each profile in turn.
for _, profileID := range profileIds {
profChainName := ProfileChainName(profilePrefix, &proto.ProfileID{Name: profileID})
rules = append(rules,
Rule{Action: JumpAction{Target: profChainName}},
// If policy marked packet as accepted, it returns, setting the
// accept mark bit. If that is set, return from this chain.
Rule{
Match: Match().MarkSingleBitSet(r.IptablesMarkAccept),
Action: ReturnAction{},
Comment: "Return if profile accepted",
})
}
// When rendering normal rules, if no profile marked the packet as accepted, drop
// the packet.
//
// For untracked rules, we don't do that because there may be tracked rules
// still to be applied to the packet in the filter table.
rules = append(rules, Rule{
Match: Match(),
Action: DropAction{},
Comment: "Drop if no profiles matched",
})
}
return &Chain{
Name: chainName,
Rules: rules,
}
}
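// appendConntrackRules appends the standard conntrack rules: RELATED/ESTABLISHED
// packets get the allow action (with the accept mark set first when that action
// isn't a plain ACCEPT), and INVALID packets are dropped unless disabled in config.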
func (r *DefaultRuleRenderer) appendConntrackRules(rules []Rule, allowAction Action) []Rule {
// Allow return packets for established connections.
if allowAction != (AcceptAction{}) {
// If we've been asked to return instead of accept the packet immediately,
// make sure we flag the packet as allowed.
rules = append(rules,
Rule{
Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: SetMarkAction{Mark: r.IptablesMarkAccept},
},
)
}
rules = append(rules,
Rule{
Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: allowAction,
},
)
if !r.Config.DisableConntrackInvalid {
// Drop packets that aren't either a valid handshake or part of an established
// connection.
rules = append(rules, Rule{
Match: Match().ConntrackState("INVALID"),
Action: DropAction{},
})
}
return rules
}
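// EndpointChainName returns the chain name for the given prefix and interface name,
// shortened/hashed as needed to stay within the iptables chain-name length limit.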
func EndpointChainName(prefix string, ifaceName string) string {
return hashutils.GetLengthLimitedID(
prefix,
ifaceName,
MaxChainNameLength,
)
}
| 1 | 16,987 | I believe these rules will be enforced both (1) on egress from a local workload, and (2) on ingress **to** a local workload. Right? I understand that we definitely want (1), but do we really want to enforce (2) as well? | projectcalico-felix | go |
@@ -368,6 +368,8 @@ class RemoteConnection(object):
('POST', '/session/$sessionId/window/rect'),
Command.GET_WINDOW_RECT:
('GET', '/session/$sessionId/window/rect'),
+ Command.W3C_MINIMIZE_WINDOW:
+ ('POST', '/session/$sessionId/window/minimize'),
Command.MAXIMIZE_WINDOW:
('POST', '/session/$sessionId/window/$windowHandle/maximize'),
Command.W3C_MAXIMIZE_WINDOW: | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import socket
import string
import base64
try:
import http.client as httplib
from urllib import request as url_request
from urllib import parse
except ImportError: # above is available in py3+, below is py2.7
import httplib as httplib
import urllib2 as url_request
import urlparse as parse
from selenium.webdriver.common import utils as common_utils
from .command import Command
from .errorhandler import ErrorCode
from . import utils
LOGGER = logging.getLogger(__name__)
class Request(url_request.Request):
"""
Extends the url_request.Request to support all HTTP request types.
"""
def __init__(self, url, data=None, method=None):
"""
Initialise a new HTTP request.
:Args:
- url - String for the URL to send the request to.
- data - Data to send with the request.
"""
if method is None:
method = data is not None and 'POST' or 'GET'
elif method != 'POST' and method != 'PUT':
data = None
self._method = method
url_request.Request.__init__(self, url, data=data)
def get_method(self):
"""
Returns the HTTP method used by this request.
"""
return self._method
class Response(object):
"""
Represents an HTTP response.
"""
def __init__(self, fp, code, headers, url):
"""
Initialise a new Response.
:Args:
- fp - The response body file object.
- code - The HTTP status code returned by the server.
- headers - A dictionary of headers returned by the server.
- url - URL of the retrieved resource represented by this Response.
"""
self.fp = fp
self.read = fp.read
self.code = code
self.headers = headers
self.url = url
def close(self):
"""
Close the response body file object.
"""
self.read = None
self.fp = None
def info(self):
"""
Returns the response headers.
"""
return self.headers
def geturl(self):
"""
Returns the URL for the resource returned in this response.
"""
return self.url
class HttpErrorHandler(url_request.HTTPDefaultErrorHandler):
"""
A custom HTTP error handler.
Used to return Response objects instead of raising an HTTPError exception.
"""
def http_error_default(self, req, fp, code, msg, headers):
"""
Default HTTP error handler.
:Args:
- req - The original Request object.
- fp - The response body file object.
- code - The HTTP status code returned by the server.
- msg - The HTTP status message returned by the server.
- headers - The response headers.
:Returns:
A new Response object.
"""
return Response(fp, code, headers, req.get_full_url())
class RemoteConnection(object):
"""A connection with the Remote WebDriver server.
Communicates with the server using the WebDriver wire protocol:
https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol"""
_timeout = socket._GLOBAL_DEFAULT_TIMEOUT
@classmethod
def get_timeout(cls):
"""
:Returns:
Timeout value in seconds for all http requests made to the Remote Connection
"""
return None if cls._timeout == socket._GLOBAL_DEFAULT_TIMEOUT else cls._timeout
@classmethod
def set_timeout(cls, timeout):
"""
Override the default timeout
:Args:
- timeout - timeout value for http requests in seconds
"""
cls._timeout = timeout
@classmethod
def reset_timeout(cls):
"""
Reset the http request timeout to socket._GLOBAL_DEFAULT_TIMEOUT
"""
cls._timeout = socket._GLOBAL_DEFAULT_TIMEOUT
@classmethod
def get_remote_connection_headers(cls, parsed_url, keep_alive=False):
"""
Get headers for remote request.
:Args:
- parsed_url - The parsed url
- keep_alive (Boolean) - Is this a keep-alive connection (default: False)
"""
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json;charset=UTF-8',
'User-Agent': 'Python http auth'
}
if parsed_url.username:
base64string = base64.b64encode('{0.username}:{0.password}'.format(parsed_url).encode())
headers.update({
'Authorization': 'Basic {}'.format(base64string.decode())
})
if keep_alive:
headers.update({
'Connection': 'keep-alive'
})
return headers
def __init__(self, remote_server_addr, keep_alive=False, resolve_ip=True):
# Attempt to resolve the hostname and get an IP address.
self.keep_alive = keep_alive
parsed_url = parse.urlparse(remote_server_addr)
addr = parsed_url.hostname
if parsed_url.hostname and resolve_ip:
port = parsed_url.port or None
if parsed_url.scheme == "https":
ip = parsed_url.hostname
else:
ip = common_utils.find_connectable_ip(parsed_url.hostname,
port=port)
if ip:
netloc = ip
addr = netloc
if parsed_url.port:
netloc = common_utils.join_host_port(netloc,
parsed_url.port)
if parsed_url.username:
auth = parsed_url.username
if parsed_url.password:
auth += ':%s' % parsed_url.password
netloc = '%s@%s' % (auth, netloc)
remote_server_addr = parse.urlunparse(
(parsed_url.scheme, netloc, parsed_url.path,
parsed_url.params, parsed_url.query, parsed_url.fragment))
else:
LOGGER.info('Could not get IP address for host: %s' %
parsed_url.hostname)
self._url = remote_server_addr
if keep_alive:
self._conn = httplib.HTTPConnection(
str(addr), str(parsed_url.port), timeout=self._timeout)
self._commands = {
Command.STATUS: ('GET', '/status'),
Command.NEW_SESSION: ('POST', '/session'),
Command.GET_ALL_SESSIONS: ('GET', '/sessions'),
Command.QUIT: ('DELETE', '/session/$sessionId'),
Command.GET_CURRENT_WINDOW_HANDLE:
('GET', '/session/$sessionId/window_handle'),
Command.W3C_GET_CURRENT_WINDOW_HANDLE:
('GET', '/session/$sessionId/window'),
Command.GET_WINDOW_HANDLES:
('GET', '/session/$sessionId/window_handles'),
Command.W3C_GET_WINDOW_HANDLES:
('GET', '/session/$sessionId/window/handles'),
Command.GET: ('POST', '/session/$sessionId/url'),
Command.GO_FORWARD: ('POST', '/session/$sessionId/forward'),
Command.GO_BACK: ('POST', '/session/$sessionId/back'),
Command.REFRESH: ('POST', '/session/$sessionId/refresh'),
Command.EXECUTE_SCRIPT: ('POST', '/session/$sessionId/execute'),
Command.W3C_EXECUTE_SCRIPT:
('POST', '/session/$sessionId/execute/sync'),
Command.W3C_EXECUTE_SCRIPT_ASYNC:
('POST', '/session/$sessionId/execute/async'),
Command.GET_CURRENT_URL: ('GET', '/session/$sessionId/url'),
Command.GET_TITLE: ('GET', '/session/$sessionId/title'),
Command.GET_PAGE_SOURCE: ('GET', '/session/$sessionId/source'),
Command.SCREENSHOT: ('GET', '/session/$sessionId/screenshot'),
Command.ELEMENT_SCREENSHOT: ('GET', '/session/$sessionId/element/$id/screenshot'),
Command.FIND_ELEMENT: ('POST', '/session/$sessionId/element'),
Command.FIND_ELEMENTS: ('POST', '/session/$sessionId/elements'),
Command.W3C_GET_ACTIVE_ELEMENT: ('GET', '/session/$sessionId/element/active'),
Command.GET_ACTIVE_ELEMENT:
('POST', '/session/$sessionId/element/active'),
Command.FIND_CHILD_ELEMENT:
('POST', '/session/$sessionId/element/$id/element'),
Command.FIND_CHILD_ELEMENTS:
('POST', '/session/$sessionId/element/$id/elements'),
Command.CLICK_ELEMENT: ('POST', '/session/$sessionId/element/$id/click'),
Command.CLEAR_ELEMENT: ('POST', '/session/$sessionId/element/$id/clear'),
Command.SUBMIT_ELEMENT: ('POST', '/session/$sessionId/element/$id/submit'),
Command.GET_ELEMENT_TEXT: ('GET', '/session/$sessionId/element/$id/text'),
Command.SEND_KEYS_TO_ELEMENT:
('POST', '/session/$sessionId/element/$id/value'),
Command.SEND_KEYS_TO_ACTIVE_ELEMENT:
('POST', '/session/$sessionId/keys'),
Command.UPLOAD_FILE: ('POST', "/session/$sessionId/file"),
Command.GET_ELEMENT_VALUE:
('GET', '/session/$sessionId/element/$id/value'),
Command.GET_ELEMENT_TAG_NAME:
('GET', '/session/$sessionId/element/$id/name'),
Command.IS_ELEMENT_SELECTED:
('GET', '/session/$sessionId/element/$id/selected'),
Command.SET_ELEMENT_SELECTED:
('POST', '/session/$sessionId/element/$id/selected'),
Command.IS_ELEMENT_ENABLED:
('GET', '/session/$sessionId/element/$id/enabled'),
Command.IS_ELEMENT_DISPLAYED:
('GET', '/session/$sessionId/element/$id/displayed'),
Command.GET_ELEMENT_LOCATION:
('GET', '/session/$sessionId/element/$id/location'),
Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW:
('GET', '/session/$sessionId/element/$id/location_in_view'),
Command.GET_ELEMENT_SIZE:
('GET', '/session/$sessionId/element/$id/size'),
Command.GET_ELEMENT_RECT:
('GET', '/session/$sessionId/element/$id/rect'),
Command.GET_ELEMENT_ATTRIBUTE:
('GET', '/session/$sessionId/element/$id/attribute/$name'),
Command.GET_ELEMENT_PROPERTY:
('GET', '/session/$sessionId/element/$id/property/$name'),
Command.ELEMENT_EQUALS:
('GET', '/session/$sessionId/element/$id/equals/$other'),
Command.GET_ALL_COOKIES: ('GET', '/session/$sessionId/cookie'),
Command.ADD_COOKIE: ('POST', '/session/$sessionId/cookie'),
Command.DELETE_ALL_COOKIES:
('DELETE', '/session/$sessionId/cookie'),
Command.DELETE_COOKIE:
('DELETE', '/session/$sessionId/cookie/$name'),
Command.SWITCH_TO_FRAME: ('POST', '/session/$sessionId/frame'),
Command.SWITCH_TO_PARENT_FRAME: ('POST', '/session/$sessionId/frame/parent'),
Command.SWITCH_TO_WINDOW: ('POST', '/session/$sessionId/window'),
Command.CLOSE: ('DELETE', '/session/$sessionId/window'),
Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY:
('GET', '/session/$sessionId/element/$id/css/$propertyName'),
Command.IMPLICIT_WAIT:
('POST', '/session/$sessionId/timeouts/implicit_wait'),
Command.EXECUTE_ASYNC_SCRIPT: ('POST', '/session/$sessionId/execute_async'),
Command.SET_SCRIPT_TIMEOUT:
('POST', '/session/$sessionId/timeouts/async_script'),
Command.SET_TIMEOUTS:
('POST', '/session/$sessionId/timeouts'),
Command.DISMISS_ALERT:
('POST', '/session/$sessionId/dismiss_alert'),
Command.W3C_DISMISS_ALERT:
('POST', '/session/$sessionId/alert/dismiss'),
Command.ACCEPT_ALERT:
('POST', '/session/$sessionId/accept_alert'),
Command.W3C_ACCEPT_ALERT:
('POST', '/session/$sessionId/alert/accept'),
Command.SET_ALERT_VALUE:
('POST', '/session/$sessionId/alert_text'),
Command.W3C_SET_ALERT_VALUE:
('POST', '/session/$sessionId/alert/text'),
Command.GET_ALERT_TEXT:
('GET', '/session/$sessionId/alert_text'),
Command.W3C_GET_ALERT_TEXT:
('GET', '/session/$sessionId/alert/text'),
Command.SET_ALERT_CREDENTIALS:
('POST', '/session/$sessionId/alert/credentials'),
Command.CLICK:
('POST', '/session/$sessionId/click'),
Command.W3C_ACTIONS:
('POST', '/session/$sessionId/actions'),
Command.W3C_CLEAR_ACTIONS:
('DELETE', '/session/$sessionId/actions'),
Command.DOUBLE_CLICK:
('POST', '/session/$sessionId/doubleclick'),
Command.MOUSE_DOWN:
('POST', '/session/$sessionId/buttondown'),
Command.MOUSE_UP:
('POST', '/session/$sessionId/buttonup'),
Command.MOVE_TO:
('POST', '/session/$sessionId/moveto'),
Command.GET_WINDOW_SIZE:
('GET', '/session/$sessionId/window/$windowHandle/size'),
Command.W3C_GET_WINDOW_SIZE:
('GET', '/session/$sessionId/window/size'),
Command.SET_WINDOW_SIZE:
('POST', '/session/$sessionId/window/$windowHandle/size'),
Command.W3C_SET_WINDOW_SIZE:
('POST', '/session/$sessionId/window/size'),
Command.GET_WINDOW_POSITION:
('GET', '/session/$sessionId/window/$windowHandle/position'),
Command.SET_WINDOW_POSITION:
('POST', '/session/$sessionId/window/$windowHandle/position'),
Command.W3C_GET_WINDOW_POSITION:
('GET', '/session/$sessionId/window/position'),
Command.W3C_SET_WINDOW_POSITION:
('POST', '/session/$sessionId/window/position'),
Command.SET_WINDOW_RECT:
('POST', '/session/$sessionId/window/rect'),
Command.GET_WINDOW_RECT:
('GET', '/session/$sessionId/window/rect'),
Command.MAXIMIZE_WINDOW:
('POST', '/session/$sessionId/window/$windowHandle/maximize'),
Command.W3C_MAXIMIZE_WINDOW:
('POST', '/session/$sessionId/window/maximize'),
Command.SET_SCREEN_ORIENTATION:
('POST', '/session/$sessionId/orientation'),
Command.GET_SCREEN_ORIENTATION:
('GET', '/session/$sessionId/orientation'),
Command.SINGLE_TAP:
('POST', '/session/$sessionId/touch/click'),
Command.TOUCH_DOWN:
('POST', '/session/$sessionId/touch/down'),
Command.TOUCH_UP:
('POST', '/session/$sessionId/touch/up'),
Command.TOUCH_MOVE:
('POST', '/session/$sessionId/touch/move'),
Command.TOUCH_SCROLL:
('POST', '/session/$sessionId/touch/scroll'),
Command.DOUBLE_TAP:
('POST', '/session/$sessionId/touch/doubleclick'),
Command.LONG_PRESS:
('POST', '/session/$sessionId/touch/longclick'),
Command.FLICK:
('POST', '/session/$sessionId/touch/flick'),
Command.EXECUTE_SQL:
('POST', '/session/$sessionId/execute_sql'),
Command.GET_LOCATION:
('GET', '/session/$sessionId/location'),
Command.SET_LOCATION:
('POST', '/session/$sessionId/location'),
Command.GET_APP_CACHE:
('GET', '/session/$sessionId/application_cache'),
Command.GET_APP_CACHE_STATUS:
('GET', '/session/$sessionId/application_cache/status'),
Command.CLEAR_APP_CACHE:
('DELETE', '/session/$sessionId/application_cache/clear'),
Command.GET_NETWORK_CONNECTION:
('GET', '/session/$sessionId/network_connection'),
Command.SET_NETWORK_CONNECTION:
('POST', '/session/$sessionId/network_connection'),
Command.GET_LOCAL_STORAGE_ITEM:
('GET', '/session/$sessionId/local_storage/key/$key'),
Command.REMOVE_LOCAL_STORAGE_ITEM:
('DELETE', '/session/$sessionId/local_storage/key/$key'),
Command.GET_LOCAL_STORAGE_KEYS:
('GET', '/session/$sessionId/local_storage'),
Command.SET_LOCAL_STORAGE_ITEM:
('POST', '/session/$sessionId/local_storage'),
Command.CLEAR_LOCAL_STORAGE:
('DELETE', '/session/$sessionId/local_storage'),
Command.GET_LOCAL_STORAGE_SIZE:
('GET', '/session/$sessionId/local_storage/size'),
Command.GET_SESSION_STORAGE_ITEM:
('GET', '/session/$sessionId/session_storage/key/$key'),
Command.REMOVE_SESSION_STORAGE_ITEM:
('DELETE', '/session/$sessionId/session_storage/key/$key'),
Command.GET_SESSION_STORAGE_KEYS:
('GET', '/session/$sessionId/session_storage'),
Command.SET_SESSION_STORAGE_ITEM:
('POST', '/session/$sessionId/session_storage'),
Command.CLEAR_SESSION_STORAGE:
('DELETE', '/session/$sessionId/session_storage'),
Command.GET_SESSION_STORAGE_SIZE:
('GET', '/session/$sessionId/session_storage/size'),
Command.GET_LOG:
('POST', '/session/$sessionId/log'),
Command.GET_AVAILABLE_LOG_TYPES:
('GET', '/session/$sessionId/log/types'),
Command.CURRENT_CONTEXT_HANDLE:
('GET', '/session/$sessionId/context'),
Command.CONTEXT_HANDLES:
('GET', '/session/$sessionId/contexts'),
Command.SWITCH_TO_CONTEXT:
('POST', '/session/$sessionId/context'),
}
def execute(self, command, params):
"""
Send a command to the remote server.
        Any path substitutions required for the URL mapped to the command should be
included in the command parameters.
:Args:
- command - A string specifying the command to execute.
- params - A dictionary of named parameters to send with the command as
its JSON payload.
"""
command_info = self._commands[command]
assert command_info is not None, 'Unrecognised command %s' % command
data = utils.dump_json(params)
path = string.Template(command_info[1]).substitute(params)
url = '%s%s' % (self._url, path)
return self._request(command_info[0], url, body=data)
def _request(self, method, url, body=None):
"""
Send an HTTP request to the remote server.
:Args:
- method - A string for the HTTP method to send the request with.
- url - A string for the URL to send the request to.
- body - A string for request body. Ignored unless method is POST or PUT.
:Returns:
A dictionary with the server's parsed JSON response.
"""
LOGGER.debug('%s %s %s' % (method, url, body))
parsed_url = parse.urlparse(url)
headers = self.get_remote_connection_headers(parsed_url, self.keep_alive)
if self.keep_alive:
if body and method != 'POST' and method != 'PUT':
body = None
try:
self._conn.request(method, parsed_url.path, body, headers)
resp = self._conn.getresponse()
except (httplib.HTTPException, socket.error):
self._conn.close()
raise
statuscode = resp.status
else:
password_manager = None
if parsed_url.username:
netloc = parsed_url.hostname
if parsed_url.port:
netloc += ":%s" % parsed_url.port
cleaned_url = parse.urlunparse((
parsed_url.scheme,
netloc,
parsed_url.path,
parsed_url.params,
parsed_url.query,
parsed_url.fragment))
password_manager = url_request.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(None,
"%s://%s" % (parsed_url.scheme, netloc),
parsed_url.username,
parsed_url.password)
request = Request(cleaned_url, data=body.encode('utf-8'), method=method)
else:
request = Request(url, data=body.encode('utf-8'), method=method)
for key, val in headers.items():
request.add_header(key, val)
if password_manager:
opener = url_request.build_opener(url_request.HTTPRedirectHandler(),
HttpErrorHandler(),
url_request.HTTPBasicAuthHandler(password_manager))
else:
opener = url_request.build_opener(url_request.HTTPRedirectHandler(),
HttpErrorHandler())
resp = opener.open(request, timeout=self._timeout)
statuscode = resp.code
if not hasattr(resp, 'getheader'):
if hasattr(resp.headers, 'getheader'):
resp.getheader = lambda x: resp.headers.getheader(x)
elif hasattr(resp.headers, 'get'):
resp.getheader = lambda x: resp.headers.get(x)
data = resp.read()
try:
if 300 <= statuscode < 304:
return self._request('GET', resp.getheader('location'))
body = data.decode('utf-8').replace('\x00', '').strip()
if 399 < statuscode <= 500:
return {'status': statuscode, 'value': body}
content_type = []
if resp.getheader('Content-Type') is not None:
content_type = resp.getheader('Content-Type').split(';')
if not any([x.startswith('image/png') for x in content_type]):
try:
data = utils.load_json(body.strip())
except ValueError:
if 199 < statuscode < 300:
status = ErrorCode.SUCCESS
else:
status = ErrorCode.UNKNOWN_ERROR
return {'status': status, 'value': body.strip()}
assert type(data) is dict, (
'Invalid server response body: %s' % body)
# Some of the drivers incorrectly return a response
# with no 'value' field when they should return null.
if 'value' not in data:
data['value'] = None
return data
else:
data = {'status': 0, 'value': body.strip()}
return data
finally:
LOGGER.debug("Finished Request")
resp.close()
| 1 | 14,688 | Update after command rename | SeleniumHQ-selenium | java |
@@ -79,7 +79,7 @@ module Faker
digits = Faker::Number.leading_zero_number(9) until digits&.match(/(\d)((?!\1)\d)+/)
first_digit = brazilian_citizen_number_checksum_digit(digits)
second_digit = brazilian_citizen_number_checksum_digit(digits + first_digit)
- [digits, first_digit, second_digit].join
+ "#{digits.gsub(/(\d{3})(?=\d)/, '\1.')}-#{first_digit}#{second_digit}"
end
private | 1 | # frozen_string_literal: true
module Faker
class IDNumber < Base
CHECKS = 'TRWAGMYFPDXBNJZSQVHLCKE'
INVALID_SSN = [
/0{3}-\d{2}-\d{4}/,
/\d{3}-0{2}-\d{4}/,
/\d{3}-\d{2}-0{4}/,
/666-\d{2}-\d{4}/,
/9\d{2}-\d{2}-\d{4}/
].freeze
ZA_RACE_DIGIT = '8'
ZA_CITIZENSHIP_DIGITS = %w[0 1].freeze
class << self
def valid
_translate('valid')
end
def invalid
_translate('invalid')
end
def ssn_valid
ssn = regexify(/[0-8]\d{2}-\d{2}-\d{4}/)
# We could still have all 0s in one segment or another
INVALID_SSN.any? { |regex| regex =~ ssn } ? ssn_valid : ssn
end
def spanish_citizen_number
num = Faker::Number.number(8)
mod = num.to_i % 23
check = CHECKS[mod]
"#{num}-#{check}"
end
def spanish_foreign_citizen_number
code = 'XYZ'
digits = Faker::Number.number(7)
prefix = code[rand(code.length)]
prefix_val = 'XYZ'.index(prefix).to_s
mod = "#{prefix_val}#{digits}".to_i % 23
check = CHECKS[mod]
"#{prefix}-#{digits}-#{check}"
end
def valid_south_african_id_number
id_number = [
Faker::Date.birthday.strftime('%y%m%d'),
Faker::Number.number(4),
ZA_CITIZENSHIP_DIGITS.sample(random: Faker::Config.random),
ZA_RACE_DIGIT
].join
[id_number, south_african_id_checksum_digit(id_number)].join
end
alias south_african_id_number valid_south_african_id_number
def invalid_south_african_id_number
invalid_date_of_birth = [
Faker::Number.number(2),
Faker::Number.between(13, 99),
Faker::Number.between(32, 99)
].map(&:to_s).join
id_number = [
invalid_date_of_birth,
Faker::Number.number(4),
ZA_CITIZENSHIP_DIGITS.sample(random: Faker::Config.random),
ZA_RACE_DIGIT
].join
[id_number, south_african_id_checksum_digit(id_number)].join
end
def brazilian_citizen_number
digits = Faker::Number.leading_zero_number(9) until digits&.match(/(\d)((?!\1)\d)+/)
first_digit = brazilian_citizen_number_checksum_digit(digits)
second_digit = brazilian_citizen_number_checksum_digit(digits + first_digit)
[digits, first_digit, second_digit].join
end
private
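      # Luhn-style check digit: sum the odd-position digits, double the number formed
      # by the even-position digits and sum its digits, then take (10 - total % 10) % 10.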
def south_african_id_checksum_digit(id_number)
value_parts = id_number.chars
even_digits = value_parts
.select
.with_index { |_, i| (i + 1).even? }
odd_digits_without_last_character = value_parts[0...-1]
.select
.with_index { |_, i| (i + 1).odd? }
sum_of_odd_digits = odd_digits_without_last_character.map(&:to_i).reduce(:+)
even_digits_times_two = (even_digits.join('').to_i * 2).to_s
sum_of_even_digits = even_digits_times_two.chars.map(&:to_i).reduce(:+)
total_sum = sum_of_odd_digits + sum_of_even_digits
((10 - (total_sum % 10)) % 10).to_s
end
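      # CPF check digit: multiply each digit by a positional weight, take ten times
      # the weighted sum modulo 11, and map a remainder of 10 to 0.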
def brazilian_citizen_number_checksum_digit(digits)
digit_sum = digits.chars.each_with_index.inject(0) do |acc, (digit, i)|
acc + digit.to_i * (digits.size + 1 - i)
end * 10
remainder = digit_sum % 11
remainder == 10 ? '0' : remainder.to_s
end
def _translate(key)
parse("id_number.#{key}")
end
end
end
end
| 1 | 8,896 | What about adding an optional parameter that allows users to choose whether they want to use the format or not? | faker-ruby-faker | rb |
@@ -35,8 +35,8 @@ func TestChainHead(t *testing.T) {
func TestChainLs(t *testing.T) {
tf.IntegrationTest(t)
- ctx := context.Background()
t.Skip("DRAGONS: fake post for integration test")
+ ctx := context.Background()
t.Run("chain ls with json encoding returns the whole chain as json", func(t *testing.T) {
builder := test.NewNodeBuilder(t) | 1 | package commands_test
import (
"bytes"
"context"
"encoding/json"
"fmt"
"testing"
"github.com/ipfs/go-cid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-filecoin/fixtures"
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node/test"
"github.com/filecoin-project/go-filecoin/internal/pkg/block"
tf "github.com/filecoin-project/go-filecoin/internal/pkg/testhelpers/testflags"
)
func TestChainHead(t *testing.T) {
tf.IntegrationTest(t)
ctx := context.Background()
builder := test.NewNodeBuilder(t)
_, cmdClient, done := builder.BuildAndStartAPI(ctx)
defer done()
jsonResult := cmdClient.RunSuccess(ctx, "chain", "head", "--enc", "json").ReadStdoutTrimNewlines()
var cidsFromJSON []cid.Cid
err := json.Unmarshal([]byte(jsonResult), &cidsFromJSON)
assert.NoError(t, err)
}
func TestChainLs(t *testing.T) {
tf.IntegrationTest(t)
ctx := context.Background()
t.Skip("DRAGONS: fake post for integration test")
t.Run("chain ls with json encoding returns the whole chain as json", func(t *testing.T) {
builder := test.NewNodeBuilder(t)
buildWithMiner(t, builder)
n, cmdClient, done := builder.BuildAndStartAPI(ctx)
defer done()
blk, err := n.BlockMining.BlockMiningAPI.MiningOnce(ctx)
require.NoError(t, err)
c := blk.Cid()
result2 := cmdClient.RunSuccess(ctx, "chain", "ls", "--enc", "json").ReadStdoutTrimNewlines()
var bs [][]block.Block
for _, line := range bytes.Split([]byte(result2), []byte{'\n'}) {
var b []block.Block
err := json.Unmarshal(line, &b)
require.NoError(t, err)
bs = append(bs, b)
require.Equal(t, 1, len(b))
}
assert.Equal(t, 2, len(bs))
assert.True(t, bs[1][0].Parents.Empty())
assert.True(t, c.Equals(bs[0][0].Cid()))
})
t.Run("chain ls with chain of size 1 returns genesis block", func(t *testing.T) {
builder := test.NewNodeBuilder(t)
_, cmdClient, done := builder.BuildAndStartAPI(ctx)
defer done()
op := cmdClient.RunSuccess(ctx, "chain", "ls", "--enc", "json")
result := op.ReadStdoutTrimNewlines()
var b []block.Block
err := json.Unmarshal([]byte(result), &b)
require.NoError(t, err)
assert.True(t, b[0].Parents.Empty())
})
t.Run("chain ls with text encoding returns only CIDs", func(t *testing.T) {
builder := test.NewNodeBuilder(t)
buildWithMiner(t, builder)
n, cmdClient, done := builder.BuildAndStartAPI(ctx)
defer done()
var blocks []block.Block
blockJSON := cmdClient.RunSuccess(ctx, "chain", "ls", "--enc", "json").ReadStdoutTrimNewlines()
err := json.Unmarshal([]byte(blockJSON), &blocks)
genesisBlockCid := blocks[0].Cid().String()
require.NoError(t, err)
blk, err := n.BlockMining.BlockMiningAPI.MiningOnce(ctx)
require.NoError(t, err)
newBlockCid := blk.Cid()
expectedOutput := fmt.Sprintf("%s\n%s", newBlockCid, genesisBlockCid)
chainLsResult := cmdClient.RunSuccess(ctx, "chain", "ls").ReadStdoutTrimNewlines()
assert.Equal(t, chainLsResult, expectedOutput)
})
t.Run("chain ls --long returns CIDs, Miner, block height and message count", func(t *testing.T) {
builder := test.NewNodeBuilder(t)
buildWithMiner(t, builder)
n, cmdClient, done := builder.BuildAndStartAPI(ctx)
defer done()
blk, err := n.BlockMining.BlockMiningAPI.MiningOnce(ctx)
require.NoError(t, err)
newBlockCid := blk.Cid().String()
chainLsResult := cmdClient.RunSuccess(ctx, "chain", "ls", "--long").ReadStdoutTrimNewlines()
assert.Contains(t, chainLsResult, newBlockCid)
assert.Contains(t, chainLsResult, fixtures.TestMiners[0])
assert.Contains(t, chainLsResult, "1")
assert.Contains(t, chainLsResult, "0")
})
t.Run("chain ls --long with JSON encoding returns integer string block height", func(t *testing.T) {
builder := test.NewNodeBuilder(t)
buildWithMiner(t, builder)
n, cmdClient, done := builder.BuildAndStartAPI(ctx)
defer done()
_, err := n.BlockMining.BlockMiningAPI.MiningOnce(ctx)
require.NoError(t, err)
chainLsResult := cmdClient.RunSuccess(ctx, "chain", "ls", "--long", "--enc", "json").ReadStdoutTrimNewlines()
assert.Contains(t, chainLsResult, `"height":"0"`)
assert.Contains(t, chainLsResult, `"height":"1"`)
})
}
| 1 | 23,572 | Lint didn't like this context being before the skip | filecoin-project-venus | go |
@@ -0,0 +1,18 @@
+package tequilapi
+
+import "net/http"
+
+type corsHandler struct {
+ originalHandler http.Handler
+}
+
+func (wrapper corsHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+ resp.Header().Set("Access-Control-Allow-Origin", "*")
+ resp.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
+ wrapper.originalHandler.ServeHTTP(resp, req)
+}
+
+//ApplyCors wraps original handler by adding cors headers to response BEFORE original ServeHTTP method is called
+func ApplyCors(original http.Handler) http.Handler {
+ return corsHandler{original}
+} | 1 | 1 | 10,109 | so now basically anybody could drink my tequila? currently electron dev-env runs on `http://localhost:9080` | mysteriumnetwork-node | go |
|
@@ -259,7 +259,7 @@ bool Plugin::loadLib(ILXQtPanelPluginLibrary const * pluginLib)
mPlugin = pluginLib->instance(startupInfo);
if (!mPlugin)
{
- qWarning() << QString("Can't load plugin \"%1\". Plugin can't build ILXQtPanelPlugin.").arg(mPluginLoader->fileName());
+ qWarning() << QString("Can't load plugin \"%1\". Plugin can't build ILXQtPanelPlugin.").arg(mDesktopFile.id());
return false;
}
| 1 | /* BEGIN_COMMON_COPYRIGHT_HEADER
* (c)LGPL2+
*
* LXDE-Qt - a lightweight, Qt based, desktop toolset
* http://razor-qt.org
*
* Copyright: 2012 Razor team
* Authors:
* Alexander Sokoloff <[email protected]>
*
* This program or library is free software; you can redistribute it
* and/or modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
* You should have received a copy of the GNU Lesser General
* Public License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301 USA
*
* END_COMMON_COPYRIGHT_HEADER */
#include "plugin.h"
#include "ilxqtpanelplugin.h"
#include "pluginsettings_p.h"
#include "lxqtpanel.h"
#include <QDebug>
#include <QProcessEnvironment>
#include <QStringList>
#include <QDir>
#include <QFileInfo>
#include <QPluginLoader>
#include <QGridLayout>
#include <QDialog>
#include <QEvent>
#include <QMenu>
#include <QMouseEvent>
#include <QApplication>
#include <QWindow>
#include <memory>
#include <LXQt/Settings>
#include <LXQt/Translator>
#include <XdgIcon>
// statically linked built-in plugins
#include "../plugin-clock/lxqtclock.h" // clock
extern void * loadPluginTranslation_clock_helper;
#include "../plugin-desktopswitch/desktopswitch.h" // desktopswitch
extern void * loadPluginTranslation_desktopswitch_helper;
#include "../plugin-mainmenu/lxqtmainmenu.h" // mainmenu
extern void * loadPluginTranslation_mainmenu_helper;
#include "../plugin-quicklaunch/lxqtquicklaunchplugin.h" // quicklaunch
extern void * loadPluginTranslation_quicklaunch_helper;
#include "../plugin-showdesktop/showdesktop.h" // showdesktop
extern void * loadPluginTranslation_showdesktop_helper;
#include "../plugin-spacer/spacer.h" // spacer
extern void * loadPluginTranslation_spacer_helper;
#include "../plugin-statusnotifier/statusnotifier.h" // statusnotifier
extern void * loadPluginTranslation_statusnotifier_helper;
#include "../plugin-taskbar/lxqttaskbarplugin.h" // taskbar
extern void * loadPluginTranslation_taskbar_helper;
#include "../plugin-tray/lxqttrayplugin.h" // tray
extern void * loadPluginTranslation_tray_helper;
#include "../plugin-worldclock/lxqtworldclock.h" // worldclock
extern void * loadPluginTranslation_worldclock_helper;
QColor Plugin::mMoveMarkerColor= QColor(255, 0, 0, 255);
/************************************************
************************************************/
Plugin::Plugin(const LXQt::PluginInfo &desktopFile, LXQt::Settings *settings, const QString &settingsGroup, LXQtPanel *panel) :
QFrame(panel),
mDesktopFile(desktopFile),
mPluginLoader(0),
mPlugin(0),
mPluginWidget(0),
mAlignment(AlignLeft),
mPanel(panel)
{
mSettings = PluginSettingsFactory::create(settings, settingsGroup);
setWindowTitle(desktopFile.name());
mName = desktopFile.name();
QStringList dirs;
dirs << QProcessEnvironment::systemEnvironment().value("LXQTPANEL_PLUGIN_PATH").split(":");
dirs << PLUGIN_DIR;
bool found = false;
if(ILXQtPanelPluginLibrary const * pluginLib = findStaticPlugin(desktopFile.id()))
{
// this is a static plugin
found = true;
loadLib(pluginLib);
}
else {
// this plugin is a dynamically loadable module
QString baseName = QString("lib%1.so").arg(desktopFile.id());
foreach(const QString &dirName, dirs)
{
QFileInfo fi(QDir(dirName), baseName);
if (fi.exists())
{
found = true;
if (loadModule(fi.absoluteFilePath()))
break;
}
}
}
if (!isLoaded())
{
if (!found)
qWarning() << QString("Plugin %1 not found in the").arg(desktopFile.id()) << dirs;
return;
}
setObjectName(mPlugin->themeId() + "Plugin");
// plugin handle for easy context menu
setProperty("NeedsHandle", mPlugin->flags().testFlag(ILXQtPanelPlugin::NeedsHandle));
QString s = mSettings->value("alignment").toString();
    // Return default value
if (s.isEmpty())
{
mAlignment = (mPlugin->flags().testFlag(ILXQtPanelPlugin::PreferRightAlignment)) ?
Plugin::AlignRight :
Plugin::AlignLeft;
}
else
{
mAlignment = (s.toUpper() == "RIGHT") ?
Plugin::AlignRight :
Plugin::AlignLeft;
}
if (mPluginWidget)
{
QGridLayout* layout = new QGridLayout(this);
layout->setSpacing(0);
layout->setContentsMargins(0, 0, 0, 0);
setLayout(layout);
layout->addWidget(mPluginWidget, 0, 0);
}
// delay the connection to settingsChanged to avoid conflicts
// while the plugin is still being initialized
connect(mSettings, &PluginSettings::settingsChanged,
this, &Plugin::settingsChanged);
saveSettings();
}
/************************************************
************************************************/
Plugin::~Plugin()
{
delete mPlugin;
delete mPluginLoader;
delete mSettings;
}
void Plugin::setAlignment(Plugin::Alignment alignment)
{
mAlignment = alignment;
saveSettings();
}
/************************************************
************************************************/
namespace
{
//helper types for static plugins storage & binary search
typedef std::unique_ptr<ILXQtPanelPluginLibrary> plugin_ptr_t;
typedef std::tuple<QString, plugin_ptr_t, void *> plugin_tuple_t;
//NOTE: Please keep the plugins sorted by name while adding new plugins.
//NOTE2: we need to reference some (dummy) symbol from (autogenerated) LXQtPluginTranslationLoader.cpp
    // so that it is not stripped (as unused/unreferenced) at static link time
static plugin_tuple_t const static_plugins[] = {
#if defined(WITH_CLOCK_PLUGIN)
std::make_tuple(QLatin1String("clock"), plugin_ptr_t{new LXQtClockPluginLibrary}, loadPluginTranslation_clock_helper),// clock
#endif
#if defined(WITH_DESKTOPSWITCH_PLUGIN)
std::make_tuple(QLatin1String("desktopswitch"), plugin_ptr_t{new DesktopSwitchPluginLibrary}, loadPluginTranslation_desktopswitch_helper),// desktopswitch
#endif
#if defined(WITH_MAINMENU_PLUGIN)
std::make_tuple(QLatin1String("mainmenu"), plugin_ptr_t{new LXQtMainMenuPluginLibrary}, loadPluginTranslation_mainmenu_helper),// mainmenu
#endif
#if defined(WITH_QUICKLAUNCH_PLUGIN)
std::make_tuple(QLatin1String("quicklaunch"), plugin_ptr_t{new LXQtQuickLaunchPluginLibrary}, loadPluginTranslation_quicklaunch_helper),// quicklaunch
#endif
#if defined(WITH_SHOWDESKTOP_PLUGIN)
std::make_tuple(QLatin1String("showdesktop"), plugin_ptr_t{new ShowDesktopLibrary}, loadPluginTranslation_showdesktop_helper),// showdesktop
#endif
#if defined(WITH_SPACER_PLUGIN)
std::make_tuple(QLatin1String("spacer"), plugin_ptr_t{new SpacerPluginLibrary}, loadPluginTranslation_spacer_helper),// spacer
#endif
#if defined(WITH_STATUSNOTIFIER_PLUGIN)
std::make_tuple(QLatin1String("statusnotifier"), plugin_ptr_t{new StatusNotifierLibrary}, loadPluginTranslation_statusnotifier_helper),// statusnotifier
#endif
#if defined(WITH_TASKBAR_PLUGIN)
std::make_tuple(QLatin1String("taskbar"), plugin_ptr_t{new LXQtTaskBarPluginLibrary}, loadPluginTranslation_taskbar_helper),// taskbar
#endif
#if defined(WITH_TRAY_PLUGIN)
std::make_tuple(QLatin1String("tray"), plugin_ptr_t{new LXQtTrayPluginLibrary}, loadPluginTranslation_tray_helper),// tray
#endif
#if defined(WITH_WORLDCLOCK_PLUGIN)
std::make_tuple(QLatin1String("worldclock"), plugin_ptr_t{new LXQtWorldClockLibrary}, loadPluginTranslation_worldclock_helper),// worldclock
#endif
};
static constexpr plugin_tuple_t const * const plugins_begin = static_plugins;
static constexpr plugin_tuple_t const * const plugins_end = static_plugins + sizeof (static_plugins) / sizeof (static_plugins[0]);
struct assert_helper
{
assert_helper()
{
Q_ASSERT(std::is_sorted(plugins_begin, plugins_end
, [] (plugin_tuple_t const & p1, plugin_tuple_t const & p2) -> bool { return std::get<0>(p1) < std::get<0>(p2); }));
}
};
static assert_helper h;
}
ILXQtPanelPluginLibrary const * Plugin::findStaticPlugin(const QString &libraryName)
{
// find a static plugin library by name -> binary search
plugin_tuple_t const * plugin = std::lower_bound(plugins_begin, plugins_end, libraryName
, [] (plugin_tuple_t const & plugin, QString const & name) -> bool { return std::get<0>(plugin) < name; });
if (plugins_end != plugin && libraryName == std::get<0>(*plugin))
return std::get<1>(*plugin).get();
return nullptr;
}
// load a plugin from a library
bool Plugin::loadLib(ILXQtPanelPluginLibrary const * pluginLib)
{
ILXQtPanelPluginStartupInfo startupInfo;
startupInfo.settings = mSettings;
startupInfo.desktopFile = &mDesktopFile;
startupInfo.lxqtPanel = mPanel;
mPlugin = pluginLib->instance(startupInfo);
if (!mPlugin)
{
qWarning() << QString("Can't load plugin \"%1\". Plugin can't build ILXQtPanelPlugin.").arg(mPluginLoader->fileName());
return false;
}
mPluginWidget = mPlugin->widget();
if (mPluginWidget)
{
mPluginWidget->setObjectName(mPlugin->themeId());
}
this->setSizePolicy(QSizePolicy::Expanding, QSizePolicy::Expanding);
return true;
}
// load dynamic plugin from a *.so module
bool Plugin::loadModule(const QString &libraryName)
{
mPluginLoader = new QPluginLoader(libraryName);
if (!mPluginLoader->load())
{
qWarning() << mPluginLoader->errorString();
return false;
}
QObject *obj = mPluginLoader->instance();
if (!obj)
{
qWarning() << mPluginLoader->errorString();
return false;
}
ILXQtPanelPluginLibrary* pluginLib= qobject_cast<ILXQtPanelPluginLibrary*>(obj);
if (!pluginLib)
{
qWarning() << QString("Can't load plugin \"%1\". Plugin is not a ILXQtPanelPluginLibrary.").arg(mPluginLoader->fileName());
delete obj;
return false;
}
return loadLib(pluginLib);
}
/************************************************
************************************************/
void Plugin::settingsChanged()
{
mPlugin->settingsChanged();
}
/************************************************
************************************************/
void Plugin::saveSettings()
{
mSettings->setValue("alignment", (mAlignment == AlignLeft) ? "Left" : "Right");
mSettings->setValue("type", mDesktopFile.id());
mSettings->sync();
}
/************************************************
************************************************/
void Plugin::contextMenuEvent(QContextMenuEvent *event)
{
mPanel->showPopupMenu(this);
}
/************************************************
************************************************/
void Plugin::mousePressEvent(QMouseEvent *event)
{
switch (event->button())
{
case Qt::LeftButton:
mPlugin->activated(ILXQtPanelPlugin::Trigger);
break;
case Qt::MidButton:
mPlugin->activated(ILXQtPanelPlugin::MiddleClick);
break;
default:
break;
}
}
/************************************************
************************************************/
void Plugin::mouseDoubleClickEvent(QMouseEvent*)
{
mPlugin->activated(ILXQtPanelPlugin::DoubleClick);
}
/************************************************
************************************************/
void Plugin::showEvent(QShowEvent *)
{
if (mPluginWidget)
mPluginWidget->adjustSize();
}
/************************************************
************************************************/
QMenu *Plugin::popupMenu() const
{
QString name = this->name().replace("&", "&&");
QMenu* menu = new QMenu(windowTitle());
if (mPlugin->flags().testFlag(ILXQtPanelPlugin::HaveConfigDialog))
{
QAction* configAction = new QAction(
XdgIcon::fromTheme(QLatin1String("preferences-other")),
tr("Configure \"%1\"").arg(name), menu);
menu->addAction(configAction);
connect(configAction, SIGNAL(triggered()), this, SLOT(showConfigureDialog()));
}
QAction* moveAction = new QAction(XdgIcon::fromTheme("transform-move"), tr("Move \"%1\"").arg(name), menu);
menu->addAction(moveAction);
connect(moveAction, SIGNAL(triggered()), this, SIGNAL(startMove()));
menu->addSeparator();
QAction* removeAction = new QAction(
XdgIcon::fromTheme(QLatin1String("list-remove")),
tr("Remove \"%1\"").arg(name), menu);
menu->addAction(removeAction);
connect(removeAction, SIGNAL(triggered()), this, SLOT(requestRemove()));
return menu;
}
/************************************************
************************************************/
bool Plugin::isSeparate() const
{
return mPlugin->isSeparate();
}
/************************************************
************************************************/
bool Plugin::isExpandable() const
{
return mPlugin->isExpandable();
}
/************************************************
************************************************/
void Plugin::realign()
{
if (mPlugin)
mPlugin->realign();
}
/************************************************
************************************************/
void Plugin::showConfigureDialog()
{
if (!mConfigDialog)
mConfigDialog = mPlugin->configureDialog();
if (!mConfigDialog)
return;
connect(this, &Plugin::destroyed, mConfigDialog.data(), &QWidget::close);
mPanel->willShowWindow(mConfigDialog);
mConfigDialog->show();
mConfigDialog->raise();
mConfigDialog->activateWindow();
WId wid = mConfigDialog->windowHandle()->winId();
KWindowSystem::activateWindow(wid);
KWindowSystem::setOnDesktop(wid, KWindowSystem::currentDesktop());
}
/************************************************
************************************************/
void Plugin::requestRemove()
{
emit remove();
deleteLater();
}
| 1 | 5,932 | `mPluginLoader` is nullptr in static plugins, which causes segmentation faults | lxqt-lxqt-panel | cpp |
@@ -1,12 +1,8 @@
package org.phoenicis.javafx.views.mainwindow.ui;
-import javafx.event.ActionEvent;
-import javafx.event.EventHandler;
-import org.phoenicis.javafx.views.common.ThemeManager;
-
import javafx.beans.property.StringProperty;
-import javafx.scene.control.TextField;
import javafx.scene.control.Button;
+import javafx.scene.control.TextField;
import javafx.scene.layout.AnchorPane;
import java.util.function.Consumer; | 1 | package org.phoenicis.javafx.views.mainwindow.ui;
import javafx.event.ActionEvent;
import javafx.event.EventHandler;
import org.phoenicis.javafx.views.common.ThemeManager;
import javafx.beans.property.StringProperty;
import javafx.scene.control.TextField;
import javafx.scene.control.Button;
import javafx.scene.layout.AnchorPane;
import java.util.function.Consumer;
public class SearchBox extends AnchorPane {
private TextField searchField;
private Button clearButton;
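    /**
     * Creates a search box whose text field reports every text change to {@code onSearch}
     * and whose clear button empties the field and runs {@code onClear}.
     */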
public SearchBox(Consumer<String> onSearch, Runnable onClear) {
super();
this.getStyleClass().add("searchBox");
this.searchField = new TextField();
this.searchField.getStyleClass().add("searchBar");
this.searchField.prefHeightProperty().bind(this.prefHeightProperty());
this.searchField.prefWidthProperty().bind(this.prefWidthProperty());
this.searchField.textProperty().addListener(event -> onSearch.accept(getText()));
AnchorPane.setLeftAnchor(searchField, 0.0);
AnchorPane.setRightAnchor(searchField, 0.0);
this.clearButton = new Button();
this.clearButton.getStyleClass().add("searchCleanButton");
this.clearButton.setOnMouseClicked(event -> {
this.searchField.clear();
onClear.run();
});
AnchorPane.setRightAnchor(clearButton, 0.0);
this.getChildren().addAll(searchField, clearButton);
}
public StringProperty textProperty() {
return this.searchField.textProperty();
}
public String getText() {
return this.searchField.getText();
}
}
| 1 | 11,139 | Better use `Optional<Consumer<String>> onSearch` here. | PhoenicisOrg-phoenicis | java |
@@ -16,7 +16,6 @@ BOOST_AUTO_TEST_CASE(rfc4648_test_vectors)
{
using namespace osrm::engine;
- BOOST_CHECK_EQUAL(encodeBase64(""), "");
BOOST_CHECK_EQUAL(encodeBase64("f"), "Zg==");
BOOST_CHECK_EQUAL(encodeBase64("fo"), "Zm8=");
BOOST_CHECK_EQUAL(encodeBase64("foo"), "Zm9v"); | 1 | #include "engine/base64.hpp"
#include "engine/hint.hpp"
#include "mocks/mock_datafacade.hpp"
#include <boost/test/unit_test.hpp>
#include <boost/test/test_case_template.hpp>
#include <iostream>
#include <algorithm>
// RFC 4648 "The Base16, Base32, and Base64 Data Encodings"
BOOST_AUTO_TEST_SUITE(base64)
// For test vectors see section 10: https://tools.ietf.org/html/rfc4648#section-10
BOOST_AUTO_TEST_CASE(rfc4648_test_vectors)
{
using namespace osrm::engine;
BOOST_CHECK_EQUAL(encodeBase64(""), "");
BOOST_CHECK_EQUAL(encodeBase64("f"), "Zg==");
BOOST_CHECK_EQUAL(encodeBase64("fo"), "Zm8=");
BOOST_CHECK_EQUAL(encodeBase64("foo"), "Zm9v");
BOOST_CHECK_EQUAL(encodeBase64("foob"), "Zm9vYg==");
BOOST_CHECK_EQUAL(encodeBase64("fooba"), "Zm9vYmE=");
BOOST_CHECK_EQUAL(encodeBase64("foobar"), "Zm9vYmFy");
}
BOOST_AUTO_TEST_CASE(rfc4648_test_vectors_roundtrip)
{
using namespace osrm::engine;
BOOST_CHECK_EQUAL(decodeBase64(encodeBase64("")), "");
BOOST_CHECK_EQUAL(decodeBase64(encodeBase64("f")), "f");
BOOST_CHECK_EQUAL(decodeBase64(encodeBase64("fo")), "fo");
BOOST_CHECK_EQUAL(decodeBase64(encodeBase64("foo")), "foo");
BOOST_CHECK_EQUAL(decodeBase64(encodeBase64("foob")), "foob");
BOOST_CHECK_EQUAL(decodeBase64(encodeBase64("fooba")), "fooba");
BOOST_CHECK_EQUAL(decodeBase64(encodeBase64("foobar")), "foobar");
}
BOOST_AUTO_TEST_CASE(hint_encoding_decoding_roundtrip)
{
using namespace osrm::engine;
using namespace osrm::util;
const Coordinate coordinate;
const PhantomNode phantom;
const osrm::test::MockDataFacade facade{};
const Hint hint{phantom, facade.GetCheckSum()};
const auto base64 = hint.ToBase64();
BOOST_CHECK(0 == std::count(begin(base64), end(base64), '+'));
BOOST_CHECK(0 == std::count(begin(base64), end(base64), '/'));
const auto decoded = Hint::FromBase64(base64);
BOOST_CHECK_EQUAL(hint, decoded);
}
BOOST_AUTO_TEST_CASE(hint_encoding_decoding_roundtrip_bytewise)
{
using namespace osrm::engine;
using namespace osrm::util;
const Coordinate coordinate;
const PhantomNode phantom;
const osrm::test::MockDataFacade facade{};
const Hint hint{phantom, facade.GetCheckSum()};
const auto decoded = Hint::FromBase64(hint.ToBase64());
BOOST_CHECK(std::equal(reinterpret_cast<const unsigned char *>(&hint),
reinterpret_cast<const unsigned char *>(&hint) + sizeof(Hint),
reinterpret_cast<const unsigned char *>(&decoded)));
}
BOOST_AUTO_TEST_SUITE_END()
| 1 | 15,918 | this is asserted as invalid input | Project-OSRM-osrm-backend | cpp |
@@ -10,12 +10,15 @@ import (
"time"
"github.com/thought-machine/please/src/core"
+ "github.com/thought-machine/please/src/parse"
)
// Print produces a Python call which would (hopefully) regenerate the same build rule if run.
// This is of course not ideal since they were almost certainly created as a java_library
// or some similar wrapper rule, but we've lost that information by now.
-func Print(graph *core.BuildGraph, targets []core.BuildLabel, fields, labels []string) {
+func Print(state *core.BuildState, targets []core.BuildLabel, fields, labels []string) {
+ graph := state.Graph
+ order := parse.NewAspParser(state).BuildRuleArgOrder()
for _, target := range targets {
t := graph.TargetOrDie(target)
if len(labels) > 0 { | 1 | package query
import (
"fmt"
"io"
"os"
"reflect"
"sort"
"strings"
"time"
"github.com/thought-machine/please/src/core"
)
// Print produces a Python call which would (hopefully) regenerate the same build rule if run.
// This is of course not ideal since they were almost certainly created as a java_library
// or some similar wrapper rule, but we've lost that information by now.
func Print(graph *core.BuildGraph, targets []core.BuildLabel, fields, labels []string) {
for _, target := range targets {
t := graph.TargetOrDie(target)
if len(labels) > 0 {
for _, prefix := range labels {
for _, label := range t.Labels {
if strings.HasPrefix(label, prefix) {
fmt.Printf("%s\n", strings.TrimPrefix(label, prefix))
}
}
}
continue
}
if len(fields) == 0 {
fmt.Fprintf(os.Stdout, "# %s:\n", target)
}
if len(fields) > 0 {
newPrinter(os.Stdout, t, 0).PrintFields(fields)
} else {
newPrinter(os.Stdout, t, 0).PrintTarget()
}
}
}
// specialFields is a mapping of field name -> any special casing relating to how to print it.
var specialFields = map[string]func(*printer) (string, bool){
"name": func(p *printer) (string, bool) {
return "'" + p.target.Label.Name + "'", true
},
"building_description": func(p *printer) (string, bool) {
s, ok := p.genericPrint(reflect.ValueOf(p.target.BuildingDescription))
return s, ok && p.target.BuildingDescription != core.DefaultBuildingDescription
},
"deps": func(p *printer) (string, bool) {
return p.genericPrint(reflect.ValueOf(p.target.DeclaredDependenciesStrict()))
},
"exported_deps": func(p *printer) (string, bool) {
return p.genericPrint(reflect.ValueOf(p.target.ExportedDependencies()))
},
"visibility": func(p *printer) (string, bool) {
if len(p.target.Visibility) == 1 && p.target.Visibility[0] == core.WholeGraph[0] {
return "['PUBLIC']", true
}
return p.genericPrint(reflect.ValueOf(p.target.Visibility))
},
"tools": func(p *printer) (string, bool) {
if tools := p.target.AllNamedTools(); len(tools) > 0 {
return p.genericPrint(reflect.ValueOf(tools))
}
return p.genericPrint(reflect.ValueOf(p.target.AllTools()))
},
"test_tools": func(p *printer) (string, bool) {
if tools := p.target.NamedTestTools(); len(tools) > 0 {
return p.genericPrint(reflect.ValueOf(tools))
}
return p.genericPrint(reflect.ValueOf(p.target.AllTestTools()))
},
"data": func(p *printer) (string, bool) {
if data := p.target.NamedData(); len(data) > 0 {
return p.genericPrint(reflect.ValueOf(data))
}
return p.genericPrint(reflect.ValueOf(p.target.Data))
},
}
// fieldPrecedence defines a specific ordering for fields.
var fieldPrecedence = map[string]int{
"name": -100,
"srcs": -90,
"visibility": 90,
"deps": 100,
}
// A printer is responsible for creating the output of 'plz query print'.
type printer struct {
w io.Writer
target *core.BuildTarget
indent int
doneFields map[string]bool
error bool // true if something went wrong
surroundSyntax bool // true if we are quoting strings or surrounding slices with [] etc.
}
// newPrinter creates a new printer instance.
func newPrinter(w io.Writer, target *core.BuildTarget, indent int) *printer {
return &printer{
w: w,
target: target,
indent: indent,
doneFields: make(map[string]bool, 50), // Leave enough space for all of BuildTarget's fields.
}
}
// printf is an internal function which prints to the internal writer with an indent.
func (p *printer) printf(msg string, args ...interface{}) {
fmt.Fprint(p.w, strings.Repeat(" ", p.indent))
fmt.Fprintf(p.w, msg, args...)
}
// PrintTarget prints an entire build target.
func (p *printer) PrintTarget() {
if p.target.IsFilegroup {
p.printf("filegroup(\n")
} else if p.target.IsRemoteFile {
p.printf("remote_file(\n")
} else {
p.printf("build_rule(\n")
}
p.surroundSyntax = true
p.indent += 4
v := reflect.ValueOf(p.target).Elem()
t := v.Type()
f := make(orderedFields, t.NumField())
for i := 0; i < t.NumField(); i++ {
f[i].structIndex = i
f[i].printIndex = i
if index, present := fieldPrecedence[p.fieldName(t.Field(i))]; present {
f[i].printIndex = index
}
}
sort.Sort(f)
for _, orderedField := range f {
p.printField(t.Field(orderedField.structIndex), v.Field(orderedField.structIndex))
}
p.indent -= 4
p.printf(")\n\n")
}
// PrintFields prints a subset of fields of a build target.
func (p *printer) PrintFields(fields []string) bool {
v := reflect.ValueOf(p.target).Elem()
for _, field := range fields {
f := p.findField(field)
if contents, shouldPrint := p.shouldPrintField(f, v.FieldByIndex(f.Index)); shouldPrint {
if !strings.HasSuffix(contents, "\n") {
contents += "\n"
}
p.printf("%s", contents)
}
}
return p.error
}
// findField returns the field which would print with the given name.
// This isn't as simple as using reflect.Value.FieldByName since the print names
// are different to the actual struct names.
func (p *printer) findField(field string) reflect.StructField {
t := reflect.ValueOf(p.target).Elem().Type()
for i := 0; i < t.NumField(); i++ {
if f := t.Field(i); p.fieldName(f) == field {
return f
}
}
log.Fatalf("Unknown field %s", field)
return reflect.StructField{}
}
// fieldName returns the name we'll use to print a field.
func (p *printer) fieldName(f reflect.StructField) string {
if name := f.Tag.Get("name"); name != "" {
return name
}
	// We don't bother specifying a name tag on fields whose printed name differs from the Go name only by case.
return strings.ToLower(f.Name)
}
// printField prints a single field of a build target.
func (p *printer) printField(f reflect.StructField, v reflect.Value) {
if contents, shouldPrint := p.shouldPrintField(f, v); shouldPrint {
name := p.fieldName(f)
p.printf("%s = %s,\n", name, contents)
p.doneFields[name] = true
}
}
// shouldPrintField returns whether we should print a field and what we'd print if we did.
func (p *printer) shouldPrintField(f reflect.StructField, v reflect.Value) (string, bool) {
if f.Tag.Get("print") == "false" { // Indicates not to print the field.
return "", false
} else if p.target.IsFilegroup && f.Tag.Get("hide") == "filegroup" {
return "", false
}
name := p.fieldName(f)
if p.doneFields[name] {
return "", false
}
if customFunc, present := specialFields[name]; present {
return customFunc(p)
}
return p.genericPrint(v)
}
// genericPrint is the generic print function for a field.
func (p *printer) genericPrint(v reflect.Value) (string, bool) {
switch v.Kind() {
case reflect.Slice:
return p.printSlice(v), v.Len() > 0
case reflect.Map:
return p.printMap(v), v.Len() > 0
case reflect.String:
return p.quote(v.String()), v.Len() > 0
case reflect.Bool:
return "True", v.Bool()
case reflect.Int, reflect.Int32:
return fmt.Sprintf("%d", v.Int()), v.Int() > 0
case reflect.Struct, reflect.Interface:
if stringer, ok := v.Interface().(fmt.Stringer); ok {
return p.quote(stringer.String()), true
}
return "", false
case reflect.Int64:
if v.Type().Name() == "Duration" {
secs := v.Interface().(time.Duration).Seconds()
return fmt.Sprintf("%0.0f", secs), secs > 0.0
}
case reflect.Ptr:
if v.IsNil() {
return "", false
}
return p.genericPrint(v.Elem())
}
log.Error("Unknown field type %s: %s", v.Kind(), v.Type().Name())
p.error = true
return "", false
}
// printSlice prints the representation of a slice field.
func (p *printer) printSlice(v reflect.Value) string {
if v.Len() == 1 {
// Single-element slices are printed on one line
elem, _ := p.genericPrint(v.Index(0))
return p.surround("[", elem, "]", "")
}
s := make([]string, v.Len())
indent := strings.Repeat(" ", p.indent+4)
for i := 0; i < v.Len(); i++ {
elem, _ := p.genericPrint(v.Index(i))
s[i] = p.surround(indent, elem, ",", "\n")
}
return p.surround("[\n", strings.Join(s, ""), strings.Repeat(" ", p.indent)+"]", "")
}
// printMap prints the representation of a map field.
func (p *printer) printMap(v reflect.Value) string {
keys := v.MapKeys()
sort.Slice(keys, func(i, j int) bool { return keys[i].String() < keys[j].String() })
s := make([]string, len(keys))
indent := strings.Repeat(" ", p.indent+4)
for i, key := range keys {
keyElem, _ := p.genericPrint(key)
valElem, _ := p.genericPrint(v.MapIndex(key))
s[i] = p.surround(indent, keyElem+": "+valElem, ",", "\n")
}
return p.surround("{\n", strings.Join(s, ""), strings.Repeat(" ", p.indent)+"}", "")
}
// quote quotes the given string appropriately for the current printing method.
func (p *printer) quote(s string) string {
if p.surroundSyntax {
return "'" + s + "'"
}
return s
}
// surround surrounds the given string with a prefix and suffix, if appropriate for the current printing method.
func (p *printer) surround(prefix, s, suffix, always string) string {
if p.surroundSyntax {
return prefix + s + suffix + always
}
return s + always
}
// An orderedField is used to sort the fields into the order we print them in.
// This isn't necessarily the same as the order on the struct.
type orderedField struct {
structIndex, printIndex int
}
type orderedFields []orderedField
func (f orderedFields) Len() int { return len(f) }
func (f orderedFields) Swap(a, b int) { f[a], f[b] = f[b], f[a] }
func (f orderedFields) Less(a, b int) bool { return f[a].printIndex < f[b].printIndex }
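As a quick illustration of the field-naming rule used by fieldName above (this sketch is not part of the please source; the struct and program are hypothetical): a `name` struct tag overrides the printed name, otherwise the lower-cased Go field name is used.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// Hypothetical target type: Sources carries a `name` tag, Deps does not.
type exampleTarget struct {
	Sources []string `name:"srcs"`
	Deps    []string
}

// printName mirrors printer.fieldName: prefer the tag, fall back to the lower-cased field name.
func printName(f reflect.StructField) string {
	if name := f.Tag.Get("name"); name != "" {
		return name
	}
	return strings.ToLower(f.Name)
}

func main() {
	t := reflect.TypeOf(exampleTarget{})
	for i := 0; i < t.NumField(); i++ {
		fmt.Println(printName(t.Field(i))) // prints "srcs", then "deps"
	}
}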
| 1 | 10,068 | Why do you need to create a new parser? Can't you just use `state.Parser` to answer this question? | thought-machine-please | go |
@@ -155,5 +155,10 @@ namespace Nethermind.Core
HashNumberDiffAndTx,
Short
}
+
+ public virtual Block CreateBlockForProcessing(BlockHeader header) =>
+ new(header, Transactions, Ommers);
+
+ public virtual IEnumerable<Transaction> GetTransactions() => Transactions;
}
}
 | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;
using Nethermind.Core.Crypto;
using Nethermind.Int256;
namespace Nethermind.Core
{
[DebuggerDisplay("{Hash} ({Number})")]
public class Block
{
public Block(BlockHeader blockHeader, BlockBody body)
{
Header = blockHeader;
Body = body;
}
public Block(BlockHeader blockHeader, IEnumerable<Transaction> transactions, IEnumerable<BlockHeader> ommers)
{
Header = blockHeader;
Body = new BlockBody(transactions.ToArray(), ommers.ToArray());
}
public Block(BlockHeader blockHeader)
: this(blockHeader, BlockBody.Empty)
{
}
public Block WithReplacedHeader(BlockHeader newHeader)
{
return new(newHeader, Body);
}
public Block WithReplacedBody(BlockBody newBody)
{
return new(Header, newBody);
}
public BlockHeader Header { get; }
public BlockBody Body { get; }
public bool IsGenesis => Header.IsGenesis;
public Transaction[] Transactions { get => Body.Transactions; protected set => Body.Transactions = value; } // setter needed to produce blocks with unknown transaction count on start
public BlockHeader[] Ommers => Body.Ommers; // do not add setter here
public Keccak? Hash => Header.Hash; // do not add setter here
public Keccak? ParentHash => Header.ParentHash; // do not add setter here
public ulong Nonce => Header.Nonce; // do not add setter here
public Keccak? MixHash => Header.MixHash; // do not add setter here
public byte[]? ExtraData => Header.ExtraData; // do not add setter here
public Bloom? Bloom => Header.Bloom; // do not add setter here
public Keccak? OmmersHash => Header.OmmersHash; // do not add setter here
public Address? Beneficiary => Header.Beneficiary; // do not add setter here
public Address? Author => Header.Author; // do not add setter here
public Keccak? StateRoot => Header.StateRoot; // do not add setter here
public Keccak? TxRoot => Header.TxRoot; // do not add setter here
public Keccak? ReceiptsRoot => Header.ReceiptsRoot; // do not add setter here
public long GasLimit => Header.GasLimit; // do not add setter here
public long GasUsed => Header.GasUsed; // do not add setter here
public UInt256 Timestamp => Header.Timestamp; // do not add setter here
public DateTime TimestampDate => Header.TimestampDate; // do not add setter here
public long Number => Header.Number; // do not add setter here
public UInt256 Difficulty => Header.Difficulty; // do not add setter here
public UInt256? TotalDifficulty => Header.TotalDifficulty; // do not add setter here
public UInt256 BaseFeePerGas => Header.BaseFeePerGas; // do not add setter here
public override string ToString()
{
return ToString(Format.Short);
}
public string ToString(Format format)
{
return format switch
{
Format.Full => ToFullString(),
Format.FullHashAndNumber => Hash == null ? $"{Number} null" : $"{Number} ({Hash})",
Format.HashNumberAndTx => Hash == null
? $"{Number} null, tx count: {Body.Transactions.Length}"
: $"{Number} {TimestampDate:HH:mm:ss} ({Hash?.ToShortString()}), tx count: {Body.Transactions.Length}",
Format.HashNumberDiffAndTx => Hash == null
? $"{Number} null, diff: {Difficulty}, tx count: {Body.Transactions.Length}"
: $"{Number} ({Hash?.ToShortString()}), diff: {Difficulty}, tx count: {Body.Transactions.Length}",
_ => Hash == null ? $"{Number} null" : $"{Number} ({Hash?.ToShortString()})"
};
}
private string ToFullString()
{
StringBuilder builder = new();
builder.AppendLine($"Block {Number}");
builder.AppendLine(" Header:");
builder.Append($"{Header.ToString(" ")}");
builder.AppendLine(" Ommers:");
foreach (BlockHeader ommer in Body.Ommers ?? Array.Empty<BlockHeader>())
{
builder.Append($"{ommer.ToString(" ")}");
}
builder.AppendLine(" Transactions:");
foreach (Transaction tx in Body?.Transactions ?? Array.Empty<Transaction>())
{
builder.Append($"{tx.ToString(" ")}");
}
return builder.ToString();
}
public enum Format
{
Full,
FullHashAndNumber,
HashNumberAndTx,
HashNumberDiffAndTx,
Short
}
}
}
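A minimal sketch of how the processing-oriented helpers added by this entry's patch could live outside Nethermind.Core instead, for example as extension methods. The Nethermind.Consensus.Processing namespace and the class name are assumptions made for illustration, not the project's actual layout.

using System.Collections.Generic;
using Nethermind.Core;

namespace Nethermind.Consensus.Processing
{
    // Hypothetical location: keeps Block free of processing-specific members.
    public static class BlockProcessingExtensions
    {
        // Same construction the patch performs, expressed as an extension method.
        public static Block CreateBlockForProcessing(this Block block, BlockHeader header) =>
            new(header, block.Transactions, block.Ommers);

        // Default transaction selection; a producer-specific strategy could replace this
        // behind an interface rather than a virtual member on Block.
        public static IEnumerable<Transaction> GetTransactionsForProcessing(this Block block) =>
            block.Transactions;
    }
}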
| 1 | 25,729 | remove processing references from Nethermind.Core where possible, keep clean | NethermindEth-nethermind | .cs |
@@ -0,0 +1,7 @@
+package com.fsck.k9.mail.internet;
+
+public class UnsupportedContentTransferEncodingException extends Exception {
+ public UnsupportedContentTransferEncodingException(String encoding) {
+ super("Unsupported encoding: "+encoding);
+ }
+} | 1 | 1 | 14,355 | Why not make this a subclass of `MessagingException` so we don't need to change all `throws` clause? | k9mail-k-9 | java |
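A hedged sketch of an alternative shape for this new exception: subclassing K-9's existing MessagingException so existing `throws MessagingException` clauses would not need to change. The import path of MessagingException is an assumption about the K-9 package layout.

package com.fsck.k9.mail.internet;

import com.fsck.k9.mail.MessagingException;

// Sketch only: same message, but checked as a MessagingException subtype.
public class UnsupportedContentTransferEncodingException extends MessagingException {
    public UnsupportedContentTransferEncodingException(String encoding) {
        super("Unsupported encoding: " + encoding);
    }
}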
|
@@ -22,6 +22,8 @@ import qrcode
import traceback
from hashlib import sha256
from decimal import Decimal
+import binascii
+from .drbg.hmac_drbg import DRBG
from PyQt5.QtPrintSupport import QPrinter
| 1 | '''
Revealer
So you have something to hide?
plug-in for the electrum wallet.
Features:
- Deep Cold multi-factor backup solution
- Safety - One time pad security
- Redundancy - Trustless printing & distribution
- Encrypt your seedphrase or any secret you want for your revealer
- Based on crypto by legendary cryptographers Naor and Shamir
Tiago Romagnani Silveira, 2017
'''
import os
import random
import qrcode
import traceback
from hashlib import sha256
from decimal import Decimal
from PyQt5.QtPrintSupport import QPrinter
from electrum.plugin import BasePlugin, hook
from electrum.i18n import _
from electrum.util import to_bytes, make_dir
from electrum.gui.qt.util import *
from electrum.gui.qt.qrtextedit import ScanQRTextEdit
class Plugin(BasePlugin):
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.base_dir = config.electrum_path()+'/revealer/'
if self.config.get('calibration_h') is None:
self.config.set_key('calibration_h', 0)
if self.config.get('calibration_v') is None:
self.config.set_key('calibration_v', 0)
self.calibration_h = self.config.get('calibration_h')
self.calibration_v = self.config.get('calibration_v')
self.version = '0'
self.size = (159, 97)
self.f_size = QSize(1014*2, 642*2)
self.abstand_h = 21
self.abstand_v = 34
self.calibration_noise = int('10' * 128)
self.rawnoise = False
make_dir(self.base_dir)
@hook
def set_seed(self, seed, has_extension, parent):
self.cseed = seed.upper()
self.has_extension = has_extension
parent.addButton(':icons/revealer.png', partial(self.setup_dialog, parent), "Revealer"+_(" secret backup utility"))
def requires_settings(self):
return True
def settings_widget(self, window):
return EnterButton(_('Printer Calibration'), partial(self.calibration_dialog, window))
def setup_dialog(self, window):
self.update_wallet_name(window.parent().parent().wallet)
self.user_input = False
self.noise_seed = False
self.d = WindowModalDialog(window, "Revealer")
self.d.setMinimumWidth(420)
vbox = QVBoxLayout(self.d)
vbox.addSpacing(21)
logo = QLabel()
vbox.addWidget(logo)
logo.setPixmap(QPixmap(':icons/revealer.png'))
logo.setAlignment(Qt.AlignCenter)
vbox.addSpacing(42)
self.load_noise = ScanQRTextEdit()
self.load_noise.setTabChangesFocus(True)
self.load_noise.textChanged.connect(self.on_edit)
self.load_noise.setMaximumHeight(33)
vbox.addWidget(WWLabel("<b>"+_("Enter your physical revealer code:")+"<b>"))
vbox.addWidget(self.load_noise)
vbox.addSpacing(11)
self.next_button = QPushButton(_("Next"), self.d)
self.next_button.setDefault(True)
self.next_button.setEnabled(False)
vbox.addLayout(Buttons(self.next_button))
self.next_button.clicked.connect(self.d.close)
self.next_button.clicked.connect(partial(self.cypherseed_dialog, window))
vbox.addSpacing(21)
vbox.addWidget(WWLabel(_("or, alternatively: ")))
bcreate = QPushButton(_("Create a digital Revealer"))
def mk_digital():
try:
self.make_digital(self.d)
except Exception:
traceback.print_exc(file=sys.stdout)
else:
self.cypherseed_dialog(window)
bcreate.clicked.connect(mk_digital)
vbox.addWidget(bcreate)
vbox.addSpacing(11)
vbox.addWidget(QLabel(''.join([ "<b>"+_("WARNING")+ "</b>:" + _("Printing a revealer and encrypted seed"), '<br/>',
_("on the same printer is not trustless towards the printer."), '<br/>',
])))
vbox.addSpacing(11)
vbox.addLayout(Buttons(CloseButton(self.d)))
return bool(self.d.exec_())
def get_noise(self):
text = self.load_noise.text()
return ''.join(text.split()).lower()
def on_edit(self):
s = self.get_noise()
b = self.is_noise(s)
if b:
self.noise_seed = s[:-3]
self.user_input = True
self.next_button.setEnabled(b)
def code_hashid(self, txt):
x = to_bytes(txt, 'utf8')
hash = sha256(x).hexdigest()
return hash[-3:].upper()
def is_noise(self, txt):
if (len(txt) >= 34):
try:
int(txt, 16)
except:
self.user_input = False
return False
else:
id = self.code_hashid(txt[:-3])
if (txt[-3:].upper() == id.upper()):
self.code_id = id
self.user_input = True
return True
else:
return False
else:
self.user_input = False
return False
def make_digital(self, dialog):
self.make_rawnoise(True)
self.bdone(dialog)
self.d.close()
def bcrypt(self, dialog):
self.rawnoise = False
dialog.show_message(''.join([_("{} encrypted for Revealer {}_{} saved as PNG and PDF at:").format(self.was, self.version, self.code_id),
"<br/>","<b>", self.base_dir+ self.filename+self.version+"_"+self.code_id,"</b>"]))
dialog.close()
def ext_warning(self, dialog):
dialog.show_message(''.join(["<b>",_("Warning: "), "</b>", _("your seed extension will not be included in the encrypted backup.")]))
dialog.close()
def bdone(self, dialog):
dialog.show_message(''.join([_("Digital Revealer ({}_{}) saved as PNG and PDF at:").format(self.version, self.code_id),
"<br/>","<b>", self.base_dir + 'revealer_' +self.version + '_'+ self.code_id, '</b>']))
def customtxt_limits(self):
txt = self.text.text()
self.max_chars.setVisible(False)
self.char_count.setText("("+str(len(txt))+"/216)")
if len(txt)>0:
self.ctext.setEnabled(True)
if len(txt) > 216:
self.text.setPlainText(self.text.toPlainText()[:216])
self.max_chars.setVisible(True)
def t(self):
self.txt = self.text.text()
self.seed_img(is_seed=False)
def cypherseed_dialog(self, window):
d = WindowModalDialog(window, "Revealer")
d.setMinimumWidth(420)
self.c_dialog = d
self.vbox = QVBoxLayout(d)
self.vbox.addSpacing(21)
logo = QLabel()
self.vbox.addWidget(logo)
logo.setPixmap(QPixmap(':icons/revealer.png'))
logo.setAlignment(Qt.AlignCenter)
self.vbox.addSpacing(42)
grid = QGridLayout()
self.vbox.addLayout(grid)
cprint = QPushButton(_("Generate encrypted seed PDF"))
cprint.clicked.connect(partial(self.seed_img, True))
self.vbox.addWidget(cprint)
self.vbox.addSpacing(14)
self.vbox.addWidget(WWLabel(_("and/or type any secret below:")))
self.text = ScanQRTextEdit()
self.text.setTabChangesFocus(True)
self.text.setMaximumHeight(70)
self.text.textChanged.connect(self.customtxt_limits)
self.vbox.addWidget(self.text)
self.char_count = WWLabel("")
self.char_count.setAlignment(Qt.AlignRight)
self.vbox.addWidget(self.char_count)
self.max_chars = WWLabel("<font color='red'>" + _("This version supports a maximum of 216 characters.")+"</font>")
self.vbox.addWidget(self.max_chars)
self.max_chars.setVisible(False)
self.ctext = QPushButton(_("Generate custom secret encrypted PDF"))
self.ctext.clicked.connect(self.t)
self.vbox.addWidget(self.ctext)
self.ctext.setEnabled(False)
self.vbox.addSpacing(21)
self.vbox.addLayout(Buttons(CloseButton(d)))
return bool(d.exec_())
def update_wallet_name (self, name):
self.wallet_name = str(name)
self.base_name = self.base_dir + self.wallet_name
def seed_img(self, is_seed = True):
if not self.cseed and self.txt == False:
return
if is_seed:
txt = self.cseed
else:
txt = self.txt.upper()
img = QImage(self.size[0],self.size[1], QImage.Format_Mono)
bitmap = QBitmap.fromImage(img, Qt.MonoOnly)
bitmap.fill(Qt.white)
painter = QPainter()
painter.begin(bitmap)
QFontDatabase.addApplicationFont(os.path.join(os.path.dirname(__file__), 'SourceSansPro-Bold.otf') )
if len(txt) < 102 :
fontsize = 15
linespace = 15
max_letters = 17
max_lines = 6
max_words = 3
else:
fontsize = 12
linespace = 10
max_letters = 23
max_lines = 9
max_words = int(max_letters/4)
font = QFont('Source Sans Pro', fontsize, QFont.Bold)
font.setLetterSpacing(QFont.PercentageSpacing, 100)
font.setPixelSize(fontsize)
painter.setFont(font)
seed_array = txt.split(' ')
for n in range(max_lines):
nwords = max_words
temp_seed = seed_array[:nwords]
while len(' '.join(map(str, temp_seed))) > max_letters:
nwords = nwords - 1
temp_seed = seed_array[:nwords]
painter.drawText(QRect(0, linespace*n , self.size[0], self.size[1]), Qt.AlignHCenter, ' '.join(map(str, temp_seed)))
del seed_array[:nwords]
painter.end()
img = bitmap.toImage()
if (self.rawnoise == False):
self.make_rawnoise()
self.make_cypherseed(img, self.rawnoise, False, is_seed)
return img
def make_rawnoise(self, create_revealer=False):
w = self.size[0]
h = self.size[1]
rawnoise = QImage(w, h, QImage.Format_Mono)
if(self.noise_seed == False):
self.noise_seed = random.SystemRandom().getrandbits(128)
self.hex_noise = format(self.noise_seed, '02x')
self.hex_noise = self.version + str(self.hex_noise)
if (self.user_input == True):
self.noise_seed = int(self.noise_seed, 16)
self.hex_noise = self.version + str(format(self.noise_seed, '02x'))
self.code_id = self.code_hashid(self.hex_noise)
self.hex_noise = ' '.join(self.hex_noise[i:i+4] for i in range(0,len(self.hex_noise),4))
random.seed(self.noise_seed)
for x in range(w):
for y in range(h):
rawnoise.setPixel(x,y,random.randint(0, 1))
self.rawnoise = rawnoise
if create_revealer==True:
self.make_revealer()
self.noise_seed = False
def make_calnoise(self):
random.seed(self.calibration_noise)
w = self.size[0]
h = self.size[1]
rawnoise = QImage(w, h, QImage.Format_Mono)
for x in range(w):
for y in range(h):
rawnoise.setPixel(x,y,random.randint(0, 1))
self.calnoise = self.pixelcode_2x2(rawnoise)
def make_revealer(self):
revealer = self.pixelcode_2x2(self.rawnoise)
revealer.invertPixels()
revealer = QBitmap.fromImage(revealer)
revealer = revealer.scaled(self.f_size, Qt.KeepAspectRatio)
revealer = self.overlay_marks(revealer)
self.filename = 'Revealer - '
revealer.save(self.base_dir + self.filename + self.version+'_'+self.code_id + '.png')
self.toPdf(QImage(revealer))
QDesktopServices.openUrl(QUrl.fromLocalFile(os.path.abspath(self.base_dir + self.filename + self.version+'_'+ self.code_id + '.pdf')))
def make_cypherseed(self, img, rawnoise, calibration=False, is_seed = True):
img = img.convertToFormat(QImage.Format_Mono)
p = QPainter()
p.begin(img)
p.setCompositionMode(26) #xor
p.drawImage(0, 0, rawnoise)
p.end()
cypherseed = self.pixelcode_2x2(img)
cypherseed = QBitmap.fromImage(cypherseed)
cypherseed = cypherseed.scaled(self.f_size, Qt.KeepAspectRatio)
cypherseed = self.overlay_marks(cypherseed, True, calibration)
if not is_seed:
self.filename = _('custom_secret')+'_'
self.was = _('Custom secret')
else:
self.filename = self.wallet_name+'_'+ _('seed')+'_'
self.was = self.wallet_name +' ' + _('seed')
if self.has_extension:
self.ext_warning(self.c_dialog)
if not calibration:
self.toPdf(QImage(cypherseed))
QDesktopServices.openUrl (QUrl.fromLocalFile(os.path.abspath(self.base_dir+self.filename+self.version+'_'+self.code_id+'.pdf')))
cypherseed.save(self.base_dir + self.filename +self.version + '_'+ self.code_id + '.png')
self.bcrypt(self.c_dialog)
return cypherseed
def calibration(self):
img = QImage(self.size[0],self.size[1], QImage.Format_Mono)
bitmap = QBitmap.fromImage(img, Qt.MonoOnly)
bitmap.fill(Qt.black)
self.make_calnoise()
img = self.overlay_marks(self.calnoise.scaledToHeight(self.f_size.height()), False, True)
self.calibration_pdf(img)
QDesktopServices.openUrl (QUrl.fromLocalFile(os.path.abspath(self.base_dir+_('calibration')+'.pdf')))
return img
def toPdf(self, image):
printer = QPrinter()
printer.setPaperSize(QSizeF(210, 297), QPrinter.Millimeter)
printer.setResolution(600)
printer.setOutputFormat(QPrinter.PdfFormat)
printer.setOutputFileName(self.base_dir+self.filename+self.version + '_'+self.code_id+'.pdf')
printer.setPageMargins(0,0,0,0,6)
painter = QPainter()
painter.begin(printer)
delta_h = round(image.width()/self.abstand_v)
delta_v = round(image.height()/self.abstand_h)
size_h = 2028+((int(self.calibration_h)*2028/(2028-(delta_h*2)+int(self.calibration_h)))/2)
size_v = 1284+((int(self.calibration_v)*1284/(1284-(delta_v*2)+int(self.calibration_v)))/2)
image = image.scaled(size_h, size_v)
painter.drawImage(553,533, image)
wpath = QPainterPath()
wpath.addRoundedRect(QRectF(553,533, size_h, size_v), 19, 19)
painter.setPen(QPen(Qt.black, 1))
painter.drawPath(wpath)
painter.end()
def calibration_pdf(self, image):
printer = QPrinter()
printer.setPaperSize(QSizeF(210, 297), QPrinter.Millimeter)
printer.setResolution(600)
printer.setOutputFormat(QPrinter.PdfFormat)
printer.setOutputFileName(self.base_dir+_('calibration')+'.pdf')
printer.setPageMargins(0,0,0,0,6)
painter = QPainter()
painter.begin(printer)
painter.drawImage(553,533, image)
font = QFont('Source Sans Pro', 10, QFont.Bold)
painter.setFont(font)
painter.drawText(254,277, _("Calibration sheet"))
font = QFont('Source Sans Pro', 7, QFont.Bold)
painter.setFont(font)
painter.drawText(600,2077, _("Instructions:"))
font = QFont('Source Sans Pro', 7, QFont.Normal)
painter.setFont(font)
        painter.drawText(700, 2177, _("1. Place this paper on a flat and well illuminated surface."))
painter.drawText(700, 2277, _("2. Align your Revealer borderlines to the dashed lines on the top and left."))
painter.drawText(700, 2377, _("3. Press slightly the Revealer against the paper and read the numbers that best "
"match on the opposite sides. "))
painter.drawText(700, 2477, _("4. Type the numbers in the software"))
painter.end()
def pixelcode_2x2(self, img):
result = QImage(img.width()*2, img.height()*2, QImage.Format_ARGB32 )
white = qRgba(255,255,255,0)
black = qRgba(0,0,0,255)
for x in range(img.width()):
for y in range(img.height()):
c = img.pixel(QPoint(x,y))
colors = QColor(c).getRgbF()
if colors[0]:
result.setPixel(x*2+1,y*2+1, black)
result.setPixel(x*2,y*2+1, white)
result.setPixel(x*2+1,y*2, white)
result.setPixel(x*2, y*2, black)
else:
result.setPixel(x*2+1,y*2+1, white)
result.setPixel(x*2,y*2+1, black)
result.setPixel(x*2+1,y*2, black)
result.setPixel(x*2, y*2, white)
return result
def overlay_marks(self, img, is_cseed=False, calibration_sheet=False):
border_color = Qt.white
base_img = QImage(self.f_size.width(),self.f_size.height(), QImage.Format_ARGB32)
base_img.fill(border_color)
img = QImage(img)
painter = QPainter()
painter.begin(base_img)
total_distance_h = round(base_img.width() / self.abstand_v)
dist_v = round(total_distance_h) / 2
dist_h = round(total_distance_h) / 2
img = img.scaledToWidth(base_img.width() - (2 * (total_distance_h)))
painter.drawImage(total_distance_h,
total_distance_h,
img)
#frame around image
pen = QPen(Qt.black, 2)
painter.setPen(pen)
#horz
painter.drawLine(0, total_distance_h, base_img.width(), total_distance_h)
painter.drawLine(0, base_img.height()-(total_distance_h), base_img.width(), base_img.height()-(total_distance_h))
#vert
painter.drawLine(total_distance_h, 0, total_distance_h, base_img.height())
painter.drawLine(base_img.width()-(total_distance_h), 0, base_img.width()-(total_distance_h), base_img.height())
#border around img
border_thick = 6
Rpath = QPainterPath()
Rpath.addRect(QRectF((total_distance_h)+(border_thick/2),
(total_distance_h)+(border_thick/2),
base_img.width()-((total_distance_h)*2)-((border_thick)-1),
(base_img.height()-((total_distance_h))*2)-((border_thick)-1)))
pen = QPen(Qt.black, border_thick)
pen.setJoinStyle (Qt.MiterJoin)
painter.setPen(pen)
painter.drawPath(Rpath)
Bpath = QPainterPath()
Bpath.addRect(QRectF((total_distance_h), (total_distance_h),
base_img.width()-((total_distance_h)*2), (base_img.height()-((total_distance_h))*2)))
pen = QPen(Qt.black, 1)
painter.setPen(pen)
painter.drawPath(Bpath)
pen = QPen(Qt.black, 1)
painter.setPen(pen)
painter.drawLine(0, base_img.height()/2, total_distance_h, base_img.height()/2)
painter.drawLine(base_img.width()/2, 0, base_img.width()/2, total_distance_h)
painter.drawLine(base_img.width()-total_distance_h, base_img.height()/2, base_img.width(), base_img.height()/2)
painter.drawLine(base_img.width()/2, base_img.height(), base_img.width()/2, base_img.height() - total_distance_h)
#print code
f_size = 37
QFontDatabase.addApplicationFont(os.path.join(os.path.dirname(__file__), 'DejaVuSansMono-Bold.ttf'))
font = QFont("DejaVu Sans Mono", f_size-11, QFont.Bold)
font.setPixelSize(35)
painter.setFont(font)
if not calibration_sheet:
if is_cseed: #its a secret
painter.setPen(QPen(Qt.black, 1, Qt.DashDotDotLine))
painter.drawLine(0, dist_v, base_img.width(), dist_v)
painter.drawLine(dist_h, 0, dist_h, base_img.height())
painter.drawLine(0, base_img.height()-dist_v, base_img.width(), base_img.height()-(dist_v))
painter.drawLine(base_img.width()-(dist_h), 0, base_img.width()-(dist_h), base_img.height())
painter.drawImage(((total_distance_h))+11, ((total_distance_h))+11,
QImage(':icons/electrumb.png').scaledToWidth(2.1*(total_distance_h), Qt.SmoothTransformation))
painter.setPen(QPen(Qt.white, border_thick*8))
painter.drawLine(base_img.width()-((total_distance_h))-(border_thick*8)/2-(border_thick/2)-2,
(base_img.height()-((total_distance_h)))-((border_thick*8)/2)-(border_thick/2)-2,
base_img.width()-((total_distance_h))-(border_thick*8)/2-(border_thick/2)-2 - 77,
(base_img.height()-((total_distance_h)))-((border_thick*8)/2)-(border_thick/2)-2)
painter.setPen(QColor(0,0,0,255))
painter.drawText(QRect(0, base_img.height()-107, base_img.width()-total_distance_h - border_thick - 11,
base_img.height()-total_distance_h - border_thick), Qt.AlignRight, self.version + '_'+self.code_id)
painter.end()
else: # revealer
painter.setPen(QPen(border_color, 17))
painter.drawLine(0, dist_v, base_img.width(), dist_v)
painter.drawLine(dist_h, 0, dist_h, base_img.height())
painter.drawLine(0, base_img.height()-dist_v, base_img.width(), base_img.height()-(dist_v))
painter.drawLine(base_img.width()-(dist_h), 0, base_img.width()-(dist_h), base_img.height())
painter.setPen(QPen(Qt.black, 2))
painter.drawLine(0, dist_v, base_img.width(), dist_v)
painter.drawLine(dist_h, 0, dist_h, base_img.height())
painter.drawLine(0, base_img.height()-dist_v, base_img.width(), base_img.height()-(dist_v))
painter.drawLine(base_img.width()-(dist_h), 0, base_img.width()-(dist_h), base_img.height())
logo = QImage(':icons/revealer_c.png').scaledToWidth(1.3*(total_distance_h))
painter.drawImage((total_distance_h)+ (border_thick), ((total_distance_h))+ (border_thick), logo, Qt.SmoothTransformation)
#frame around logo
painter.setPen(QPen(Qt.black, border_thick))
painter.drawLine(total_distance_h+border_thick, total_distance_h+logo.height()+3*(border_thick/2),
total_distance_h+logo.width()+border_thick, total_distance_h+logo.height()+3*(border_thick/2))
painter.drawLine(logo.width()+total_distance_h+3*(border_thick/2), total_distance_h+(border_thick),
total_distance_h+logo.width()+3*(border_thick/2), total_distance_h+logo.height()+(border_thick))
#frame around code/qr
qr_size = 179
painter.drawLine((base_img.width()-((total_distance_h))-(border_thick/2)-2)-qr_size,
(base_img.height()-((total_distance_h)))-((border_thick*8))-(border_thick/2)-2,
(base_img.width()/2+(total_distance_h/2)-border_thick-(border_thick*8)/2)-qr_size,
(base_img.height()-((total_distance_h)))-((border_thick*8))-(border_thick/2)-2)
painter.drawLine((base_img.width()/2+(total_distance_h/2)-border_thick-(border_thick*8)/2)-qr_size,
(base_img.height()-((total_distance_h)))-((border_thick*8))-(border_thick/2)-2,
base_img.width()/2 + (total_distance_h/2)-border_thick-(border_thick*8)/2-qr_size,
((base_img.height()-((total_distance_h)))-(border_thick/2)-2))
painter.setPen(QPen(Qt.white, border_thick * 8))
painter.drawLine(
base_img.width() - ((total_distance_h)) - (border_thick * 8) / 2 - (border_thick / 2) - 2,
(base_img.height() - ((total_distance_h))) - ((border_thick * 8) / 2) - (border_thick / 2) - 2,
base_img.width() / 2 + (total_distance_h / 2) - border_thick - qr_size,
(base_img.height() - ((total_distance_h))) - ((border_thick * 8) / 2) - (border_thick / 2) - 2)
painter.setPen(QColor(0,0,0,255))
painter.drawText(QRect(((base_img.width()/2) +21)-qr_size, base_img.height()-107,
base_img.width()-total_distance_h - border_thick -93,
base_img.height()-total_distance_h - border_thick), Qt.AlignLeft, self.hex_noise.upper())
painter.drawText(QRect(0, base_img.height()-107, base_img.width()-total_distance_h - border_thick -3 -qr_size,
base_img.height()-total_distance_h - border_thick), Qt.AlignRight, self.code_id)
# draw qr code
qr_qt = self.paintQR(self.hex_noise.upper() +self.code_id)
target = QRectF(base_img.width()-65-qr_size,
base_img.height()-65-qr_size,
qr_size, qr_size )
painter.drawImage(target, qr_qt)
painter.setPen(QPen(Qt.black, 4))
painter.drawLine(base_img.width()-65-qr_size,
base_img.height()-65-qr_size,
base_img.width() - 65 - qr_size,
(base_img.height() - ((total_distance_h))) - ((border_thick * 8)) - (border_thick / 2) - 4
)
painter.drawLine(base_img.width()-65-qr_size,
base_img.height()-65-qr_size,
base_img.width() - 65,
base_img.height()-65-qr_size
)
painter.end()
else: # calibration only
painter.end()
cal_img = QImage(self.f_size.width() + 100, self.f_size.height() + 100,
QImage.Format_ARGB32)
cal_img.fill(Qt.white)
cal_painter = QPainter()
cal_painter.begin(cal_img)
cal_painter.drawImage(0,0, base_img)
#black lines in the middle of border top left only
cal_painter.setPen(QPen(Qt.black, 1, Qt.DashDotDotLine))
cal_painter.drawLine(0, dist_v, base_img.width(), dist_v)
cal_painter.drawLine(dist_h, 0, dist_h, base_img.height())
pen = QPen(Qt.black, 2, Qt.DashDotDotLine)
cal_painter.setPen(pen)
n=15
cal_painter.setFont(QFont("DejaVu Sans Mono", 21, QFont.Bold))
for x in range(-n,n):
#lines on bottom (vertical calibration)
cal_painter.drawLine((((base_img.width())/(n*2)) *(x))+ (base_img.width()/2)-13,
x+2+base_img.height()-(dist_v),
(((base_img.width())/(n*2)) *(x))+ (base_img.width()/2)+13,
x+2+base_img.height()-(dist_v))
num_pos = 9
if x > 9 : num_pos = 17
if x < 0 : num_pos = 20
if x < -9: num_pos = 27
cal_painter.drawText((((base_img.width())/(n*2)) *(x))+ (base_img.width()/2)-num_pos,
50+base_img.height()-(dist_v),
str(x))
#lines on the right (horizontal calibrations)
cal_painter.drawLine(x+2+(base_img.width()-(dist_h)),
((base_img.height()/(2*n)) *(x))+ (base_img.height()/n)+(base_img.height()/2)-13,
x+2+(base_img.width()-(dist_h)),
((base_img.height()/(2*n)) *(x))+ (base_img.height()/n)+(base_img.height()/2)+13)
cal_painter.drawText(30+(base_img.width()-(dist_h)),
((base_img.height()/(2*n)) *(x))+ (base_img.height()/2)+13, str(x))
cal_painter.end()
base_img = cal_img
return base_img
def paintQR(self, data):
if not data:
return
qr = qrcode.QRCode()
qr.add_data(data)
matrix = qr.get_matrix()
k = len(matrix)
border_color = Qt.white
base_img = QImage(k * 5, k * 5, QImage.Format_ARGB32)
base_img.fill(border_color)
qrpainter = QPainter()
qrpainter.begin(base_img)
boxsize = 5
size = k * boxsize
left = (base_img.width() - size)/2
top = (base_img.height() - size)/2
qrpainter.setBrush(Qt.black)
qrpainter.setPen(Qt.black)
for r in range(k):
for c in range(k):
if matrix[r][c]:
qrpainter.drawRect(left+c*boxsize, top+r*boxsize, boxsize - 1, boxsize - 1)
qrpainter.end()
return base_img
def calibration_dialog(self, window):
d = WindowModalDialog(window, _("Revealer - Printer calibration settings"))
d.setMinimumSize(100, 200)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(''.join(["<br/>", _("If you have an old printer, or want optimal precision"),"<br/>",
_("print the calibration pdf and follow the instructions "), "<br/>","<br/>",
])))
self.calibration_h = self.config.get('calibration_h')
self.calibration_v = self.config.get('calibration_v')
cprint = QPushButton(_("Open calibration pdf"))
cprint.clicked.connect(self.calibration)
vbox.addWidget(cprint)
vbox.addWidget(QLabel(_('Calibration values:')))
grid = QGridLayout()
vbox.addLayout(grid)
grid.addWidget(QLabel(_('Right side')), 0, 0)
horizontal = QLineEdit()
horizontal.setText(str(self.calibration_h))
grid.addWidget(horizontal, 0, 1)
grid.addWidget(QLabel(_('Bottom')), 1, 0)
vertical = QLineEdit()
vertical.setText(str(self.calibration_v))
grid.addWidget(vertical, 1, 1)
vbox.addStretch()
vbox.addSpacing(13)
vbox.addLayout(Buttons(CloseButton(d), OkButton(d)))
if not d.exec_():
return
self.calibration_h = int(Decimal(horizontal.text()))
self.config.set_key('calibration_h', self.calibration_h)
self.calibration_v = int(Decimal(vertical.text()))
self.config.set_key('calibration_v', self.calibration_v)
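A hedged sketch of the plugin's import block with the two imports added by this entry's patch folded in the way the adjacent review message suggests: binascii grouped with the standard library, and DRBG imported relative to the plugin package and placed last. The grouping is approximate and the module layout is assumed.

import os
import random
import traceback
import binascii
from hashlib import sha256
from decimal import Decimal

import qrcode
from PyQt5.QtPrintSupport import QPrinter

from electrum.plugin import BasePlugin, hook
from electrum.i18n import _
from electrum.util import to_bytes, make_dir
from electrum.gui.qt.util import *
from electrum.gui.qt.qrtextedit import ScanQRTextEdit

from .hmac_drbg import DRBG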
| 1 | 12,777 | it should instead be `from .hmac_drbg import DRBG` also, please put it at the bottom of the imports (PEP 8 order), so near line 36 | spesmilo-electrum | py |
@@ -138,12 +138,13 @@ class SchemaUpdate implements UpdateSchema {
parentId = parentField.fieldId();
Preconditions.checkArgument(!deletes.contains(parentId),
"Cannot add to a column that will be deleted: %s", parent);
- Preconditions.checkArgument(schema.findField(parent + "." + name) == null,
- "Cannot add column, name already exists: %s.%s", parent, name);
+ Preconditions.checkArgument(schema.findField(parent + "." + name) == null ||
+ deletes.contains(schema.findField(parent + "." + name).fieldId()),
+ "Cannot add column, name already exists and is not being deleted: %s.%s", parent, name);
fullName = schema.findColumnName(parentId) + "." + name;
} else {
- Preconditions.checkArgument(schema.findField(name) == null,
- "Cannot add column, name already exists: %s", name);
+ Preconditions.checkArgument(schema.findField(name) == null || deletes.contains(schema.findField(name).fieldId()),
+ "Cannot add column, name already exists and is not being deleted: %s", name);
fullName = name;
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import org.apache.iceberg.mapping.MappingUtil;
import org.apache.iceberg.mapping.NameMapping;
import org.apache.iceberg.mapping.NameMappingParser;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.relocated.com.google.common.collect.Multimap;
import org.apache.iceberg.relocated.com.google.common.collect.Multimaps;
import org.apache.iceberg.schema.UnionByNameVisitor;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.TypeUtil;
import org.apache.iceberg.types.Types;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Schema evolution API implementation.
*/
class SchemaUpdate implements UpdateSchema {
private static final Logger LOG = LoggerFactory.getLogger(SchemaUpdate.class);
private static final int TABLE_ROOT_ID = -1;
private final TableOperations ops;
private final TableMetadata base;
private final Schema schema;
private final Map<Integer, Integer> idToParent;
private final List<Integer> deletes = Lists.newArrayList();
private final Map<Integer, Types.NestedField> updates = Maps.newHashMap();
private final Multimap<Integer, Types.NestedField> adds =
Multimaps.newListMultimap(Maps.newHashMap(), Lists::newArrayList);
private final Map<String, Integer> addedNameToId = Maps.newHashMap();
private final Multimap<Integer, Move> moves = Multimaps.newListMultimap(Maps.newHashMap(), Lists::newArrayList);
private int lastColumnId;
private boolean allowIncompatibleChanges = false;
SchemaUpdate(TableOperations ops) {
this.ops = ops;
this.base = ops.current();
this.schema = base.schema();
this.lastColumnId = base.lastColumnId();
this.idToParent = Maps.newHashMap(TypeUtil.indexParents(schema.asStruct()));
}
/**
* For testing only.
*/
SchemaUpdate(Schema schema, int lastColumnId) {
this.ops = null;
this.base = null;
this.schema = schema;
this.lastColumnId = lastColumnId;
this.idToParent = Maps.newHashMap(TypeUtil.indexParents(schema.asStruct()));
}
@Override
public SchemaUpdate allowIncompatibleChanges() {
this.allowIncompatibleChanges = true;
return this;
}
@Override
public UpdateSchema addColumn(String name, Type type, String doc) {
Preconditions.checkArgument(!name.contains("."),
"Cannot add column with ambiguous name: %s, use addColumn(parent, name, type)", name);
return addColumn(null, name, type, doc);
}
@Override
public UpdateSchema addColumn(String parent, String name, Type type, String doc) {
internalAddColumn(parent, name, true, type, doc);
return this;
}
@Override
public UpdateSchema addRequiredColumn(String name, Type type, String doc) {
Preconditions.checkArgument(!name.contains("."),
"Cannot add column with ambiguous name: %s, use addColumn(parent, name, type)", name);
addRequiredColumn(null, name, type, doc);
return this;
}
@Override
public UpdateSchema addRequiredColumn(String parent, String name, Type type, String doc) {
Preconditions.checkArgument(allowIncompatibleChanges,
"Incompatible change: cannot add required column: %s", name);
internalAddColumn(parent, name, false, type, doc);
return this;
}
private void internalAddColumn(String parent, String name, boolean isOptional, Type type, String doc) {
int parentId = TABLE_ROOT_ID;
String fullName;
if (parent != null) {
Types.NestedField parentField = schema.findField(parent);
Preconditions.checkArgument(parentField != null, "Cannot find parent struct: %s", parent);
Type parentType = parentField.type();
if (parentType.isNestedType()) {
Type.NestedType nested = parentType.asNestedType();
if (nested.isMapType()) {
// fields are added to the map value type
parentField = nested.asMapType().fields().get(1);
} else if (nested.isListType()) {
// fields are added to the element type
parentField = nested.asListType().fields().get(0);
}
}
Preconditions.checkArgument(
parentField.type().isNestedType() && parentField.type().asNestedType().isStructType(),
"Cannot add to non-struct column: %s: %s", parent, parentField.type());
parentId = parentField.fieldId();
Preconditions.checkArgument(!deletes.contains(parentId),
"Cannot add to a column that will be deleted: %s", parent);
Preconditions.checkArgument(schema.findField(parent + "." + name) == null,
"Cannot add column, name already exists: %s.%s", parent, name);
fullName = schema.findColumnName(parentId) + "." + name;
} else {
Preconditions.checkArgument(schema.findField(name) == null,
"Cannot add column, name already exists: %s", name);
fullName = name;
}
// assign new IDs in order
int newId = assignNewColumnId();
// update tracking for moves
addedNameToId.put(fullName, newId);
if (parentId != TABLE_ROOT_ID) {
idToParent.put(newId, parentId);
}
adds.put(parentId, Types.NestedField.of(newId, isOptional, name,
TypeUtil.assignFreshIds(type, this::assignNewColumnId), doc));
}
@Override
public UpdateSchema deleteColumn(String name) {
Types.NestedField field = schema.findField(name);
Preconditions.checkArgument(field != null, "Cannot delete missing column: %s", name);
Preconditions.checkArgument(!adds.containsKey(field.fieldId()),
"Cannot delete a column that has additions: %s", name);
Preconditions.checkArgument(!updates.containsKey(field.fieldId()),
"Cannot delete a column that has updates: %s", name);
deletes.add(field.fieldId());
return this;
}
@Override
public UpdateSchema renameColumn(String name, String newName) {
Types.NestedField field = schema.findField(name);
Preconditions.checkArgument(field != null, "Cannot rename missing column: %s", name);
Preconditions.checkArgument(newName != null, "Cannot rename a column to null");
Preconditions.checkArgument(!deletes.contains(field.fieldId()),
"Cannot rename a column that will be deleted: %s", field.name());
// merge with an update, if present
int fieldId = field.fieldId();
Types.NestedField update = updates.get(fieldId);
if (update != null) {
updates.put(fieldId, Types.NestedField.of(fieldId, update.isOptional(), newName, update.type(), update.doc()));
} else {
updates.put(fieldId, Types.NestedField.of(fieldId, field.isOptional(), newName, field.type(), field.doc()));
}
return this;
}
@Override
public UpdateSchema requireColumn(String name) {
internalUpdateColumnRequirement(name, false);
return this;
}
@Override
public UpdateSchema makeColumnOptional(String name) {
internalUpdateColumnRequirement(name, true);
return this;
}
private void internalUpdateColumnRequirement(String name, boolean isOptional) {
Types.NestedField field = schema.findField(name);
Preconditions.checkArgument(field != null, "Cannot update missing column: %s", name);
if ((!isOptional && field.isRequired()) || (isOptional && field.isOptional())) {
// if the change is a noop, allow it even if allowIncompatibleChanges is false
return;
}
Preconditions.checkArgument(isOptional || allowIncompatibleChanges,
"Cannot change column nullability: %s: optional -> required", name);
Preconditions.checkArgument(!deletes.contains(field.fieldId()),
"Cannot update a column that will be deleted: %s", field.name());
int fieldId = field.fieldId();
Types.NestedField update = updates.get(fieldId);
if (update != null) {
updates.put(fieldId, Types.NestedField.of(fieldId, isOptional, update.name(), update.type(), update.doc()));
} else {
updates.put(fieldId, Types.NestedField.of(fieldId, isOptional, field.name(), field.type(), field.doc()));
}
}
@Override
public UpdateSchema updateColumn(String name, Type.PrimitiveType newType) {
Types.NestedField field = schema.findField(name);
Preconditions.checkArgument(field != null, "Cannot update missing column: %s", name);
Preconditions.checkArgument(!deletes.contains(field.fieldId()),
"Cannot update a column that will be deleted: %s", field.name());
if (field.type().equals(newType)) {
return this;
}
Preconditions.checkArgument(TypeUtil.isPromotionAllowed(field.type(), newType),
"Cannot change column type: %s: %s -> %s", name, field.type(), newType);
// merge with a rename, if present
int fieldId = field.fieldId();
Types.NestedField update = updates.get(fieldId);
if (update != null) {
updates.put(fieldId, Types.NestedField.of(fieldId, update.isOptional(), update.name(), newType, update.doc()));
} else {
updates.put(fieldId, Types.NestedField.of(fieldId, field.isOptional(), field.name(), newType, field.doc()));
}
return this;
}
@Override
public UpdateSchema updateColumnDoc(String name, String doc) {
Types.NestedField field = schema.findField(name);
Preconditions.checkArgument(field != null, "Cannot update missing column: %s", name);
Preconditions.checkArgument(!deletes.contains(field.fieldId()),
"Cannot update a column that will be deleted: %s", field.name());
if (Objects.equals(field.doc(), doc)) {
return this;
}
// merge with a rename or update, if present
int fieldId = field.fieldId();
Types.NestedField update = updates.get(fieldId);
if (update != null) {
updates.put(fieldId, Types.NestedField.of(fieldId, update.isOptional(), update.name(), update.type(), doc));
} else {
updates.put(fieldId, Types.NestedField.of(fieldId, field.isOptional(), field.name(), field.type(), doc));
}
return this;
}
@Override
public UpdateSchema moveFirst(String name) {
Integer fieldId = findForMove(name);
Preconditions.checkArgument(fieldId != null, "Cannot move missing column: %s", name);
internalMove(name, Move.first(fieldId));
return this;
}
@Override
public UpdateSchema moveBefore(String name, String beforeName) {
Integer fieldId = findForMove(name);
Preconditions.checkArgument(fieldId != null, "Cannot move missing column: %s", name);
Integer beforeId = findForMove(beforeName);
Preconditions.checkArgument(beforeId != null, "Cannot move %s before missing column: %s", name, beforeName);
Preconditions.checkArgument(!fieldId.equals(beforeId), "Cannot move %s before itself", name);
internalMove(name, Move.before(fieldId, beforeId));
return this;
}
@Override
public UpdateSchema moveAfter(String name, String afterName) {
Integer fieldId = findForMove(name);
Preconditions.checkArgument(fieldId != null, "Cannot move missing column: %s", name);
Integer afterId = findForMove(afterName);
Preconditions.checkArgument(afterId != null, "Cannot move %s after missing column: %s", name, afterName);
Preconditions.checkArgument(!fieldId.equals(afterId), "Cannot move %s after itself", name);
internalMove(name, Move.after(fieldId, afterId));
return this;
}
@Override
public UpdateSchema unionByNameWith(Schema newSchema) {
UnionByNameVisitor.visit(this, schema, newSchema);
return this;
}
private Integer findForMove(String name) {
Types.NestedField field = schema.findField(name);
if (field != null) {
return field.fieldId();
}
return addedNameToId.get(name);
}
private void internalMove(String name, Move move) {
Integer parentId = idToParent.get(move.fieldId());
if (parentId != null) {
Types.NestedField parent = schema.findField(parentId);
Preconditions.checkArgument(parent.type().isStructType(),
"Cannot move fields in non-struct type: %s", parent.type());
if (move.type() == Move.MoveType.AFTER || move.type() == Move.MoveType.BEFORE) {
Preconditions.checkArgument(
parentId.equals(idToParent.get(move.referenceFieldId())),
"Cannot move field %s to a different struct", name);
}
moves.put(parentId, move);
} else {
if (move.type() == Move.MoveType.AFTER || move.type() == Move.MoveType.BEFORE) {
Preconditions.checkArgument(
idToParent.get(move.referenceFieldId()) == null,
"Cannot move field %s to a different struct", name);
}
moves.put(TABLE_ROOT_ID, move);
}
}
/**
* Apply the pending changes to the original schema and returns the result.
* <p>
* This does not result in a permanent update.
*
* @return the result Schema when all pending updates are applied
*/
@Override
public Schema apply() {
Schema newSchema = applyChanges(schema, deletes, updates, adds, moves);
// Validate the metrics if we have existing properties.
if (base != null && base.properties() != null) {
MetricsConfig.fromProperties(base.properties()).validateReferencedColumns(newSchema);
}
return newSchema;
}
@Override
public void commit() {
TableMetadata update = applyChangesToMapping(base.updateSchema(apply(), lastColumnId));
ops.commit(base, update);
}
private int assignNewColumnId() {
int next = lastColumnId + 1;
this.lastColumnId = next;
return next;
}
private TableMetadata applyChangesToMapping(TableMetadata metadata) {
String mappingJson = metadata.property(TableProperties.DEFAULT_NAME_MAPPING, null);
if (mappingJson != null) {
try {
// parse and update the mapping
NameMapping mapping = NameMappingParser.fromJson(mappingJson);
NameMapping updated = MappingUtil.update(mapping, updates, adds);
// replace the table property
Map<String, String> updatedProperties = Maps.newHashMap();
updatedProperties.putAll(metadata.properties());
updatedProperties.put(TableProperties.DEFAULT_NAME_MAPPING, NameMappingParser.toJson(updated));
return metadata.replaceProperties(updatedProperties);
} catch (RuntimeException e) {
// log the error, but do not fail the update
LOG.warn("Failed to update external schema mapping: {}", mappingJson, e);
}
}
return metadata;
}
private static Schema applyChanges(Schema schema, List<Integer> deletes,
Map<Integer, Types.NestedField> updates,
Multimap<Integer, Types.NestedField> adds,
Multimap<Integer, Move> moves) {
Types.StructType struct = TypeUtil
.visit(schema, new ApplyChanges(deletes, updates, adds, moves))
.asNestedType().asStructType();
return new Schema(struct.fields());
}
private static class ApplyChanges extends TypeUtil.SchemaVisitor<Type> {
private final List<Integer> deletes;
private final Map<Integer, Types.NestedField> updates;
private final Multimap<Integer, Types.NestedField> adds;
private final Multimap<Integer, Move> moves;
private ApplyChanges(List<Integer> deletes,
Map<Integer, Types.NestedField> updates,
Multimap<Integer, Types.NestedField> adds,
Multimap<Integer, Move> moves) {
this.deletes = deletes;
this.updates = updates;
this.adds = adds;
this.moves = moves;
}
@Override
public Type schema(Schema schema, Type structResult) {
List<Types.NestedField> fields = addAndMoveFields(structResult.asStructType().fields(),
adds.get(TABLE_ROOT_ID), moves.get(TABLE_ROOT_ID));
if (fields != null) {
return Types.StructType.of(fields);
}
return structResult;
}
@Override
public Type struct(Types.StructType struct, List<Type> fieldResults) {
boolean hasChange = false;
List<Types.NestedField> newFields = Lists.newArrayListWithExpectedSize(fieldResults.size());
for (int i = 0; i < fieldResults.size(); i += 1) {
Type resultType = fieldResults.get(i);
if (resultType == null) {
hasChange = true;
continue;
}
Types.NestedField field = struct.fields().get(i);
String name = field.name();
String doc = field.doc();
boolean isOptional = field.isOptional();
Types.NestedField update = updates.get(field.fieldId());
if (update != null) {
name = update.name();
doc = update.doc();
isOptional = update.isOptional();
}
if (name.equals(field.name()) &&
isOptional == field.isOptional() &&
field.type() == resultType &&
Objects.equals(doc, field.doc())) {
newFields.add(field);
} else {
hasChange = true;
newFields.add(Types.NestedField.of(field.fieldId(), isOptional, name, resultType, doc));
}
}
if (hasChange) {
// TODO: What happens if there are no fields left?
return Types.StructType.of(newFields);
}
return struct;
}
@Override
public Type field(Types.NestedField field, Type fieldResult) {
// the API validates deletes, updates, and additions don't conflict
// handle deletes
int fieldId = field.fieldId();
if (deletes.contains(fieldId)) {
return null;
}
// handle updates
Types.NestedField update = updates.get(field.fieldId());
if (update != null && update.type() != field.type()) {
// rename is handled in struct, but struct needs the correct type from the field result
return update.type();
}
// handle adds
Collection<Types.NestedField> newFields = adds.get(fieldId);
Collection<Move> columnsToMove = moves.get(fieldId);
if (!newFields.isEmpty() || !columnsToMove.isEmpty()) {
// if either collection is non-null, then this must be a struct type. try to apply the changes
List<Types.NestedField> fields = addAndMoveFields(
fieldResult.asStructType().fields(), newFields, columnsToMove);
if (fields != null) {
return Types.StructType.of(fields);
}
}
return fieldResult;
}
@Override
public Type list(Types.ListType list, Type elementResult) {
// use field to apply updates
Types.NestedField elementField = list.fields().get(0);
Type elementType = field(elementField, elementResult);
if (elementType == null) {
throw new IllegalArgumentException("Cannot delete element type from list: " + list);
}
Types.NestedField elementUpdate = updates.get(elementField.fieldId());
boolean isElementOptional = elementUpdate != null ? elementUpdate.isOptional() : list.isElementOptional();
if (isElementOptional == elementField.isOptional() && list.elementType() == elementType) {
return list;
}
if (isElementOptional) {
return Types.ListType.ofOptional(list.elementId(), elementType);
} else {
return Types.ListType.ofRequired(list.elementId(), elementType);
}
}
@Override
public Type map(Types.MapType map, Type kResult, Type valueResult) {
// if any updates are intended for the key, throw an exception
int keyId = map.fields().get(0).fieldId();
if (deletes.contains(keyId)) {
throw new IllegalArgumentException("Cannot delete map keys: " + map);
} else if (updates.containsKey(keyId)) {
throw new IllegalArgumentException("Cannot update map keys: " + map);
} else if (adds.containsKey(keyId)) {
throw new IllegalArgumentException("Cannot add fields to map keys: " + map);
} else if (!map.keyType().equals(kResult)) {
throw new IllegalArgumentException("Cannot alter map keys: " + map);
}
// use field to apply updates to the value
Types.NestedField valueField = map.fields().get(1);
Type valueType = field(valueField, valueResult);
if (valueType == null) {
throw new IllegalArgumentException("Cannot delete value type from map: " + map);
}
Types.NestedField valueUpdate = updates.get(valueField.fieldId());
boolean isValueOptional = valueUpdate != null ? valueUpdate.isOptional() : map.isValueOptional();
if (isValueOptional == map.isValueOptional() && map.valueType() == valueType) {
return map;
}
if (isValueOptional) {
return Types.MapType.ofOptional(map.keyId(), map.valueId(), map.keyType(), valueType);
} else {
return Types.MapType.ofRequired(map.keyId(), map.valueId(), map.keyType(), valueType);
}
}
@Override
public Type primitive(Type.PrimitiveType primitive) {
return primitive;
}
}
private static List<Types.NestedField> addAndMoveFields(List<Types.NestedField> fields,
Collection<Types.NestedField> adds,
Collection<Move> moves) {
if (adds != null && !adds.isEmpty()) {
if (moves != null && !moves.isEmpty()) {
// always apply adds first so that added fields can be moved
return moveFields(addFields(fields, adds), moves);
} else {
return addFields(fields, adds);
}
} else if (moves != null && !moves.isEmpty()) {
return moveFields(fields, moves);
}
return null;
}
private static List<Types.NestedField> addFields(List<Types.NestedField> fields,
Collection<Types.NestedField> adds) {
List<Types.NestedField> newFields = Lists.newArrayList(fields);
newFields.addAll(adds);
return newFields;
}
@SuppressWarnings("checkstyle:IllegalType")
private static List<Types.NestedField> moveFields(List<Types.NestedField> fields,
Collection<Move> moves) {
LinkedList<Types.NestedField> reordered = Lists.newLinkedList(fields);
for (Move move : moves) {
Types.NestedField toMove = Iterables.find(reordered, field -> field.fieldId() == move.fieldId());
reordered.remove(toMove);
switch (move.type()) {
case FIRST:
reordered.addFirst(toMove);
break;
case BEFORE:
Types.NestedField before = Iterables.find(reordered, field -> field.fieldId() == move.referenceFieldId());
int beforeIndex = reordered.indexOf(before);
// insert the new node at the index of the existing node
reordered.add(beforeIndex, toMove);
break;
case AFTER:
Types.NestedField after = Iterables.find(reordered, field -> field.fieldId() == move.referenceFieldId());
int afterIndex = reordered.indexOf(after);
reordered.add(afterIndex + 1, toMove);
break;
default:
throw new UnsupportedOperationException("Unknown move type: " + move.type());
}
}
return reordered;
}
/**
* Represents a requested column move in a struct.
*/
private static class Move {
private enum MoveType {
FIRST,
BEFORE,
AFTER
}
static Move first(int fieldId) {
return new Move(fieldId, -1, MoveType.FIRST);
}
static Move before(int fieldId, int referenceFieldId) {
return new Move(fieldId, referenceFieldId, MoveType.BEFORE);
}
static Move after(int fieldId, int referenceFieldId) {
return new Move(fieldId, referenceFieldId, MoveType.AFTER);
}
private final int fieldId;
private final int referenceFieldId;
private final MoveType type;
private Move(int fieldId, int referenceFieldId, MoveType type) {
this.fieldId = fieldId;
this.referenceFieldId = referenceFieldId;
this.type = type;
}
public int fieldId() {
return fieldId;
}
public int referenceFieldId() {
return referenceFieldId;
}
public MoveType type() {
return type;
}
}
}
| 1 | 33,062 | Nit: indentation is 2 indents (4 spaces) from the previous line. | apache-iceberg | java |
@@ -224,7 +224,7 @@ func (c *Config) RenderComposeYAML() (string, error) {
// path.Join is desired over filepath.Join here,
// as we always want a unix-style path for the mount.
"docroot": path.Join("../", c.Docroot),
- "plugin": c.Platform,
+ "plugin": "ddev",
"appType": c.AppType,
"mailhogport": appports.GetPort("mailhog"),
"dbaport": appports.GetPort("dba"), | 1 | package ddevapp
import (
"bytes"
"fmt"
"html/template"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/drud/ddev/pkg/appports"
"github.com/drud/ddev/pkg/util"
"github.com/drud/ddev/pkg/version"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
)
// CurrentAppVersion sets the current YAML config file version.
// We're not doing anything with AppVersion, so just default it to 1 for now.
const CurrentAppVersion = "1"
// DDevDefaultPlatform defines the DDev Platform. It's just hardcoded for now, but should be adjusted as we add more platforms.
const DDevDefaultPlatform = "local"
// DDevTLD defines the tld to use for DDev site URLs.
const DDevTLD = "ddev.local"
// AllowedAppTypes lists the types of site/app that can be used.
var AllowedAppTypes = []string{"drupal7", "drupal8", "wordpress"}
// Config defines the yaml config file format for ddev applications
type Config struct {
APIVersion string `yaml:"APIVersion"`
Name string `yaml:"name"`
AppType string `yaml:"type"`
Docroot string `yaml:"docroot"`
WebImage string `yaml:"webimage"`
DBImage string `yaml:"dbimage"`
DBAImage string `yaml:"dbaimage"`
ConfigPath string `yaml:"-"`
AppRoot string `yaml:"-"`
Platform string `yaml:"-"`
}
// NewConfig creates a new Config struct with defaults set. It is preferred to using new() directly.
func NewConfig(AppRoot string) (*Config, error) {
// Set defaults.
c := &Config{}
err := prepLocalSiteDirs(AppRoot)
util.CheckErr(err)
c.ConfigPath = filepath.Join(AppRoot, ".ddev", "config.yaml")
c.AppRoot = AppRoot
c.APIVersion = CurrentAppVersion
// Default platform for now.
c.Platform = DDevDefaultPlatform
// These should always default to the latest image/tag names from the Version package.
c.WebImage = version.WebImg + ":" + version.WebTag
c.DBImage = version.DBImg + ":" + version.DBTag
c.DBAImage = version.DBAImg + ":" + version.DBATag
// Load from file if available. This will return an error if the file doesn't exist,
// and it is up to the caller to determine if that's an issue.
err = c.Read()
if err != nil {
return c, err
}
return c, nil
}
// Write the app configuration to the .ddev folder.
func (c *Config) Write() error {
err := PrepDdevDirectory(filepath.Dir(c.ConfigPath))
if err != nil {
return err
}
cfgbytes, err := yaml.Marshal(c)
if err != nil {
return err
}
log.WithFields(log.Fields{
"location": c.ConfigPath,
}).Debug("Writing Config")
err = ioutil.WriteFile(c.ConfigPath, cfgbytes, 0644)
if err != nil {
return err
}
return nil
}
// Read app configuration from a specified location on disk, falling back to defaults for config
// values not defined in the read config file.
func (c *Config) Read() error {
source, err := ioutil.ReadFile(c.ConfigPath)
if err != nil {
return err
}
// Read config values from file.
err = yaml.Unmarshal(source, c)
if err != nil {
return err
}
log.WithFields(log.Fields{
"Read config": awsutil.Prettify(c),
}).Debug("Finished config read")
// If any of these values aren't defined in the config file, set them to defaults.
if c.Name == "" {
c.Name = filepath.Base(c.AppRoot)
}
if c.WebImage == "" {
c.WebImage = version.WebImg + ":" + version.WebTag
}
if c.DBImage == "" {
c.DBImage = version.DBImg + ":" + version.DBTag
}
if c.DBAImage == "" {
c.DBAImage = version.DBAImg + ":" + version.DBATag
}
log.WithFields(log.Fields{
"Active config": awsutil.Prettify(c),
}).Debug("Finished config set")
return nil
}
// Config goes through a set of prompts to receive user input and generate an Config struct.
func (c *Config) Config() error {
if c.ConfigExists() {
fmt.Printf("Editing existing ddev project at %s\n\n", c.AppRoot)
} else {
fmt.Printf("Creating a new ddev project config in the current directory (%s)\n", c.AppRoot)
fmt.Printf("Once completed, your configuration will be written to %s\n\n\n", c.ConfigPath)
}
// Log what the starting config is, for debugging purposes.
log.WithFields(log.Fields{
"Existing config": awsutil.Prettify(c),
}).Debug("Configuring application")
namePrompt := "Project name"
if c.Name == "" {
dir, err := os.Getwd()
if err == nil {
c.Name = filepath.Base(dir)
}
}
namePrompt = fmt.Sprintf("%s (%s)", namePrompt, c.Name)
// Define an application name.
fmt.Print(namePrompt + ": ")
c.Name = util.GetInput(c.Name)
err := c.docrootPrompt()
util.CheckErr(err)
err = c.appTypePrompt()
if err != nil {
return err
}
// Log the resulting config, for debugging purposes.
log.WithFields(log.Fields{
"Config": awsutil.Prettify(c),
}).Debug("Configuration completed")
return nil
}
// DockerComposeYAMLPath returns the absolute path to where the docker-compose.yaml should exist for this app configuration.
func (c *Config) DockerComposeYAMLPath() string {
return filepath.Join(c.AppRoot, ".ddev", "docker-compose.yaml")
}
// Hostname returns the hostname to the app controlled by this config.
func (c *Config) Hostname() string {
return c.Name + "." + DDevTLD
}
// WriteDockerComposeConfig writes a docker-compose.yaml to the app configuration directory.
func (c *Config) WriteDockerComposeConfig() error {
log.WithFields(log.Fields{
"Location": c.DockerComposeYAMLPath(),
}).Debug("Writing docker-compose.yaml")
f, err := os.Create(c.DockerComposeYAMLPath())
if err != nil {
return err
}
defer util.CheckClose(f)
rendered, err := c.RenderComposeYAML()
if err != nil {
return err
}
_, err = f.WriteString(rendered)
return err
}
// RenderComposeYAML renders the contents of docker-compose.yaml.
func (c *Config) RenderComposeYAML() (string, error) {
var doc bytes.Buffer
var err error
templ := template.New("compose template")
templ, err = templ.Parse(DDevComposeTemplate)
if err != nil {
return "", err
}
templateVars := map[string]string{
"name": c.Name,
// path.Join is desired over filepath.Join here,
// as we always want a unix-style path for the mount.
"docroot": path.Join("../", c.Docroot),
"plugin": c.Platform,
"appType": c.AppType,
"mailhogport": appports.GetPort("mailhog"),
"dbaport": appports.GetPort("dba"),
"dbport": appports.GetPort("db"),
}
err = templ.Execute(&doc, templateVars)
return doc.String(), err
}
func (c *Config) docrootPrompt() error {
// Determine the document root.
fmt.Printf("\nThe docroot is the directory from which your site is served. This is a relative path from your application root (%s)\n", c.AppRoot)
fmt.Println("You may leave this value blank if your site files are in the application root")
var docrootPrompt = "Docroot Location"
if c.Docroot != "" {
docrootPrompt = fmt.Sprintf("%s (%s)", docrootPrompt, c.Docroot)
}
fmt.Print(docrootPrompt + ": ")
c.Docroot = util.GetInput(c.Docroot)
// Ensure the docroot exists. If it doesn't, prompt the user to verify they entered it correctly.
fullPath := filepath.Join(c.AppRoot, c.Docroot)
if _, err := os.Stat(fullPath); os.IsNotExist(err) {
fmt.Printf("No directory could be found at %s. Please enter a valid docroot", fullPath)
c.Docroot = ""
return c.docrootPrompt()
}
return nil
}
// ConfigExists determines if a ddev config file exists for this application.
func (c *Config) ConfigExists() bool {
if _, err := os.Stat(c.ConfigPath); os.IsNotExist(err) {
return false
}
return true
}
// appTypePrompt handles the AppType workflow.
func (c *Config) appTypePrompt() error {
var appType string
typePrompt := fmt.Sprintf("Application Type [%s]", strings.Join(AllowedAppTypes, ", "))
// First, see if we can auto detect what kind of site it is so we can set a sane default.
absDocroot := filepath.Join(c.AppRoot, c.Docroot)
log.WithFields(log.Fields{
"Location": absDocroot,
}).Debug("Attempting to auto-determine application type")
appType, err := determineAppType(absDocroot)
if err == nil {
// If we found an application type just set it and inform the user.
fmt.Printf("Found a %s codebase at %s\n", appType, filepath.Join(c.AppRoot, c.Docroot))
c.AppType = appType
return nil
}
typePrompt = fmt.Sprintf("%s (%s)", typePrompt, c.AppType)
for IsAllowedAppType(appType) != true {
fmt.Printf(typePrompt + ": ")
appType = strings.ToLower(util.GetInput(c.AppType))
if IsAllowedAppType(appType) != true {
fmt.Printf("%s is not a valid application type. Allowed application types are: %s\n", appType, strings.Join(AllowedAppTypes, ", "))
}
c.AppType = appType
}
return nil
}
// IsAllowedAppType determines if a given string exists in the AllowedAppTypes slice.
func IsAllowedAppType(appType string) bool {
for _, t := range AllowedAppTypes {
if appType == t {
return true
}
}
return false
}
// PrepDdevDirectory creates a .ddev directory in the current working directory.
func PrepDdevDirectory(dir string) error {
if _, err := os.Stat(dir); os.IsNotExist(err) {
log.WithFields(log.Fields{
"directory": dir,
}).Debug("Config Directory does not exist, attempting to create.")
err := os.MkdirAll(dir, 0755)
if err != nil {
return err
}
}
return nil
}
// DetermineAppType uses some predetermined file checks to determine if a local app
// is of any of the known types
func determineAppType(basePath string) (string, error) {
defaultLocations := map[string]string{
"scripts/drupal.sh": "drupal7",
"core/scripts/drupal.sh": "drupal8",
"wp-settings.php": "wordpress",
}
for k, v := range defaultLocations {
fp := filepath.Join(basePath, k)
log.WithFields(log.Fields{
"file": fp,
}).Debug("Looking for app fingerprint.")
if _, err := os.Stat(fp); err == nil {
log.WithFields(log.Fields{
"file": fp,
"app": v,
}).Debug("Found app fingerprint.")
return v, nil
}
}
return "", errors.New("determineAppType() couldn't determine app's type")
}
// prepLocalSiteDirs creates a site's directories for local dev in .ddev
func prepLocalSiteDirs(base string) error {
dirs := []string{
".ddev",
".ddev/data",
}
for _, d := range dirs {
dirPath := filepath.Join(base, d)
fileInfo, err := os.Stat(dirPath)
if os.IsNotExist(err) { // If it doesn't exist, create it.
err := os.MkdirAll(dirPath, os.FileMode(int(0774)))
if err != nil {
return fmt.Errorf("Failed to create directory %s, err: %v", dirPath, err)
}
} else if err == nil && fileInfo.IsDir() { // If the directory exists, we're fine and don't have to create it.
continue
} else { // But otherwise it must have existed as a file, so bail
return fmt.Errorf("Error where trying to create directory %s, err: %v", dirPath, err)
}
}
return nil
}
| 1 | 11,383 | I don't understand why this is changing to a hard-coded string. | drud-ddev | php |
@@ -24,7 +24,7 @@ import (
// Devnet is a FAST lib environment that is meant to be used
// when working with kittyhawk devnets run by the Filecoin development team.
type Devnet struct {
- network string
+ network DevnetConfig
location string
log logging.EventLogger | 1 | package environment
// The devnet FAST environment provides an environment for using FAST with the deployed kittyhawk
// devnet infrastructure run by the Filecoin development team. It can be used to set up and manage nodes
// connected to either the nightly, test, or user devnets for running automation with the FAST library.
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"sync"
cid "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
iptb "github.com/ipfs/iptb/testbed"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/tools/fast"
)
// Devnet is a FAST lib environment that is meant to be used
// when working with kittyhawk devnets run by the Filecoin development team.
type Devnet struct {
network string
location string
log logging.EventLogger
processesMu sync.Mutex
processes []*fast.Filecoin
processCountMu sync.Mutex
processCount int
}
// NewDevnet builds an environment that connects to the deployed infrastructure of
// the kittyhawk devnets.
func NewDevnet(network, location string) (Environment, error) {
env := &Devnet{
network: network,
location: location,
log: logging.Logger("environment"),
}
if err := os.MkdirAll(env.location, 0775); err != nil {
return nil, err
}
return env, nil
}
// GenesisCar provides a url where the genesis file can be fetched from
func (e *Devnet) GenesisCar() string {
uri := url.URL{
Host: fmt.Sprintf("genesis.%s.kittyhawk.wtf", e.network),
Path: "genesis.car",
Scheme: "https",
}
return uri.String()
}
// GenesisMiner returns a ErrNoGenesisMiner for this environment
func (e *Devnet) GenesisMiner() (*GenesisMiner, error) {
return nil, ErrNoGenesisMiner
}
// Log returns the logger for the environment.
func (e *Devnet) Log() logging.EventLogger {
return e.log
}
// NewProcess builds an iptb process of the given type and options passed. The
// process is tracked by the environment and returned.
func (e *Devnet) NewProcess(ctx context.Context, processType string, options map[string]string, eo fast.FilecoinOpts) (*fast.Filecoin, error) {
e.processesMu.Lock()
defer e.processesMu.Unlock()
e.processCountMu.Lock()
defer e.processCountMu.Unlock()
ns := iptb.NodeSpec{
Type: processType,
Dir: fmt.Sprintf("%s/%d", e.location, e.processCount),
Attrs: options,
}
e.processCount = e.processCount + 1
e.log.Infof("New Process type: %s, dir: %s", processType, ns.Dir)
if err := os.MkdirAll(ns.Dir, 0775); err != nil {
return nil, err
}
c, err := ns.Load()
if err != nil {
return nil, err
}
// We require a slightly more extended core interface
fc, ok := c.(fast.IPTBCoreExt)
if !ok {
return nil, fmt.Errorf("%s does not implement the extended IPTB.Core interface IPTBCoreExt", processType)
}
p := fast.NewFilecoinProcess(ctx, fc, eo)
e.processes = append(e.processes, p)
return p, nil
}
// Processes returns all processes the environment knows about.
func (e *Devnet) Processes() []*fast.Filecoin {
e.processesMu.Lock()
defer e.processesMu.Unlock()
return e.processes[:]
}
// Teardown stops all of the nodes and cleans up the environment.
func (e *Devnet) Teardown(ctx context.Context) error {
e.processesMu.Lock()
defer e.processesMu.Unlock()
e.log.Info("Teardown environment")
for _, p := range e.processes {
if err := p.StopDaemon(ctx); err != nil {
return err
}
}
return os.RemoveAll(e.location)
}
// TeardownProcess stops the running process and removes it from the
// environment.
func (e *Devnet) TeardownProcess(ctx context.Context, p *fast.Filecoin) error {
e.processesMu.Lock()
defer e.processesMu.Unlock()
e.log.Infof("Teardown process: %s", p.String())
if err := p.StopDaemon(ctx); err != nil {
return err
}
for i, n := range e.processes {
if n == p {
e.processes = append(e.processes[:i], e.processes[i+1:]...)
break
}
}
// remove the process from the process list
return os.RemoveAll(p.Dir())
}
// GetFunds retrieves a fixed amount of tokens from the environment to the
// Filecoin processes default wallet address.
// GetFunds will send a request to the Faucet, the amount of tokens returned and
// number of requests permitted is determined by the Faucet configuration.
func (e *Devnet) GetFunds(ctx context.Context, p *fast.Filecoin) error {
e.processesMu.Lock()
defer e.processesMu.Unlock()
e.log.Infof("GetFunds for process: %s", p.String())
var toAddr address.Address
if err := p.ConfigGet(ctx, "wallet.defaultAddress", &toAddr); err != nil {
return err
}
data := url.Values{}
data.Set("target", toAddr.String())
uri := url.URL{
Host: fmt.Sprintf("faucet.%s.kittyhawk.wtf", e.network),
Path: "tap",
Scheme: "https",
}
resp, err := http.PostForm(uri.String(), data)
if err != nil {
return err
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
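// On success the faucet returns the CID of the funding message in the Message-Cid header; wait for that message before returning.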
switch resp.StatusCode {
case 200:
msgcid := resp.Header.Get("Message-Cid")
mcid, err := cid.Decode(msgcid)
if err != nil {
return err
}
if _, err := p.MessageWait(ctx, mcid); err != nil {
return err
}
return nil
case 400:
return fmt.Errorf("Bad Request: %s", string(b))
case 429:
return fmt.Errorf("Rate Limit: %s", string(b))
default:
return fmt.Errorf("Unhandled Status: %s", resp.Status)
}
}
| 1 | 21,301 | nit: perhaps calling this `config` would be more clear? | filecoin-project-venus | go |
@@ -110,7 +110,7 @@ de_DE.strings = {
'1': '+%{smart_count} Dateien hochladen',
'2': '+%{smart_count} Dateien hochladen',
},
- uploading: 'Uploading',
+ uploading: 'Lade hoch',
uploadingXFiles: {
'0': '%{smart_count} Datei wird hochgeladen',
'1': '%{smart_count} Dateien werden hochgeladen', | 1 | const de_DE = {}
de_DE.strings = {
addMoreFiles: 'Dateien hinzufügen',
addingMoreFiles: 'Dateien hinzufügen',
allowAccessDescription: 'Um Bilder oder Videos mit Ihrer Kamera aufzunehmen, erlauben Sie dieser Website bitte Zugriff auf Ihre Kamera.',
allowAccessTitle: 'Bitte erlauben Sie Zugriff auf Ihre Kamera',
authenticateWith: 'Mit %{pluginName} verbinden',
authenticateWithTitle: 'Bitte authentifizieren Sie sich mit %{pluginName}, um Dateien auszuwählen',
back: 'Zurück',
addMore: 'Dateien hinzufügen',
browse: 'Suche',
browseFiles: 'Suche',
cancel: 'Abbrechen',
cancelUpload: 'Upload abbrechen',
chooseFiles: 'Dateien auswählen',
closeModal: 'Modal schließen',
companionError: 'Verbindung zu Companion fehlgeschlagen',
complete: 'Fertig',
connectedToInternet: 'Verbunden mit dem Internet',
copyLink: 'Link kopieren',
copyLinkToClipboardFallback: 'Untenstehenden URL kopieren',
copyLinkToClipboardSuccess: 'Link in die Zwischenablage kopiert',
creatingAssembly: 'Upload vorbereiten...',
creatingAssemblyFailed: 'Transloadit: Konnte Assembly nicht erstellen',
dashboardTitle: 'Datei Uploader',
dashboardWindowTitle: 'Datei Uploader Fenster (Esc drücken zum Schließen)',
dataUploadedOfTotal: '%{complete} von %{total}',
done: 'Abgeschlossen',
dropHereOr: 'Dateien können über Drag/Drop oder per %{browse} hinzugefügt werden',
dropHint: 'Dateien können über Drag/Drop hinzugefügt werden',
dropPasteBoth: 'Dateien können über Drag/Drop, Einfügen oder per %{browse} hinzugefügt werden',
dropPasteFiles: 'Dateien können über Drag/Drop, Einfügen oder per %{browse} hinzugefügt werden',
dropPasteFolders: 'Dateien können über Drag/Drop, Einfügen oder per %{browse} hinzugefügt werden',
dropPasteImportBoth: 'Dateien können über Drag/Drop, Einfügen, per %{browse} oder von folgenden Services hinzugefügt werden',
dropPasteImportFiles: 'Dateien können über Drag/Drop, Einfügen, per %{browse} oder von folgenden Services hinzugefügt werden',
dropPasteImportFolders: 'Dateien können über Drag/Drop, Einfügen, per %{browse} oder von folgenden Services hinzugefügt werden',
editFile: 'Datei bearbeiten',
editing: '%{file} bearbeiten',
emptyFolderAdded: 'Keine Dateien konnten hinzugefügt werden, da der Ordner leer war',
encoding: 'Enkodieren...',
enterCorrectUrl: 'Falsche URL: Bitte stellen Sie sicher, dass Sie einen direkten Link zu einer Datei eingeben',
enterUrlToImport: 'URL zum Importieren einer Datei eingeben',
exceedsSize: 'Diese Datei ist größer als die maximal erlaubte Dateigröße',
failedToFetch: 'Companion konnte diese URL nicht verarbeiten - stellen Sie bitte sicher, dass sie korrekt ist',
failedToUpload: 'Fehler beim Upload von Datei %{file}',
fileSource: 'Dateiquelle: %{name}',
filesUploadedOfTotal: {
'0': '%{complete} von %{smart_count} Datei hochgeladen',
'1': '%{complete} von %{smart_count} Dateien hochgeladen',
'2': '%{complete} von %{smart_count} Dateien hochgeladen',
},
filter: 'Filter',
finishEditingFile: 'Dateibearbeitung beenden',
folderAdded: {
'0': '%{smart_count} Datei von %{folder} hinzugefügt',
'1': '%{smart_count} Dateien von %{folder} hinzugefügt',
'2': '%{smart_count} Dateien von %{folder} hinzugefügt',
},
import: 'Import',
importFrom: 'Importieren von %{name}',
loading: 'Laden...',
logOut: 'Ausloggen',
myDevice: 'Mein Gerät',
noCameraDescription: 'Bitte Kamera anschließen, um Bilder oder Videos aufzunehmen',
noCameraTitle: 'Kamera nicht verfügbar',
noDuplicates: 'Datei \'%{fileName}\' existiert bereits und kann nicht erneut hinzugefügt werden',
noFilesFound: 'Sie haben keine Dateien oder Ordner hier',
noInternetConnection: 'Keine Internetverbindung',
noNewAlreadyUploading: 'Während der Upload läuft, können keine weiteren Dateien hinzugefügt werden',
pause: 'Pausieren',
pauseUpload: 'Upload pausieren',
paused: 'Pausiert',
poweredBy: 'Angetrieben von',
processingXFiles: {
'0': '%{smart_count} Datei verarbeiten',
'1': '%{smart_count} Dateien verarbeiten',
'2': '%{smart_count} Dateien verarbeiten',
},
recordingLength: 'Aufnahmedauer %{recording_length}',
removeFile: 'Datei entfernen',
resetFilter: 'Filter zurücksetzen',
resume: 'Fortsetzen',
resumeUpload: 'Upload fortsetzen',
retry: 'Erneut versuchen',
retryUpload: 'Upload erneut versuchen',
saveChanges: 'Änderungen speichern',
selectX: {
'0': 'Wählen Sie %{smart_count}',
'1': 'Wählen Sie %{smart_count}',
'2': 'Wählen Sie %{smart_count}',
},
smile: 'Bitte lächeln!',
startRecording: 'Videoaufnahme starten',
stopRecording: 'Videoaufnahme beenden',
takePicture: 'Ein Foto machen',
timedOut: 'Upload für %{seconds} Sekunden stehen geblieben, breche ab.',
upload: 'Upload',
uploadComplete: 'Upload beendet',
uploadFailed: 'Upload fehlgeschlagen',
uploadPaused: 'Upload pausiert',
uploadXFiles: {
'0': '%{smart_count} Datei hochladen',
'1': '%{smart_count} Dateien hochladen',
'2': '%{smart_count} Dateien hochladen',
},
uploadXNewFiles: {
'0': '+%{smart_count} Datei hochladen',
'1': '+%{smart_count} Dateien hochladen',
'2': '+%{smart_count} Dateien hochladen',
},
uploading: 'Uploading',
uploadingXFiles: {
'0': '%{smart_count} Datei wird hochgeladen',
'1': '%{smart_count} Dateien werden hochgeladen',
'2': '%{smart_count} Dateien werden hochgeladen',
},
xFilesSelected: {
'0': '%{smart_count} Datei ausgewählt',
'1': '%{smart_count} Dateien ausgewählt',
'2': '%{smart_count} Dateien ausgewählt',
},
xMoreFilesAdded: {
'0': '%{smart_count} weitere Datei hinzugefügt',
'1': '%{smart_count} weitere Dateien hinzugefügt',
'2': '%{smart_count} weitere Dateien hinzugefügt',
},
xTimeLeft: '%{time} verbleibend',
youCanOnlyUploadFileTypes: 'Sie können nur folgende Dateitypen hochladen: %{types}',
youCanOnlyUploadX: {
'0': 'Sie können nur %{smart_count} Datei hochladen',
'1': 'Sie können nur %{smart_count} Dateien hochladen',
'2': 'Sie können nur %{smart_count} Dateien hochladen',
},
youHaveToAtLeastSelectX: {
'0': 'Sie müssen mindestens %{smart_count} Datei auswählen',
'1': 'Sie müssen mindestens %{smart_count} Dateien auswählen',
'2': 'Sie müssen mindestens %{smart_count} Dateien auswählen',
},
selectAllFilesFromFolderNamed: 'Wählen Sie alle Dateien aus dem Ordner %{name}',
unselectAllFilesFromFolderNamed: 'Heben Sie die Auswahl aller Dateien aus dem Ordner auf %{name}',
selectFileNamed: 'Datei aussuchen %{name}',
unselectFileNamed: 'Datei abwählen %{name}',
openFolderNamed: 'Ordner öffnen %{name}',
}
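// German uses two plural forms: index 0 for exactly one item, index 1 for everything else.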
de_DE.pluralize = function (n) {
if (n === 1) {
return 0
}
return 1
}
if (typeof window !== 'undefined' && typeof window.Uppy !== 'undefined') {
window.Uppy.locales.de_DE = de_DE
}
module.exports = de_DE
| 1 | 14,317 | I would rather recommend using 'Wird hochgeladen' since the imperative form of 'Lade hoch' confuses me whether this is an action I need to do or whether it's an action that Uppy is currently doing. Just my two cents. | transloadit-uppy | js |
@@ -30,6 +30,8 @@ const (
defaultDataDir = "."
defaultLogLevel = "INFO"
defaultUmask = 0077
+
+ minimumUmask = 0027
)
// RunConfig represents the available configurables for file | 1 | package run
import (
"context"
"crypto/x509"
"encoding/pem"
"errors"
"flag"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"strconv"
"github.com/hashicorp/hcl"
"github.com/spiffe/spire/pkg/agent"
"github.com/spiffe/spire/pkg/common/catalog"
"github.com/spiffe/spire/pkg/common/idutil"
"github.com/spiffe/spire/pkg/common/log"
"github.com/spiffe/spire/pkg/common/util"
)
const (
defaultConfigPath = "conf/agent/agent.conf"
defaultSocketPath = "./spire_api"
// TODO: Make my defaults sane
defaultDataDir = "."
defaultLogLevel = "INFO"
defaultUmask = 0077
)
// RunConfig represents the available configurables for file
// and CLI options
type runConfig struct {
AgentConfig agentConfig `hcl:"agent"`
PluginConfigs catalog.PluginConfigMap `hcl:"plugins"`
}
type agentConfig struct {
DataDir string `hcl:"data_dir"`
EnableSDS bool `hcl:"enable_sds"`
LogFile string `hcl:"log_file"`
LogLevel string `hcl:"log_level"`
ServerAddress string `hcl:"server_address"`
ServerPort int `hcl:"server_port"`
SocketPath string `hcl:"socket_path"`
TrustBundlePath string `hcl:"trust_bundle_path"`
TrustDomain string `hcl:"trust_domain"`
JoinToken string `hcl:"join_token"`
ConfigPath string
// Undocumented configurables
ProfilingEnabled bool `hcl:"profiling_enabled"`
ProfilingPort int `hcl:"profiling_port"`
ProfilingFreq int `hcl:"profiling_freq"`
ProfilingNames []string `hcl:"profiling_names"`
Umask string `hcl:"umask"`
}
type RunCLI struct {
}
func (*RunCLI) Help() string {
_, err := parseFlags([]string{"-h"})
return err.Error()
}
func (*RunCLI) Run(args []string) int {
cliConfig, err := parseFlags(args)
if err != nil {
fmt.Println(err.Error())
return 1
}
fileConfig, err := parseFile(cliConfig.AgentConfig.ConfigPath)
if err != nil {
fmt.Println(err.Error())
return 1
}
c := newDefaultConfig()
// Get the plugin configurations from the file
c.PluginConfigs = fileConfig.PluginConfigs
err = mergeConfigs(c, fileConfig, cliConfig)
if err != nil {
fmt.Println(err.Error())
}
err = validateConfig(c)
if err != nil {
fmt.Println(err.Error())
}
agt := agent.New(c)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
util.SignalListener(ctx, cancel)
err = agt.Run(ctx)
if err != nil {
c.Log.Errorf("agent crashed: %v", err)
return 1
}
c.Log.Infof("Agent stopped gracefully")
return 0
}
func (*RunCLI) Synopsis() string {
return "Runs the agent"
}
func parseFile(filePath string) (*runConfig, error) {
c := &runConfig{}
// Return a friendly error if the file is missing
if _, err := os.Stat(filePath); os.IsNotExist(err) {
msg := "could not find config file %s: please use the -config flag"
p, err := filepath.Abs(filePath)
if err != nil {
p = filePath
msg = "could not determine CWD; config file not found at %s: use -config"
}
return nil, fmt.Errorf(msg, p)
}
data, err := ioutil.ReadFile(filePath)
if err != nil {
return nil, err
}
hclTree, err := hcl.Parse(string(data))
if err != nil {
return nil, err
}
if err := hcl.DecodeObject(&c, hclTree); err != nil {
return nil, err
}
return c, nil
}
func parseFlags(args []string) (*runConfig, error) {
flags := flag.NewFlagSet("run", flag.ContinueOnError)
c := &runConfig{}
flags.StringVar(&c.AgentConfig.ServerAddress, "serverAddress", "", "IP address or DNS name of the SPIRE server")
flags.IntVar(&c.AgentConfig.ServerPort, "serverPort", 0, "Port number of the SPIRE server")
flags.StringVar(&c.AgentConfig.TrustDomain, "trustDomain", "", "The trust domain that this agent belongs to")
flags.StringVar(&c.AgentConfig.TrustBundlePath, "trustBundle", "", "Path to the SPIRE server CA bundle")
flags.StringVar(&c.AgentConfig.JoinToken, "joinToken", "", "An optional token which has been generated by the SPIRE server")
flags.StringVar(&c.AgentConfig.SocketPath, "socketPath", "", "Location to bind the workload API socket")
flags.StringVar(&c.AgentConfig.DataDir, "dataDir", "", "A directory the agent can use for its runtime data")
flags.StringVar(&c.AgentConfig.LogFile, "logFile", "", "File to write logs to")
flags.StringVar(&c.AgentConfig.LogLevel, "logLevel", "", "DEBUG, INFO, WARN or ERROR")
flags.StringVar(&c.AgentConfig.ConfigPath, "config", defaultConfigPath, "Path to a SPIRE config file")
flags.StringVar(&c.AgentConfig.Umask, "umask", "", "Umask value to use for new files")
err := flags.Parse(args)
if err != nil {
return nil, err
}
return c, nil
}
func mergeConfigs(c *agent.Config, fileConfig, cliConfig *runConfig) error {
// CLI > File, merge fileConfig first
err := mergeConfig(c, fileConfig)
if err != nil {
return err
}
return mergeConfig(c, cliConfig)
}
func mergeConfig(orig *agent.Config, cmd *runConfig) error {
if cmd.AgentConfig.ServerAddress != "" {
_, port, _ := net.SplitHostPort(orig.ServerAddress)
orig.ServerAddress = net.JoinHostPort(cmd.AgentConfig.ServerAddress, port)
}
if cmd.AgentConfig.ServerPort != 0 {
host, _, _ := net.SplitHostPort(orig.ServerAddress)
orig.ServerAddress = net.JoinHostPort(host, strconv.Itoa(cmd.AgentConfig.ServerPort))
}
if cmd.AgentConfig.TrustDomain != "" {
trustDomain, err := idutil.ParseSpiffeID("spiffe://"+cmd.AgentConfig.TrustDomain, idutil.AllowAnyTrustDomain())
if err != nil {
return err
}
orig.TrustDomain = *trustDomain
}
// Parse trust bundle
if cmd.AgentConfig.TrustBundlePath != "" {
bundle, err := parseTrustBundle(cmd.AgentConfig.TrustBundlePath)
if err != nil {
return fmt.Errorf("Error parsing trust bundle: %s", err)
}
orig.TrustBundle = bundle
}
if cmd.AgentConfig.JoinToken != "" {
orig.JoinToken = cmd.AgentConfig.JoinToken
}
if cmd.AgentConfig.SocketPath != "" {
orig.BindAddress.Name = cmd.AgentConfig.SocketPath
}
if cmd.AgentConfig.DataDir != "" {
orig.DataDir = cmd.AgentConfig.DataDir
}
if cmd.AgentConfig.EnableSDS {
orig.EnableSDS = cmd.AgentConfig.EnableSDS
}
// Handle log file and level
if cmd.AgentConfig.LogFile != "" || cmd.AgentConfig.LogLevel != "" {
logLevel := defaultLogLevel
if cmd.AgentConfig.LogLevel != "" {
logLevel = cmd.AgentConfig.LogLevel
}
logger, err := log.NewLogger(logLevel, cmd.AgentConfig.LogFile)
if err != nil {
return fmt.Errorf("Could not open log file %s: %s", cmd.AgentConfig.LogFile, err)
}
orig.Log = logger
}
if cmd.AgentConfig.Umask != "" {
umask, err := strconv.ParseInt(cmd.AgentConfig.Umask, 0, 0)
if err != nil {
return fmt.Errorf("Could not parse umask %s: %s", cmd.AgentConfig.Umask, err)
}
orig.Umask = int(umask)
}
if cmd.AgentConfig.ProfilingEnabled {
orig.ProfilingEnabled = cmd.AgentConfig.ProfilingEnabled
}
if orig.ProfilingEnabled {
if cmd.AgentConfig.ProfilingPort > 0 {
orig.ProfilingPort = cmd.AgentConfig.ProfilingPort
}
if cmd.AgentConfig.ProfilingFreq > 0 {
orig.ProfilingFreq = cmd.AgentConfig.ProfilingFreq
}
if len(cmd.AgentConfig.ProfilingNames) > 0 {
orig.ProfilingNames = cmd.AgentConfig.ProfilingNames
}
}
return nil
}
func validateConfig(c *agent.Config) error {
host, port, _ := net.SplitHostPort(c.ServerAddress)
if host == "" {
return errors.New("ServerAddress is required")
}
if port == "" {
return errors.New("ServerPort is required")
}
if c.TrustDomain.String() == "" {
return errors.New("TrustDomain is required")
}
if c.TrustBundle == nil {
return errors.New("TrustBundle is required")
}
return nil
}
func newDefaultConfig() *agent.Config {
bindAddr := &net.UnixAddr{Name: defaultSocketPath, Net: "unix"}
// log.NewLogger() cannot return error when using STDOUT
logger, _ := log.NewLogger(defaultLogLevel, "")
return &agent.Config{
BindAddress: bindAddr,
DataDir: defaultDataDir,
Log: logger,
Umask: defaultUmask,
}
}
func parseTrustBundle(path string) ([]*x509.Certificate, error) {
pemData, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
var data []byte
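// Decode the PEM blocks one at a time, collecting the DER bytes of every CERTIFICATE block so they can be parsed together below.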
for len(pemData) > 1 {
var block *pem.Block
block, pemData = pem.Decode(pemData)
if block == nil && len(data) < 1 {
return nil, errors.New("no certificates found")
}
if block == nil {
return nil, errors.New("encountered unknown data in trust bundle")
}
if block.Type != "CERTIFICATE" {
return nil, fmt.Errorf("non-certificate type %v found in trust bundle", block.Type)
}
data = append(data, block.Bytes...)
}
bundle, err := x509.ParseCertificates(data)
if err != nil {
return nil, fmt.Errorf("parse certificates from %v, %v", path, err)
}
return bundle, nil
}
| 1 | 10,309 | Hmm I would have expected this to go away? What role does it play now that we have a minimum? | spiffe-spire | go |
@@ -3,12 +3,6 @@
const MongoNetworkError = require('./core').MongoNetworkError;
const mongoErrorContextSymbol = require('./core').mongoErrorContextSymbol;
-const GET_MORE_NON_RESUMABLE_CODES = new Set([
- 136, // CappedPositionLost
- 237, // CursorKilled
- 11601 // Interrupted
-]);
-
// From spec@https://github.com/mongodb/specifications/blob/f93d78191f3db2898a59013a7ed5650352ef6da8/source/change-streams/change-streams.rst#resumable-error
const GET_MORE_RESUMABLE_CODES = new Set([
6, // HostUnreachable | 1 | 'use strict';
const MongoNetworkError = require('./core').MongoNetworkError;
const mongoErrorContextSymbol = require('./core').mongoErrorContextSymbol;
const GET_MORE_NON_RESUMABLE_CODES = new Set([
136, // CappedPositionLost
237, // CursorKilled
11601 // Interrupted
]);
// From spec@https://github.com/mongodb/specifications/blob/f93d78191f3db2898a59013a7ed5650352ef6da8/source/change-streams/change-streams.rst#resumable-error
const GET_MORE_RESUMABLE_CODES = new Set([
6, // HostUnreachable
7, // HostNotFound
89, // NetworkTimeout
91, // ShutdownInProgress
189, // PrimarySteppedDown
262, // ExceededTimeLimit
9001, // SocketException
10107, // NotMaster
11600, // InterruptedAtShutdown
11602, // InterruptedDueToReplStateChange
13435, // NotMasterNoSlaveOk
13436, // NotMasterOrSecondary
63, // StaleShardVersion
150, // StaleEpoch
13388, // StaleConfig
234, // RetryChangeStream
133 // FailedToSatisfyReadPreference
]);
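// Returns the isGetMore flag from the error's attached context, or undefined when no context is present.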
function isGetMoreError(error) {
if (error[mongoErrorContextSymbol]) {
return error[mongoErrorContextSymbol].isGetMore;
}
}
function isResumableError(error, wireVersion) {
if (!isGetMoreError(error)) {
return false;
}
if (error instanceof MongoNetworkError) {
return true;
}
if (wireVersion >= 9) {
return error.hasErrorLabel('ResumableChangeStreamError');
}
return (
GET_MORE_RESUMABLE_CODES.has(error.code) &&
!error.hasErrorLabel('NonResumableChangeStreamError')
);
}
module.exports = { GET_MORE_NON_RESUMABLE_CODES, GET_MORE_RESUMABLE_CODES, isResumableError };
| 1 | 17,291 | if we are removing this, is it useful to have the `isGetMoreError` message below? If that's not required, can we also remove the `mongoErrorContextSymbol` outright? | mongodb-node-mongodb-native | js |
@@ -59,11 +59,7 @@ public:
void helper_block_for_at_least_entries(
uint32_t amount)
{
- std::unique_lock<std::mutex> lck(*xml_mutex_);
- mock_consumer->cv().wait(lck, [this, amount]
- {
- return mock_consumer->ConsumedEntriesSize_nts() >= amount;
- });
+ mock_consumer->wait_for_at_least_entries(amount);
}
eprosima::fastdds::dds::MockConsumer* mock_consumer; | 1 | // Copyright 2020 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <fastrtps/rtps/builtin/data/WriterProxyData.h>
#include <fastrtps/rtps/builtin/data/ReaderProxyData.h>
#include <fastrtps/xmlparser/XMLEndpointParser.h>
#include <fastdds/dds/log/Log.hpp>
#include <fastdds/dds/log/OStreamConsumer.hpp>
#include <fastdds/dds/log/FileConsumer.hpp>
#include <fastdds/dds/log/StdoutConsumer.hpp>
#include <fastdds/dds/log/StdoutErrConsumer.hpp>
#include "../logging/mock/MockConsumer.h"
#include <tinyxml2.h>
#include <gtest/gtest.h>
#include <string>
#include <fstream>
#include <sstream>
using namespace eprosima::fastrtps;
using namespace eprosima::fastrtps::rtps;
using namespace ::testing;
using eprosima::fastrtps::xmlparser::XMLP_ret;
using eprosima::fastrtps::xmlparser::XMLEndpointParser;
using eprosima::fastrtps::xmlparser::StaticRTPSParticipantInfo;
using eprosima::fastrtps::rtps::ReaderProxyData;
using eprosima::fastrtps::rtps::WriterProxyData;
using eprosima::fastdds::dds::Log;
using eprosima::fastdds::dds::LogConsumer;
class XMLEndpointParserTests : public ::testing::Test
{
public:
XMLEndpointParserTests()
{
}
~XMLEndpointParserTests()
{
eprosima::fastdds::dds::Log::Reset();
eprosima::fastdds::dds::Log::KillThread();
}
void helper_block_for_at_least_entries(
uint32_t amount)
{
std::unique_lock<std::mutex> lck(*xml_mutex_);
mock_consumer->cv().wait(lck, [this, amount]
{
return mock_consumer->ConsumedEntriesSize_nts() >= amount;
});
}
eprosima::fastdds::dds::MockConsumer* mock_consumer;
mutable std::mutex* xml_mutex_;
XMLEndpointParser* mp_edpXML;
protected:
void SetUp() override
{
xml_mutex_ = new std::mutex();
mp_edpXML = new xmlparser::XMLEndpointParser();
}
void TearDown() override
{
delete xml_mutex_;
delete mp_edpXML;
xml_mutex_ = nullptr;
mp_edpXML = nullptr;
}
};
/*
* This test checks the negative cases of the XMLEndpointParser::loadXMLFile method.
* 1. Check passing a non-existent file.
* 2. Check passing a file with invalid content.
*/
TEST_F(XMLEndpointParserTests, loadXMLFileNegativeClauses)
{
std::string filename;
filename = "bad_filename";
EXPECT_EQ(XMLP_ret::XML_ERROR, mp_edpXML->loadXMLFile(filename));
filename = "wrong.xml";
const char* content = "<bad_element></bad_element>";
std::ofstream out(filename);
out << content;
out.close();
EXPECT_EQ(XMLP_ret::XML_ERROR, mp_edpXML->loadXMLFile(filename));
remove(filename.c_str());
}
/*
* This test checks the method XMLEndpointParser::loadXMLNode.
* 1. Check the return for a correct XML
* 2. Check the return for an incorrect XML
*/
TEST_F(XMLEndpointParserTests, loadXMLNode)
{
tinyxml2::XMLDocument xml_doc;
{
// Correct XML
const char* xml =
"\
<staticdiscovery>\
<participant>\
<name>HelloWorldSubscriber</name>\
<reader>\
<userId>3</userId>\
<entityID>4</entityID>\
<expectsInlineQos>true</expectsInlineQos>\
<topicName>HelloWorldTopic</topicName>\
<topicDataType>HelloWorld</topicDataType>\
<topicKind>WITH_KEY</topicKind>\
<partitionQos>HelloPartition</partitionQos>\
<partitionQos>WorldPartition</partitionQos>\
<unicastLocator address=\"192.168.0.128\" port=\"5000\"/>\
<unicastLocator address=\"10.47.8.30\" port=\"6000\"/>\
<multicastLocator address=\"239.255.1.1\" port=\"7000\"/>\
<reliabilityQos>BEST_EFFORT_RELIABILITY_QOS</reliabilityQos>\
<durabilityQos>VOLATILE_DURABILITY_QOS</durabilityQos>\
<ownershipQos kind=\"SHARED_OWNERSHIP_QOS\"/>\
<livelinessQos kind=\"AUTOMATIC_LIVELINESS_QOS\" leaseDuration_ms=\"1000\"/>\
</reader>\
</participant>\
<participant>\
<name>HelloWorldPublisher</name>\
<writer>\
<unicastLocator address=\"192.168.0.120\" port=\"9000\"/>\
<unicastLocator address=\"10.47.8.31\" port=\"8000\"/>\
<multicastLocator address=\"239.255.1.1\" port=\"7000\"/>\
<userId>5</userId>\
<entityID>6</entityID>\
<topicName>HelloWorldTopic</topicName>\
<topicDataType>HelloWorld</topicDataType>\
<topicKind>WITH_KEY</topicKind>\
<partitionQos>HelloPartition</partitionQos>\
<partitionQos>WorldPartition</partitionQos>\
<reliabilityQos>BEST_EFFORT_RELIABILITY_QOS</reliabilityQos>\
<durabilityQos>VOLATILE_DURABILITY_QOS</durabilityQos>\
<ownershipQos kind=\"SHARED_OWNERSHIP_QOS\" strength=\"50\"/>\
<livelinessQos kind=\"AUTOMATIC_LIVELINESS_QOS\" leaseDuration_ms=\"1000\"/>\
</writer>\
</participant>\
</staticdiscovery>\
";
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
EXPECT_EQ(XMLP_ret::XML_OK, mp_edpXML->loadXMLNode(xml_doc));
}
{
// Wrong XML
const char* xml = "<bad_xml></bad_xml>";
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
EXPECT_EQ(XMLP_ret::XML_ERROR, mp_edpXML->loadXMLNode(xml_doc));
}
}
/*
* This test checks the XMLEndpointParser::loadXMLParticipantEndpoint method.
* 1. Check incorrect values for the writer tag
* 2. Check incorrect values for the reader tag
* 3. Check an incorrect tag
*/
TEST_F(XMLEndpointParserTests, loadXMLParticipantEndpoint)
{
tinyxml2::XMLDocument xml_doc;
tinyxml2::XMLElement* titleElement;
mock_consumer = new eprosima::fastdds::dds::MockConsumer();
Log::RegisterConsumer(std::unique_ptr<LogConsumer>(mock_consumer));
{
StaticRTPSParticipantInfo* pdata = new StaticRTPSParticipantInfo();
// Parametrized XML
const char* xml =
"\
<participant>\
<name>HelloWorldSubscriber</name>\
<reader>\
<userId>3</userId>\
<entityID>4</entityID>\
<expectsInlineQos>true</expectsInlineQos>\
<topicName>HelloWorldTopic</topicName>\
<topicDataType>HelloWorld</topicDataType>\
<topicKind>WITH_KEY</topicKind>\
<topic name=\"HelloWorldTopic\" dataType=\"HelloWorld\" kind=\"WITH_KEY\"/>\
<partitionQos>HelloPartition</partitionQos>\
<partitionQos>WorldPartition</partitionQos>\
<unicastLocator address=\"192.168.0.128\" port=\"5000\"/>\
<unicastLocator address=\"10.47.8.30\" port=\"6000\"/>\
<multicastLocator address=\"239.255.1.1\" port=\"7000\"/>\
<reliabilityQos>BEST_EFFORT_RELIABILITY_QOS</reliabilityQos>\
<durabilityQos>VOLATILE_DURABILITY_QOS</durabilityQos>\
<ownershipQos kind=\"SHARED_OWNERSHIP_QOS\"/>\
<livelinessQos kind=\"AUTOMATIC_LIVELINESS_QOS\" leaseDuration_ms=\"1000\"/>\
</reader>\
</participant>\
";
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
mp_edpXML->loadXMLParticipantEndpoint(titleElement, pdata);
EXPECT_EQ(pdata->m_RTPSParticipantName, "HelloWorldSubscriber");
EXPECT_EQ(pdata->m_readers.size(), (size_t)1);
// Delete the ReaderProxyData created inside loadXMLParticipantEndpoint
delete pdata->m_readers[0];
// Then delete StaticRTPSParticipantInfo
delete pdata;
}
{
StaticRTPSParticipantInfo* pdata = new StaticRTPSParticipantInfo();
// Parametrized XML
const char* xml_p =
"\
<participant>\
<%s>bad_value</%s>\
</participant>\
";
char xml[500];
std::vector<std::string> test_tags = {"reader", "writer", "bad_element"};
for (const std::string& tag : test_tags)
{
sprintf(xml, xml_p, tag.c_str(), tag.c_str());
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
mp_edpXML->loadXMLParticipantEndpoint(titleElement, pdata);
}
helper_block_for_at_least_entries(5);
auto consumed_entries = mock_consumer->ConsumedEntries();
// Expect 5 log errors.
uint32_t num_errors = 0;
for (const auto& entry : consumed_entries)
{
if (entry.kind == Log::Kind::Error)
{
num_errors++;
}
}
EXPECT_EQ(num_errors, 5u);
delete pdata;
}
}
/*
* This test checks the XMLEndpointParser::loadXMLReaderEndpoint method.
* 1. Check correct parsing of the XML into ReaderProxyData
* 2. Check incorrect values for the livelinessQos
* 3. Check incorrect values for the ownershipQos
* 4. Check an incorrect value for tags with parsable content
* 5. Check an incorrect value for tags with parsable attributes
*/
TEST_F(XMLEndpointParserTests, loadXMLReaderEndpoint)
{
tinyxml2::XMLDocument xml_doc;
tinyxml2::XMLElement* titleElement;
int user_id = 3;
int entity_id = 4;
{
StaticRTPSParticipantInfo* pdata = new StaticRTPSParticipantInfo();
// Parametrized XML
const char* xml =
"\
<reader>\
<userId>3</userId>\
<entityID>4</entityID>\
<expectsInlineQos>true</expectsInlineQos>\
<topicName>HelloWorldTopic</topicName>\
<topicDataType>HelloWorld</topicDataType>\
<topicKind>WITH_KEY</topicKind>\
<topic name=\"HelloWorldTopic\" dataType=\"HelloWorld\" kind=\"WITH_KEY\"/>\
<partitionQos>HelloPartition</partitionQos>\
<partitionQos>WorldPartition</partitionQos>\
<unicastLocator address=\"192.168.0.128\" port=\"5000\"/>\
<unicastLocator address=\"10.47.8.30\" port=\"6000\"/>\
<multicastLocator address=\"239.255.1.1\" port=\"7000\"/>\
<reliabilityQos>BEST_EFFORT_RELIABILITY_QOS</reliabilityQos>\
<durabilityQos>VOLATILE_DURABILITY_QOS</durabilityQos>\
<ownershipQos kind=\"SHARED_OWNERSHIP_QOS\"/>\
<livelinessQos kind=\"AUTOMATIC_LIVELINESS_QOS\" leaseDuration_ms=\"1000\"/>\
</reader>\
";
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_OK, mp_edpXML->loadXMLReaderEndpoint(titleElement, pdata));
// Topic attributes
EXPECT_EQ(pdata->m_readers[0]->topicName(), "HelloWorldTopic");
EXPECT_EQ(pdata->m_readers[0]->topicKind(), TopicKind_t::WITH_KEY);
EXPECT_EQ(pdata->m_readers[0]->typeName(), "HelloWorld");
EXPECT_EQ(pdata->m_readers[0]->has_locators(), true);
// Locators
Locator_t uni_loc;
IPLocator::setIPv4(uni_loc, "192.168.0.128");
uni_loc.port = static_cast<uint16_t>(5000);
EXPECT_EQ(pdata->m_readers[0]->remote_locators().unicast[0], uni_loc);
Locator_t multi_loc;
IPLocator::setIPv4(multi_loc, "239.255.1.1");
multi_loc.port = static_cast<uint16_t>(7000);
EXPECT_EQ(pdata->m_readers[0]->remote_locators().multicast[0], multi_loc);
// qos
EXPECT_EQ(pdata->m_readers[0]->m_qos.m_reliability.kind, BEST_EFFORT_RELIABILITY_QOS);
EXPECT_EQ(pdata->m_readers[0]->m_qos.m_durability.kind, VOLATILE_DURABILITY_QOS);
EXPECT_EQ(pdata->m_readers[0]->m_qos.m_ownership.kind, SHARED_OWNERSHIP_QOS);
EXPECT_EQ(pdata->m_readers[0]->m_qos.m_liveliness.kind, AUTOMATIC_LIVELINESS_QOS);
// Delete the ReaderProxyData created inside loadXMLParticipantEndpoint
delete pdata->m_readers[0];
// Then delete StaticRTPSParticipantInfo
delete pdata;
}
{
StaticRTPSParticipantInfo* pdata = new StaticRTPSParticipantInfo();
// Parametrized XML
const char* xml_p =
"\
<reader>\
<userId>%d</userId>\
<entityID>%d</entityID>\
<livelinessQos kind=\"%s\" leaseDuration_ms=\"%s\"/>\
</reader>\
";
char xml[500];
user_id += 2;
entity_id += 2;
sprintf(xml, xml_p, user_id, entity_id, "AUTOMATIC_LIVELINESS_QOS", "1000");
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_OK, mp_edpXML->loadXMLWriterEndpoint(titleElement, pdata));
user_id += 2;
entity_id += 2;
sprintf(xml, xml_p, user_id, entity_id, "MANUAL_BY_PARTICIPANT_LIVELINESS_QOS", "1000");
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_OK, mp_edpXML->loadXMLWriterEndpoint(titleElement, pdata));
user_id += 2;
entity_id += 2;
sprintf(xml, xml_p, user_id, entity_id, "MANUAL_BY_TOPIC_LIVELINESS_QOS", "1000");
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_OK, mp_edpXML->loadXMLWriterEndpoint(titleElement, pdata));
user_id += 2;
entity_id += 2;
sprintf(xml, xml_p, user_id, entity_id, "MANUAL_BY_TOPIC_LIVELINESS_QOS", "INF");
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_OK, mp_edpXML->loadXMLWriterEndpoint(titleElement, pdata));
user_id += 2;
entity_id += 2;
sprintf(xml, xml_p, user_id, entity_id, "MANUAL_BY_TOPIC_LIVELINESS_QOS", "0");
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_OK, mp_edpXML->loadXMLWriterEndpoint(titleElement, pdata));
// Delete the WriterProxyData created inside loadXMLWriterEndpoint
for (auto wdata : pdata->m_writers)
{
delete wdata;
}
// Then delete StaticRTPSParticipantInfo
delete pdata;
}
{
StaticRTPSParticipantInfo* pdata = new StaticRTPSParticipantInfo();
// Parametrized XML
const char* xml_p =
"\
<reader>\
<userId>%d</userId>\
<entityID>%d</entityID>\
<ownershipQos kind=\"%s\"/>\
</reader>\
";
char xml[500];
user_id += 2;
entity_id += 2;
sprintf(xml, xml_p, user_id, entity_id, "SHARED_OWNERSHIP_QOS");
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_OK, mp_edpXML->loadXMLReaderEndpoint(titleElement, pdata));
user_id += 2;
entity_id += 2;
sprintf(xml, xml_p, user_id, entity_id, "EXCLUSIVE_OWNERSHIP_QOS");
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_OK, mp_edpXML->loadXMLReaderEndpoint(titleElement, pdata));
// Delete the ReaderProxyData created inside loadXMLReaderEndpoint
for (auto rdata : pdata->m_readers)
{
delete rdata;
}
// Then delete StaticRTPSParticipantInfo
delete pdata;
}
{
StaticRTPSParticipantInfo* pdata = new StaticRTPSParticipantInfo();
// Tags with child tags
const char* xml_content =
"\
<reader>\
<%s>bad_value</%s>\
</reader>\
";
char xml[500];
std::vector<std::string> content_tags =
{
"userId",
"entityID",
"expectsInlineQos",
"topicName",
"topicDataType",
"topicKind",
"partitionQos",
"partitionQos",
"reliabilityQos",
"durabilityQos",
"bad_element"
};
for (const std::string& tag : content_tags)
{
sprintf(xml, xml_content, tag.c_str(), tag.c_str());
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_ERROR, mp_edpXML->loadXMLReaderEndpoint(titleElement, pdata));
}
delete pdata;
}
{
// Tags with attributes
StaticRTPSParticipantInfo* pdata = new StaticRTPSParticipantInfo();
const char* xml_attribute =
"\
<reader>\
<%s bad_attribute=\"bad_value\"/>\
</reader>\
";
char xml[500];
std::vector<std::string> attribute_tags =
{
"unicastLocator",
"unicastLocator",
"multicastLocator",
"ownershipQos",
"livelinessQos"
};
for (const std::string& tag : attribute_tags)
{
sprintf(xml, xml_attribute, tag.c_str());
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_ERROR, mp_edpXML->loadXMLReaderEndpoint(titleElement, pdata));
}
delete pdata;
}
}
/*
* This test checks the XMLEndpointParser::loadXMLWriterEndpoint method.
* 1. Check correct parsing of the XML into WriterProxyData
* 2. Check incorrect values for the livelinessQos
* 3. Check incorrect values for the ownershipQos
* 4. Check an incorrect value for tags with parsable content
* 5. Check an incorrect value for tags with parsable attributes
*/
TEST_F(XMLEndpointParserTests, loadXMLWriterEndpoint)
{
tinyxml2::XMLDocument xml_doc;
tinyxml2::XMLElement* titleElement;
int user_id = 3;
int entity_id = 4;
{
StaticRTPSParticipantInfo* pdata = new StaticRTPSParticipantInfo();
// Parametrized XML
const char* xml =
"\
<writer>\
<userId>3</userId>\
<entityID>4</entityID>\
<expectsInlineQos>true</expectsInlineQos>\
<topicName>HelloWorldTopic</topicName>\
<topicDataType>HelloWorld</topicDataType>\
<topicKind>NO_KEY</topicKind>\
<topic name=\"HelloWorldTopic\" dataType=\"HelloWorld\" kind=\"NO_KEY\"/>\
<partitionQos>HelloPartition</partitionQos>\
<partitionQos>WorldPartition</partitionQos>\
<unicastLocator address=\"192.168.0.128\" port=\"5000\"/>\
<unicastLocator address=\"10.47.8.30\" port=\"6000\"/>\
<multicastLocator address=\"239.255.1.1\" port=\"7000\"/>\
<reliabilityQos>BEST_EFFORT_RELIABILITY_QOS</reliabilityQos>\
<durabilityQos>VOLATILE_DURABILITY_QOS</durabilityQos>\
<ownershipQos kind=\"SHARED_OWNERSHIP_QOS\"/>\
<livelinessQos kind=\"AUTOMATIC_LIVELINESS_QOS\" leaseDuration_ms=\"1000\"/>\
</writer>\
";
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_OK, mp_edpXML->loadXMLWriterEndpoint(titleElement, pdata));
// Topic attributes
EXPECT_EQ(pdata->m_writers[0]->topicName(), "HelloWorldTopic");
EXPECT_EQ(pdata->m_writers[0]->topicKind(), TopicKind_t::NO_KEY);
EXPECT_EQ(pdata->m_writers[0]->typeName(), "HelloWorld");
EXPECT_EQ(pdata->m_writers[0]->has_locators(), true);
// Locators
Locator_t uni_loc;
IPLocator::setIPv4(uni_loc, "192.168.0.128");
uni_loc.port = static_cast<uint16_t>(5000);
EXPECT_EQ(pdata->m_writers[0]->remote_locators().unicast[0], uni_loc);
Locator_t multi_loc;
IPLocator::setIPv4(multi_loc, "239.255.1.1");
multi_loc.port = static_cast<uint16_t>(7000);
EXPECT_EQ(pdata->m_writers[0]->remote_locators().multicast[0], multi_loc);
// qos
EXPECT_EQ(pdata->m_writers[0]->m_qos.m_reliability.kind, BEST_EFFORT_RELIABILITY_QOS);
EXPECT_EQ(pdata->m_writers[0]->m_qos.m_durability.kind, VOLATILE_DURABILITY_QOS);
EXPECT_EQ(pdata->m_writers[0]->m_qos.m_ownership.kind, SHARED_OWNERSHIP_QOS);
EXPECT_EQ(pdata->m_writers[0]->m_qos.m_liveliness.kind, AUTOMATIC_LIVELINESS_QOS);
// Delete the WriterProxyData created inside loadXMLWriterEndpoint
delete pdata->m_writers[0];
// Then delete StaticRTPSParticipantInfo
delete pdata;
}
{
StaticRTPSParticipantInfo* pdata = new StaticRTPSParticipantInfo();
// Parametrized XML
const char* xml_p =
"\
<writer>\
<userId>%d</userId>\
<entityID>%d</entityID>\
<livelinessQos kind=\"%s\" leaseDuration_ms=\"%s\"/>\
</writer>\
";
char xml[500];
user_id += 2;
entity_id += 2;
sprintf(xml, xml_p, user_id, entity_id, "AUTOMATIC_LIVELINESS_QOS", "1000");
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_OK, mp_edpXML->loadXMLWriterEndpoint(titleElement, pdata));
user_id += 2;
entity_id += 2;
sprintf(xml, xml_p, user_id, entity_id, "MANUAL_BY_PARTICIPANT_LIVELINESS_QOS", "1000");
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_OK, mp_edpXML->loadXMLWriterEndpoint(titleElement, pdata));
user_id += 2;
entity_id += 2;
sprintf(xml, xml_p, user_id, entity_id, "MANUAL_BY_TOPIC_LIVELINESS_QOS", "1000");
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_OK, mp_edpXML->loadXMLWriterEndpoint(titleElement, pdata));
user_id += 2;
entity_id += 2;
sprintf(xml, xml_p, user_id, entity_id, "MANUAL_BY_TOPIC_LIVELINESS_QOS", "INF");
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_OK, mp_edpXML->loadXMLWriterEndpoint(titleElement, pdata));
user_id += 2;
entity_id += 2;
sprintf(xml, xml_p, user_id, entity_id, "MANUAL_BY_TOPIC_LIVELINESS_QOS", "0");
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_OK, mp_edpXML->loadXMLWriterEndpoint(titleElement, pdata));
// Delete the WriterProxyData created inside loadXMLWriterEndpoint
for (auto wdata : pdata->m_writers)
{
delete wdata;
}
// Then delete StaticRTPSParticipantInfo
delete pdata;
}
{
StaticRTPSParticipantInfo* pdata = new StaticRTPSParticipantInfo();
// Parametrized XML
const char* xml_p =
"\
<writer>\
<userId>%d</userId>\
<entityID>%d</entityID>\
<ownershipQos kind=\"%s\"/>\
</writer>\
";
char xml[500];
user_id += 2;
entity_id += 2;
sprintf(xml, xml_p, user_id, entity_id, "SHARED_OWNERSHIP_QOS");
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_OK, mp_edpXML->loadXMLWriterEndpoint(titleElement, pdata));
user_id += 2;
entity_id += 2;
sprintf(xml, xml_p, user_id, entity_id, "EXCLUSIVE_OWNERSHIP_QOS");
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_OK, mp_edpXML->loadXMLWriterEndpoint(titleElement, pdata));
// Delete the WriterProxyData created inside loadXMLWriterEndpoint
for (auto wdata : pdata->m_writers)
{
delete wdata;
}
// Then delete StaticRTPSParticipantInfo
delete pdata;
}
{
StaticRTPSParticipantInfo* pdata = new StaticRTPSParticipantInfo();
// Tags with child tags
const char* xml_content =
"\
<writer>\
<%s>bad_value</%s>\
</writer>\
";
char xml[500];
std::vector<std::string> content_tags =
{
"userId",
"entityID",
"expectsInlineQos",
"topicName",
"topicDataType",
"topicKind",
"partitionQos",
"partitionQos",
"reliabilityQos",
"durabilityQos",
"bad_element"
};
for (const std::string& tag : content_tags)
{
sprintf(xml, xml_content, tag.c_str(), tag.c_str());
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_ERROR, mp_edpXML->loadXMLWriterEndpoint(titleElement, pdata));
}
delete pdata;
}
{
// Tags with attributes
StaticRTPSParticipantInfo* pdata = new StaticRTPSParticipantInfo();
const char* xml_attribute =
"\
<writer>\
<%s bad_attribute=\"bad_value\"/>\
</writer>\
";
char xml[500];
std::vector<std::string> attribute_tags =
{
"unicastLocator",
"unicastLocator",
"multicastLocator",
"ownershipQos",
"livelinessQos"
};
for (const std::string& tag : attribute_tags)
{
sprintf(xml, xml_attribute, tag.c_str());
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
titleElement = xml_doc.FirstChildElement();
EXPECT_EQ(XMLP_ret::XML_ERROR, mp_edpXML->loadXMLWriterEndpoint(titleElement, pdata));
}
delete pdata;
}
}
/*
* This test checks the XMLEndpointParser::lookforReader method. First load a reader to the parser with loadXMLNode,
* then retrieve it with its id. Then check the returned reader data is correct.
*/
TEST_F(XMLEndpointParserTests, lookforReader)
{
tinyxml2::XMLDocument xml_doc;
ReaderProxyData* rdataptr = nullptr;
const char* xml =
"\
<staticdiscovery>\
<participant>\
<name>HelloWorldPublisher</name>\
<reader>\
<userId>3</userId>\
<entityID>4</entityID>\
<topicName>HelloWorldTopic</topicName>\
<topicDataType>HelloWorld</topicDataType>\
<topicKind>WITH_KEY</topicKind>\
<unicastLocator address=\"192.168.0.128\" port=\"5000\"/>\
<multicastLocator address=\"239.255.1.1\" port=\"7000\"/>\
</reader>\
</participant>\
</staticdiscovery>\
";
// Load writer with known properties
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
EXPECT_EQ(XMLP_ret::XML_OK, mp_edpXML->loadXMLNode(xml_doc));
ASSERT_EQ(XMLP_ret::XML_OK, mp_edpXML->lookforReader("HelloWorldPublisher", 3, &rdataptr));
ASSERT_NE(rdataptr, nullptr);
EXPECT_EQ(rdataptr->topicName(), "HelloWorldTopic");
EXPECT_EQ(rdataptr->topicKind(), TopicKind_t::WITH_KEY);
EXPECT_EQ(rdataptr->typeName(), "HelloWorld");
EXPECT_EQ(rdataptr->has_locators(), true);
// Locators
Locator_t uni_loc;
IPLocator::setIPv4(uni_loc, "192.168.0.128");
uni_loc.port = static_cast<uint16_t>(5000);
EXPECT_EQ(rdataptr->remote_locators().unicast[0], uni_loc);
Locator_t multi_loc;
IPLocator::setIPv4(multi_loc, "239.255.1.1");
multi_loc.port = static_cast<uint16_t>(7000);
EXPECT_EQ(rdataptr->remote_locators().multicast[0], multi_loc);
ASSERT_EQ(XMLP_ret::XML_ERROR, mp_edpXML->lookforReader("WrongName", 15, &rdataptr));
}
/*
* This test checks the XMLEndpointParser::lookforWriter method. First load a writer to the parser with loadXMLNode,
* then retrieve it with its id. Then check the returned writer data is correct.
*/
TEST_F(XMLEndpointParserTests, lookforWriter)
{
tinyxml2::XMLDocument xml_doc;
WriterProxyData* wdataptr = nullptr;
const char* xml =
"\
<staticdiscovery>\
<participant>\
<name>HelloWorldPublisher</name>\
<writer>\
<userId>3</userId>\
<entityID>4</entityID>\
<topicName>HelloWorldTopic</topicName>\
<topicDataType>HelloWorld</topicDataType>\
<topicKind>WITH_KEY</topicKind>\
<unicastLocator address=\"192.168.0.128\" port=\"5000\"/>\
<multicastLocator address=\"239.255.1.1\" port=\"7000\"/>\
</writer>\
</participant>\
</staticdiscovery>\
";
// Load writer with known properties
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
EXPECT_EQ(XMLP_ret::XML_OK, mp_edpXML->loadXMLNode(xml_doc));
ASSERT_EQ(XMLP_ret::XML_OK, mp_edpXML->lookforWriter("HelloWorldPublisher", 3, &wdataptr));
EXPECT_EQ(wdataptr->topicName(), "HelloWorldTopic");
EXPECT_EQ(wdataptr->topicKind(), TopicKind_t::WITH_KEY);
EXPECT_EQ(wdataptr->typeName(), "HelloWorld");
EXPECT_EQ(wdataptr->has_locators(), true);
// Locators
Locator_t uni_loc;
IPLocator::setIPv4(uni_loc, "192.168.0.128");
uni_loc.port = static_cast<uint16_t>(5000);
EXPECT_EQ(wdataptr->remote_locators().unicast[0], uni_loc);
Locator_t multi_loc;
IPLocator::setIPv4(multi_loc, "239.255.1.1");
multi_loc.port = static_cast<uint16_t>(7000);
EXPECT_EQ(wdataptr->remote_locators().multicast[0], multi_loc);
ASSERT_EQ(XMLP_ret::XML_ERROR, mp_edpXML->lookforWriter("WrongName", 15, &wdataptr));
}
int main(
int argc,
char** argv)
{
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
| 1 | 23,457 | I think this mutex is only used here. Remove it. | eProsima-Fast-DDS | cpp |
@@ -181,7 +181,16 @@ public class IndexSearcher {
}
/** Runs searches for each segment separately, using the
- * provided Executor. NOTE:
+ * provided Executor. The passed in Executor will also be
+ * used by LRUQueryCache (if enabled) to perform asynchronous
+ * query caching.
+ * If a task is rejected by the host Executor, the failed task
+ * will then be executed on the caller thread. This is done to
+ * ensure that a query succeeds, albeit with a higher latency.
+ * If a user wishes to modify the said behaviour, they can either
+ * handle the exception in the provided Executor, or override
+ * the said method in a custom extension of IndexSearcher.
+ * NOTE:
* if you are using {@link NIOFSDirectory}, do not use
* the shutdownNow method of ExecutorService as this uses
* Thread.interrupt under-the-hood which can silently | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.search;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;
import java.util.concurrent.RejectedExecutionException;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.StoredFieldVisitor;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.store.NIOFSDirectory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.ThreadInterruptedException;
/** Implements search over a single IndexReader.
*
* <p>Applications usually need only call the inherited
* {@link #search(Query,int)} method. For
* performance reasons, if your index is unchanging, you
* should share a single IndexSearcher instance across
* multiple searches instead of creating a new one
* per-search. If your index has changed and you wish to
* see the changes reflected in searching, you should
* use {@link DirectoryReader#openIfChanged(DirectoryReader)}
* to obtain a new reader and
* then create a new IndexSearcher from that. Also, for
* low-latency turnaround it's best to use a near-real-time
* reader ({@link DirectoryReader#open(IndexWriter)}).
* Once you have a new {@link IndexReader}, it's relatively
* cheap to create a new IndexSearcher from it.
*
* <p><b>NOTE</b>: The {@link #search} and {@link #searchAfter} methods are
* configured to only count top hits accurately up to {@code 1,000} and may
* return a {@link TotalHits.Relation lower bound} of the hit count if the
* hit count is greater than or equal to {@code 1,000}. On queries that match
* lots of documents, counting the number of hits may take much longer than
* computing the top hits so this trade-off allows to get some minimal
* information about the hit count without slowing down search too much. The
* {@link TopDocs#scoreDocs} array is always accurate however. If this behavior
* doesn't suit your needs, you should create collectors manually with either
* {@link TopScoreDocCollector#create} or {@link TopFieldCollector#create} and
* call {@link #search(Query, Collector)}.
*
* <a name="thread-safety"></a><p><b>NOTE</b>: <code>{@link
* IndexSearcher}</code> instances are completely
* thread safe, meaning multiple threads can call any of its
* methods, concurrently. If your application requires
* external synchronization, you should <b>not</b>
* synchronize on the <code>IndexSearcher</code> instance;
* use your own (non-Lucene) objects instead.</p>
*/
public class IndexSearcher {
static int maxClauseCount = 1024;
private static QueryCache DEFAULT_QUERY_CACHE;
private static QueryCachingPolicy DEFAULT_CACHING_POLICY = new UsageTrackingQueryCachingPolicy();
static {
final int maxCachedQueries = 1000;
// min of 32MB or 5% of the heap size
final long maxRamBytesUsed = Math.min(1L << 25, Runtime.getRuntime().maxMemory() / 20);
DEFAULT_QUERY_CACHE = new LRUQueryCache(maxCachedQueries, maxRamBytesUsed);
}
/**
* By default we count hits accurately up to 1000. This makes sure that we
* don't spend most time on computing hit counts
*/
private static final int TOTAL_HITS_THRESHOLD = 1000;
/**
* Thresholds for index slice allocation logic. To change the default, extend
* <code> IndexSearcher</code> and use custom values
*/
private static final int MAX_DOCS_PER_SLICE = 250_000;
private static final int MAX_SEGMENTS_PER_SLICE = 5;
final IndexReader reader; // package private for testing!
// NOTE: these members might change in incompatible ways
// in the next release
protected final IndexReaderContext readerContext;
protected final List<LeafReaderContext> leafContexts;
/** used with executor - each slice holds a set of leafs executed within one thread */
private final LeafSlice[] leafSlices;
// These are only used for multi-threaded search
private final Executor executor;
// the default Similarity
private static final Similarity defaultSimilarity = new BM25Similarity();
private QueryCache queryCache = DEFAULT_QUERY_CACHE;
private QueryCachingPolicy queryCachingPolicy = DEFAULT_CACHING_POLICY;
/**
* Expert: returns a default Similarity instance.
* In general, this method is only called to initialize searchers and writers.
* User code and query implementations should respect
* {@link IndexSearcher#getSimilarity()}.
* @lucene.internal
*/
public static Similarity getDefaultSimilarity() {
return defaultSimilarity;
}
/**
* Expert: Get the default {@link QueryCache} or {@code null} if the cache is disabled.
* @lucene.internal
*/
public static QueryCache getDefaultQueryCache() {
return DEFAULT_QUERY_CACHE;
}
/**
* Expert: set the default {@link QueryCache} instance.
* @lucene.internal
*/
public static void setDefaultQueryCache(QueryCache defaultQueryCache) {
DEFAULT_QUERY_CACHE = defaultQueryCache;
}
/**
* Expert: Get the default {@link QueryCachingPolicy}.
* @lucene.internal
*/
public static QueryCachingPolicy getDefaultQueryCachingPolicy() {
return DEFAULT_CACHING_POLICY;
}
/**
* Expert: set the default {@link QueryCachingPolicy} instance.
* @lucene.internal
*/
public static void setDefaultQueryCachingPolicy(QueryCachingPolicy defaultQueryCachingPolicy) {
DEFAULT_CACHING_POLICY = defaultQueryCachingPolicy;
}
/** The Similarity implementation used by this searcher. */
private Similarity similarity = defaultSimilarity;
/** Creates a searcher searching the provided index. */
public IndexSearcher(IndexReader r) {
this(r, null);
}
/** Runs searches for each segment separately, using the
* provided Executor. NOTE:
* if you are using {@link NIOFSDirectory}, do not use
* the shutdownNow method of ExecutorService as this uses
* Thread.interrupt under-the-hood which can silently
* close file descriptors (see <a
* href="https://issues.apache.org/jira/browse/LUCENE-2239">LUCENE-2239</a>).
*
* @lucene.experimental */
public IndexSearcher(IndexReader r, Executor executor) {
this(r.getContext(), executor);
}
/**
* Creates a searcher searching the provided top-level {@link IndexReaderContext}.
* <p>
* Given a non-<code>null</code> {@link Executor} this method runs
* searches for each segment separately, using the provided Executor.
* NOTE: if you are using {@link NIOFSDirectory}, do not use the shutdownNow method of
* ExecutorService as this uses Thread.interrupt under-the-hood which can
* silently close file descriptors (see <a
* href="https://issues.apache.org/jira/browse/LUCENE-2239">LUCENE-2239</a>).
*
* @see IndexReaderContext
* @see IndexReader#getContext()
* @lucene.experimental
*/
public IndexSearcher(IndexReaderContext context, Executor executor) {
assert context.isTopLevel: "IndexSearcher's ReaderContext must be topLevel for reader" + context.reader();
reader = context.reader();
this.executor = executor;
this.readerContext = context;
leafContexts = context.leaves();
this.leafSlices = executor == null ? null : slices(leafContexts);
}
/**
* Creates a searcher searching the provided top-level {@link IndexReaderContext}.
*
* @see IndexReaderContext
* @see IndexReader#getContext()
* @lucene.experimental
*/
public IndexSearcher(IndexReaderContext context) {
this(context, null);
}
/** Return the maximum number of clauses permitted, 1024 by default.
* Attempts to add more than the permitted number of clauses cause {@link
* TooManyClauses} to be thrown.
* @see #setMaxClauseCount(int)
*/
public static int getMaxClauseCount() { return maxClauseCount; }
/**
* Set the maximum number of clauses permitted per Query.
* Default value is 1024.
*/
public static void setMaxClauseCount(int value) {
if (value < 1) {
throw new IllegalArgumentException("maxClauseCount must be >= 1");
}
maxClauseCount = value;
}
/**
* Set the {@link QueryCache} to use when scores are not needed.
* A value of {@code null} indicates that query matches should never be
* cached. This method should be called <b>before</b> starting using this
* {@link IndexSearcher}.
* <p>NOTE: When using a query cache, queries should not be modified after
* they have been passed to IndexSearcher.
* @see QueryCache
* @lucene.experimental
*/
public void setQueryCache(QueryCache queryCache) {
this.queryCache = queryCache;
}
/**
* Return the query cache of this {@link IndexSearcher}. This will be either
* the {@link #getDefaultQueryCache() default query cache} or the query cache
* that was last set through {@link #setQueryCache(QueryCache)}. A return
* value of {@code null} indicates that caching is disabled.
* @lucene.experimental
*/
public QueryCache getQueryCache() {
return queryCache;
}
/**
* Set the {@link QueryCachingPolicy} to use for query caching.
* This method should be called <b>before</b> starting using this
* {@link IndexSearcher}.
* @see QueryCachingPolicy
* @lucene.experimental
*/
public void setQueryCachingPolicy(QueryCachingPolicy queryCachingPolicy) {
this.queryCachingPolicy = Objects.requireNonNull(queryCachingPolicy);
}
/**
* Return the query cache of this {@link IndexSearcher}. This will be either
* the {@link #getDefaultQueryCachingPolicy() default policy} or the policy
* that was last set through {@link #setQueryCachingPolicy(QueryCachingPolicy)}.
* @lucene.experimental
*/
public QueryCachingPolicy getQueryCachingPolicy() {
return queryCachingPolicy;
}
/**
* Expert: Creates an array of leaf slices each holding a subset of the given leaves.
* Each {@link LeafSlice} is executed in a single thread. By default, segments with more than
* MAX_DOCS_PER_SLICE will get their own thread
*/
protected LeafSlice[] slices(List<LeafReaderContext> leaves) {
return slices(leaves, MAX_DOCS_PER_SLICE, MAX_SEGMENTS_PER_SLICE);
}
/**
* Static method to segregate LeafReaderContexts amongst multiple slices
*/
public static LeafSlice[] slices (List<LeafReaderContext> leaves, int maxDocsPerSlice,
int maxSegmentsPerSlice) {
// Make a copy so we can sort:
List<LeafReaderContext> sortedLeaves = new ArrayList<>(leaves);
// Sort by maxDoc, descending:
Collections.sort(sortedLeaves,
Collections.reverseOrder(Comparator.comparingInt(l -> l.reader().maxDoc())));
final List<List<LeafReaderContext>> groupedLeaves = new ArrayList<>();
long docSum = 0;
List<LeafReaderContext> group = null;
for (LeafReaderContext ctx : sortedLeaves) {
if (ctx.reader().maxDoc() > maxDocsPerSlice) {
assert group == null;
groupedLeaves.add(Collections.singletonList(ctx));
} else {
if (group == null) {
group = new ArrayList<>();
group.add(ctx);
groupedLeaves.add(group);
} else {
group.add(ctx);
}
docSum += ctx.reader().maxDoc();
if (group.size() >= maxSegmentsPerSlice || docSum > maxDocsPerSlice) {
group = null;
docSum = 0;
}
}
}
LeafSlice[] slices = new LeafSlice[groupedLeaves.size()];
int upto = 0;
for (List<LeafReaderContext> currentLeaf : groupedLeaves) {
slices[upto] = new LeafSlice(currentLeaf);
++upto;
}
return slices;
}
/** Return the {@link IndexReader} this searches. */
public IndexReader getIndexReader() {
return reader;
}
/**
* Sugar for <code>.getIndexReader().document(docID)</code>
* @see IndexReader#document(int)
*/
public Document doc(int docID) throws IOException {
return reader.document(docID);
}
/**
* Sugar for <code>.getIndexReader().document(docID, fieldVisitor)</code>
* @see IndexReader#document(int, StoredFieldVisitor)
*/
public void doc(int docID, StoredFieldVisitor fieldVisitor) throws IOException {
reader.document(docID, fieldVisitor);
}
/**
* Sugar for <code>.getIndexReader().document(docID, fieldsToLoad)</code>
* @see IndexReader#document(int, Set)
*/
public Document doc(int docID, Set<String> fieldsToLoad) throws IOException {
return reader.document(docID, fieldsToLoad);
}
/** Expert: Set the Similarity implementation used by this IndexSearcher.
*
*/
public void setSimilarity(Similarity similarity) {
this.similarity = similarity;
}
/** Expert: Get the {@link Similarity} to use to compute scores. This returns the
* {@link Similarity} that has been set through {@link #setSimilarity(Similarity)}
* or the default {@link Similarity} if none has been set explicitly. */
public Similarity getSimilarity() {
return similarity;
}
/**
* Count how many documents match the given query.
*/
public int count(Query query) throws IOException {
query = rewrite(query);
while (true) {
// remove wrappers that don't matter for counts
if (query instanceof ConstantScoreQuery) {
query = ((ConstantScoreQuery) query).getQuery();
} else {
break;
}
}
// some counts can be computed in constant time
if (query instanceof MatchAllDocsQuery) {
return reader.numDocs();
} else if (query instanceof TermQuery && reader.hasDeletions() == false) {
Term term = ((TermQuery) query).getTerm();
int count = 0;
for (LeafReaderContext leaf : reader.leaves()) {
count += leaf.reader().docFreq(term);
}
return count;
}
// general case: create a collecor and count matches
final CollectorManager<TotalHitCountCollector, Integer> collectorManager = new CollectorManager<TotalHitCountCollector, Integer>() {
@Override
public TotalHitCountCollector newCollector() throws IOException {
return new TotalHitCountCollector();
}
@Override
public Integer reduce(Collection<TotalHitCountCollector> collectors) throws IOException {
int total = 0;
for (TotalHitCountCollector collector : collectors) {
total += collector.getTotalHits();
}
return total;
}
};
return search(query, collectorManager);
}
/** Returns the leaf slices used for concurrent searching, or null if no {@code Executor} was
* passed to the constructor.
*
* @lucene.experimental */
public LeafSlice[] getSlices() {
return leafSlices;
}
/** Finds the top <code>n</code>
* hits for <code>query</code> where all results are after a previous
* result (<code>after</code>).
* <p>
* By passing the bottom result from a previous page as <code>after</code>,
* this method can be used for efficient 'deep-paging' across potentially
* large result sets.
*
* @throws TooManyClauses If a query would exceed
* {@link IndexSearcher#getMaxClauseCount()} clauses.
*/
public TopDocs searchAfter(ScoreDoc after, Query query, int numHits) throws IOException {
final int limit = Math.max(1, reader.maxDoc());
if (after != null && after.doc >= limit) {
throw new IllegalArgumentException("after.doc exceeds the number of documents in the reader: after.doc="
+ after.doc + " limit=" + limit);
}
final int cappedNumHits = Math.min(numHits, limit);
final CollectorManager<TopScoreDocCollector, TopDocs> manager = new CollectorManager<TopScoreDocCollector, TopDocs>() {
private final HitsThresholdChecker hitsThresholdChecker = (executor == null || leafSlices.length <= 1) ? HitsThresholdChecker.create(TOTAL_HITS_THRESHOLD) :
HitsThresholdChecker.createShared(TOTAL_HITS_THRESHOLD);
private final BottomValueChecker bottomValueChecker = BottomValueChecker.createMaxBottomScoreChecker();
@Override
public TopScoreDocCollector newCollector() throws IOException {
return TopScoreDocCollector.create(cappedNumHits, after, hitsThresholdChecker, bottomValueChecker);
}
@Override
public TopDocs reduce(Collection<TopScoreDocCollector> collectors) throws IOException {
final TopDocs[] topDocs = new TopDocs[collectors.size()];
int i = 0;
for (TopScoreDocCollector collector : collectors) {
topDocs[i++] = collector.topDocs();
}
return TopDocs.merge(0, cappedNumHits, topDocs);
}
};
return search(query, manager);
}
/** Finds the top <code>n</code>
* hits for <code>query</code>.
*
* @throws TooManyClauses If a query would exceed
* {@link IndexSearcher#getMaxClauseCount()} clauses.
*/
public TopDocs search(Query query, int n)
throws IOException {
return searchAfter(null, query, n);
}
/** Lower-level search API.
*
* <p>{@link LeafCollector#collect(int)} is called for every matching document.
*
* @throws TooManyClauses If a query would exceed
* {@link IndexSearcher#getMaxClauseCount()} clauses.
*/
public void search(Query query, Collector results)
throws IOException {
query = rewrite(query);
search(leafContexts, createWeight(query, results.scoreMode(), 1), results);
}
/** Search implementation with arbitrary sorting, plus
* control over whether hit scores and max score
* should be computed. Finds
* the top <code>n</code> hits for <code>query</code>, and sorting
* the hits by the criteria in <code>sort</code>.
* If <code>doDocScores</code> is <code>true</code>
* then the score of each hit will be computed and
* returned. If <code>doMaxScore</code> is
* <code>true</code> then the maximum score over all
* collected hits will be computed.
*
* @throws TooManyClauses If a query would exceed
* {@link IndexSearcher#getMaxClauseCount()} clauses.
*/
public TopFieldDocs search(Query query, int n,
Sort sort, boolean doDocScores) throws IOException {
return searchAfter(null, query, n, sort, doDocScores);
}
/**
* Search implementation with arbitrary sorting.
* @param query The query to search for
* @param n Return only the top n results
* @param sort The {@link org.apache.lucene.search.Sort} object
* @return The top docs, sorted according to the supplied {@link org.apache.lucene.search.Sort} instance
* @throws IOException if there is a low-level I/O error
*/
public TopFieldDocs search(Query query, int n, Sort sort) throws IOException {
return searchAfter(null, query, n, sort, false);
}
/** Finds the top <code>n</code>
* hits for <code>query</code> where all results are after a previous
* result (<code>after</code>).
* <p>
* By passing the bottom result from a previous page as <code>after</code>,
* this method can be used for efficient 'deep-paging' across potentially
* large result sets.
*
* @throws TooManyClauses If a query would exceed
* {@link IndexSearcher#getMaxClauseCount()} clauses.
*/
public TopDocs searchAfter(ScoreDoc after, Query query, int n, Sort sort) throws IOException {
return searchAfter(after, query, n, sort, false);
}
/** Finds the top <code>n</code>
* hits for <code>query</code> where all results are after a previous
* result (<code>after</code>), allowing control over
* whether hit scores and max score should be computed.
* <p>
* By passing the bottom result from a previous page as <code>after</code>,
* this method can be used for efficient 'deep-paging' across potentially
* large result sets. If <code>doDocScores</code> is <code>true</code>
* then the score of each hit will be computed and
* returned. If <code>doMaxScore</code> is
* <code>true</code> then the maximum score over all
* collected hits will be computed.
*
* @throws TooManyClauses If a query would exceed
* {@link IndexSearcher#getMaxClauseCount()} clauses.
*/
public TopFieldDocs searchAfter(ScoreDoc after, Query query, int numHits, Sort sort,
boolean doDocScores) throws IOException {
if (after != null && !(after instanceof FieldDoc)) {
// TODO: if we fix type safety of TopFieldDocs we can
// remove this
throw new IllegalArgumentException("after must be a FieldDoc; got " + after);
}
return searchAfter((FieldDoc) after, query, numHits, sort, doDocScores);
}
private TopFieldDocs searchAfter(FieldDoc after, Query query, int numHits, Sort sort,
boolean doDocScores) throws IOException {
final int limit = Math.max(1, reader.maxDoc());
if (after != null && after.doc >= limit) {
throw new IllegalArgumentException("after.doc exceeds the number of documents in the reader: after.doc="
+ after.doc + " limit=" + limit);
}
final int cappedNumHits = Math.min(numHits, limit);
final Sort rewrittenSort = sort.rewrite(this);
final CollectorManager<TopFieldCollector, TopFieldDocs> manager = new CollectorManager<TopFieldCollector, TopFieldDocs>() {
private final HitsThresholdChecker hitsThresholdChecker = (executor == null || leafSlices.length <= 1) ? HitsThresholdChecker.create(TOTAL_HITS_THRESHOLD) :
HitsThresholdChecker.createShared(TOTAL_HITS_THRESHOLD);
@Override
public TopFieldCollector newCollector() throws IOException {
// TODO: don't pay the price for accurate hit counts by default
return TopFieldCollector.create(rewrittenSort, cappedNumHits, after, hitsThresholdChecker);
}
@Override
public TopFieldDocs reduce(Collection<TopFieldCollector> collectors) throws IOException {
final TopFieldDocs[] topDocs = new TopFieldDocs[collectors.size()];
int i = 0;
for (TopFieldCollector collector : collectors) {
topDocs[i++] = collector.topDocs();
}
return TopDocs.merge(rewrittenSort, 0, cappedNumHits, topDocs);
}
};
TopFieldDocs topDocs = search(query, manager);
if (doDocScores) {
TopFieldCollector.populateScores(topDocs.scoreDocs, this, query);
}
return topDocs;
}
/**
* Lower-level search API.
* Search all leaves using the given {@link CollectorManager}. In contrast
* to {@link #search(Query, Collector)}, this method will use the searcher's
* {@link Executor} in order to parallelize execution of the collection
* on the configured {@link #leafSlices}.
* @see CollectorManager
* @lucene.experimental
*/
public <C extends Collector, T> T search(Query query, CollectorManager<C, T> collectorManager) throws IOException {
if (executor == null || leafSlices.length <= 1) {
final C collector = collectorManager.newCollector();
search(query, collector);
return collectorManager.reduce(Collections.singletonList(collector));
} else {
final List<C> collectors = new ArrayList<>(leafSlices.length);
ScoreMode scoreMode = null;
for (int i = 0; i < leafSlices.length; ++i) {
final C collector = collectorManager.newCollector();
collectors.add(collector);
if (scoreMode == null) {
scoreMode = collector.scoreMode();
} else if (scoreMode != collector.scoreMode()) {
throw new IllegalStateException("CollectorManager does not always produce collectors with the same score mode");
}
}
if (scoreMode == null) {
// no segments
scoreMode = ScoreMode.COMPLETE;
}
query = rewrite(query);
final Weight weight = createWeight(query, scoreMode, 1);
final List<Future<C>> topDocsFutures = new ArrayList<>(leafSlices.length);
for (int i = 0; i < leafSlices.length - 1; ++i) {
final LeafReaderContext[] leaves = leafSlices[i].leaves;
final C collector = collectors.get(i);
FutureTask<C> task = new FutureTask<>(() -> {
search(Arrays.asList(leaves), weight, collector);
return collector;
});
boolean executedOnCallerThread = false;
try {
executor.execute(task);
} catch (RejectedExecutionException e) {
// Execute on caller thread
search(Arrays.asList(leaves), weight, collector);
topDocsFutures.add(CompletableFuture.completedFuture(collector));
executedOnCallerThread = true;
}
// Do not add the task's future if it was not used
if (executedOnCallerThread == false) {
topDocsFutures.add(task);
}
}
final LeafReaderContext[] leaves = leafSlices[leafSlices.length - 1].leaves;
final C collector = collectors.get(leafSlices.length - 1);
// execute the last on the caller thread
search(Arrays.asList(leaves), weight, collector);
topDocsFutures.add(CompletableFuture.completedFuture(collector));
final List<C> collectedCollectors = new ArrayList<>();
for (Future<C> future : topDocsFutures) {
try {
collectedCollectors.add(future.get());
} catch (InterruptedException e) {
throw new ThreadInterruptedException(e);
} catch (ExecutionException e) {
throw new RuntimeException(e);
}
}
return collectorManager.reduce(collectors);
}
}
/**
* Lower-level search API.
*
* <p>
* {@link LeafCollector#collect(int)} is called for every document. <br>
*
* <p>
* NOTE: this method executes the searches on all given leaves exclusively.
* To search across all the searchers leaves use {@link #leafContexts}.
*
* @param leaves
* the searchers leaves to execute the searches on
* @param weight
* to match documents
* @param collector
* to receive hits
* @throws TooManyClauses If a query would exceed
* {@link IndexSearcher#getMaxClauseCount()} clauses.
*/
protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector)
throws IOException {
// TODO: should we make this
// threaded...? the Collector could be sync'd?
// always use single thread:
for (LeafReaderContext ctx : leaves) { // search each subreader
final LeafCollector leafCollector;
try {
leafCollector = collector.getLeafCollector(ctx);
} catch (CollectionTerminatedException e) {
// there is no doc of interest in this reader context
// continue with the following leaf
continue;
}
BulkScorer scorer = weight.bulkScorer(ctx);
if (scorer != null) {
try {
scorer.score(leafCollector, ctx.reader().getLiveDocs());
} catch (CollectionTerminatedException e) {
// collection was terminated prematurely
// continue with the following leaf
}
}
}
}
/** Expert: called to re-write queries into primitive queries.
* @throws TooManyClauses If a query would exceed
* {@link IndexSearcher#getMaxClauseCount()} clauses.
*/
public Query rewrite(Query original) throws IOException {
Query query = original;
for (Query rewrittenQuery = query.rewrite(reader); rewrittenQuery != query;
rewrittenQuery = query.rewrite(reader)) {
query = rewrittenQuery;
}
query.visit(getNumClausesCheckVisitor());
return query;
}
/** Returns a QueryVisitor which recursively checks the total
* number of clauses that a query and its children cumulatively
* have and validates that the total number does not exceed
* the specified limit
*/
private static QueryVisitor getNumClausesCheckVisitor() {
return new QueryVisitor() {
int numClauses;
@Override
public QueryVisitor getSubVisitor(BooleanClause.Occur occur, Query parent) {
// Return this instance even for MUST_NOT and not an empty QueryVisitor
return this;
}
@Override
public void visitLeaf(Query query) {
if (numClauses > maxClauseCount) {
throw new TooManyClauses();
}
++numClauses;
}
@Override
public void consumeTerms(Query query, Term... terms) {
if (numClauses > maxClauseCount) {
throw new TooManyClauses();
}
++numClauses;
}
};
}
/** Returns an Explanation that describes how <code>doc</code> scored against
* <code>query</code>.
*
* <p>This is intended to be used in developing Similarity implementations,
* and, for good performance, should not be displayed with every hit.
* Computing an explanation is as expensive as executing the query over the
* entire index.
*/
public Explanation explain(Query query, int doc) throws IOException {
query = rewrite(query);
return explain(createWeight(query, ScoreMode.COMPLETE, 1), doc);
}
/** Expert: low-level implementation method
* Returns an Explanation that describes how <code>doc</code> scored against
* <code>weight</code>.
*
* <p>This is intended to be used in developing Similarity implementations,
* and, for good performance, should not be displayed with every hit.
* Computing an explanation is as expensive as executing the query over the
* entire index.
* <p>Applications should call {@link IndexSearcher#explain(Query, int)}.
* @throws TooManyClauses If a query would exceed
* {@link IndexSearcher#getMaxClauseCount()} clauses.
*/
protected Explanation explain(Weight weight, int doc) throws IOException {
int n = ReaderUtil.subIndex(doc, leafContexts);
final LeafReaderContext ctx = leafContexts.get(n);
int deBasedDoc = doc - ctx.docBase;
final Bits liveDocs = ctx.reader().getLiveDocs();
if (liveDocs != null && liveDocs.get(deBasedDoc) == false) {
return Explanation.noMatch("Document " + doc + " is deleted");
}
return weight.explain(ctx, deBasedDoc);
}
/**
* Creates a {@link Weight} for the given query, potentially adding caching
* if possible and configured.
* @lucene.experimental
*/
public Weight createWeight(Query query, ScoreMode scoreMode, float boost) throws IOException {
final QueryCache queryCache = this.queryCache;
Weight weight = query.createWeight(this, scoreMode, boost);
if (scoreMode.needsScores() == false && queryCache != null) {
weight = queryCache.doCache(weight, queryCachingPolicy);
}
return weight;
}
/**
* Returns this searchers the top-level {@link IndexReaderContext}.
* @see IndexReader#getContext()
*/
/* sugar for #getReader().getTopReaderContext() */
public IndexReaderContext getTopReaderContext() {
return readerContext;
}
/**
* A class holding a subset of the {@link IndexSearcher}s leaf contexts to be
* executed within a single thread.
*
* @lucene.experimental
*/
public static class LeafSlice {
/** The leaves that make up this slice.
*
* @lucene.experimental */
public final LeafReaderContext[] leaves;
public LeafSlice(List<LeafReaderContext> leavesList) {
Collections.sort(leavesList, Comparator.comparingInt(l -> l.docBase));
this.leaves = leavesList.toArray(new LeafReaderContext[0]);
}
}
@Override
public String toString() {
return "IndexSearcher(" + reader + "; executor=" + executor + ")";
}
/**
* Returns {@link TermStatistics} for a term.
*
* This can be overridden for example, to return a term's statistics
* across a distributed collection.
*
* @param docFreq The document frequency of the term. It must be greater or equal to 1.
* @param totalTermFreq The total term frequency.
* @return A {@link TermStatistics} (never null).
*
* @lucene.experimental
*/
public TermStatistics termStatistics(Term term, int docFreq, long totalTermFreq) throws IOException {
// This constructor will throw an exception if docFreq <= 0.
return new TermStatistics(term.bytes(), docFreq, totalTermFreq);
}
/**
* Returns {@link CollectionStatistics} for a field, or {@code null} if
* the field does not exist (has no indexed terms)
*
* This can be overridden for example, to return a field's statistics
* across a distributed collection.
* @lucene.experimental
*/
public CollectionStatistics collectionStatistics(String field) throws IOException {
assert field != null;
long docCount = 0;
long sumTotalTermFreq = 0;
long sumDocFreq = 0;
for (LeafReaderContext leaf : reader.leaves()) {
final Terms terms = leaf.reader().terms(field);
if (terms == null) {
continue;
}
docCount += terms.getDocCount();
sumTotalTermFreq += terms.getSumTotalTermFreq();
sumDocFreq += terms.getSumDocFreq();
}
if (docCount == 0) {
return null;
}
return new CollectionStatistics(field, reader.maxDoc(), docCount, sumTotalTermFreq, sumDocFreq);
}
/**
* Returns this searchers executor or <code>null</code> if no executor was provided
*/
public Executor getExecutor() {
return executor;
}
/** Thrown when an attempt is made to add more than {@link
* #getMaxClauseCount()} clauses. This typically happens if
* a PrefixQuery, FuzzyQuery, WildcardQuery, or TermRangeQuery
* is expanded to many terms during search.
*/
public static class TooManyClauses extends RuntimeException {
public TooManyClauses() {
super("maxClauseCount is set to " + maxClauseCount);
}
}
}
| 1 | 30,699 | it's unclear to me which method you are recommending overriding? | apache-lucene-solr | java |
@@ -511,7 +511,7 @@ class StringFormatChecker(BaseChecker):
check_args = False
# Consider "{[0]} {[1]}" as num_args.
- num_args += sum(1 for field in named_fields if field == "")
+ num_args += sum(1 for field in named_fields if not field)
if named_fields:
for field in named_fields:
if field and field not in named_arguments: | 1 | # Copyright (c) 2009-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2010 Daniel Harding <[email protected]>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Rene Zhang <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016, 2018 Jakub Wilk <[email protected]>
# Copyright (c) 2016 Peter Dawyndt <[email protected]>
# Copyright (c) 2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2017 Ville Skyttä <[email protected]>
# Copyright (c) 2018, 2020 Anthony Sottile <[email protected]>
# Copyright (c) 2018-2019 Lucas Cimon <[email protected]>
# Copyright (c) 2018 Alan Chan <[email protected]>
# Copyright (c) 2018 Yury Gribov <[email protected]>
# Copyright (c) 2018 ssolanki <[email protected]>
# Copyright (c) 2018 Nick Drozd <[email protected]>
# Copyright (c) 2019-2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2019 Wes Turner <[email protected]>
# Copyright (c) 2019 Djailla <[email protected]>
# Copyright (c) 2019 Hugo van Kemenade <[email protected]>
# Copyright (c) 2020 Matthew Suozzo <[email protected]>
# Copyright (c) 2020 hippo91 <[email protected]>
# Copyright (c) 2020 谭九鼎 <[email protected]>
# Copyright (c) 2020 Anthony <[email protected]>
# Copyright (c) 2021 Daniël van Noord <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Copyright (c) 2021 Peter Kolbus <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""Checker for string formatting operations.
"""
import collections
import numbers
import re
import tokenize
from typing import TYPE_CHECKING, Iterable
import astroid
from astroid import nodes
from pylint.checkers import BaseChecker, BaseTokenChecker, utils
from pylint.checkers.utils import check_messages
from pylint.interfaces import IAstroidChecker, IRawChecker, ITokenChecker
from pylint.utils import get_global_option
if TYPE_CHECKING:
from typing import Counter # typing.Counter added in Python 3.6.1
_AST_NODE_STR_TYPES = ("__builtin__.unicode", "__builtin__.str", "builtins.str")
# Prefixes for both strings and bytes literals per
# https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals
_PREFIXES = {
"r",
"u",
"R",
"U",
"f",
"F",
"fr",
"Fr",
"fR",
"FR",
"rf",
"rF",
"Rf",
"RF",
"b",
"B",
"br",
"Br",
"bR",
"BR",
"rb",
"rB",
"Rb",
"RB",
}
SINGLE_QUOTED_REGEX = re.compile(f"({'|'.join(_PREFIXES)})?'''")
DOUBLE_QUOTED_REGEX = re.compile(f"({'|'.join(_PREFIXES)})?\"\"\"")
QUOTE_DELIMITER_REGEX = re.compile(f"({'|'.join(_PREFIXES)})?(\"|')", re.DOTALL)
MSGS = { # pylint: disable=consider-using-namedtuple-or-dataclass
"E1300": (
"Unsupported format character %r (%#02x) at index %d",
"bad-format-character",
"Used when an unsupported format character is used in a format string.",
),
"E1301": (
"Format string ends in middle of conversion specifier",
"truncated-format-string",
"Used when a format string terminates before the end of a "
"conversion specifier.",
),
"E1302": (
"Mixing named and unnamed conversion specifiers in format string",
"mixed-format-string",
"Used when a format string contains both named (e.g. '%(foo)d') "
"and unnamed (e.g. '%d') conversion specifiers. This is also "
"used when a named conversion specifier contains * for the "
"minimum field width and/or precision.",
),
"E1303": (
"Expected mapping for format string, not %s",
"format-needs-mapping",
"Used when a format string that uses named conversion specifiers "
"is used with an argument that is not a mapping.",
),
"W1300": (
"Format string dictionary key should be a string, not %s",
"bad-format-string-key",
"Used when a format string that uses named conversion specifiers "
"is used with a dictionary whose keys are not all strings.",
),
"W1301": (
"Unused key %r in format string dictionary",
"unused-format-string-key",
"Used when a format string that uses named conversion specifiers "
"is used with a dictionary that contains keys not required by the "
"format string.",
),
"E1304": (
"Missing key %r in format string dictionary",
"missing-format-string-key",
"Used when a format string that uses named conversion specifiers "
"is used with a dictionary that doesn't contain all the keys "
"required by the format string.",
),
"E1305": (
"Too many arguments for format string",
"too-many-format-args",
"Used when a format string that uses unnamed conversion "
"specifiers is given too many arguments.",
),
"E1306": (
"Not enough arguments for format string",
"too-few-format-args",
"Used when a format string that uses unnamed conversion "
"specifiers is given too few arguments",
),
"E1307": (
"Argument %r does not match format type %r",
"bad-string-format-type",
"Used when a type required by format string "
"is not suitable for actual argument type",
),
"E1310": (
"Suspicious argument in %s.%s call",
"bad-str-strip-call",
"The argument to a str.{l,r,}strip call contains a duplicate character, ",
),
"W1302": (
"Invalid format string",
"bad-format-string",
"Used when a PEP 3101 format string is invalid.",
),
"W1303": (
"Missing keyword argument %r for format string",
"missing-format-argument-key",
"Used when a PEP 3101 format string that uses named fields "
"doesn't receive one or more required keywords.",
),
"W1304": (
"Unused format argument %r",
"unused-format-string-argument",
"Used when a PEP 3101 format string that uses named "
"fields is used with an argument that "
"is not required by the format string.",
),
"W1305": (
"Format string contains both automatic field numbering "
"and manual field specification",
"format-combined-specification",
"Used when a PEP 3101 format string contains both automatic "
"field numbering (e.g. '{}') and manual field "
"specification (e.g. '{0}').",
),
"W1306": (
"Missing format attribute %r in format specifier %r",
"missing-format-attribute",
"Used when a PEP 3101 format string uses an "
"attribute specifier ({0.length}), but the argument "
"passed for formatting doesn't have that attribute.",
),
"W1307": (
"Using invalid lookup key %r in format specifier %r",
"invalid-format-index",
"Used when a PEP 3101 format string uses a lookup specifier "
"({a[1]}), but the argument passed for formatting "
"doesn't contain or doesn't have that key as an attribute.",
),
"W1308": (
"Duplicate string formatting argument %r, consider passing as named argument",
"duplicate-string-formatting-argument",
"Used when we detect that a string formatting is "
"repeating an argument instead of using named string arguments",
),
"W1309": (
"Using an f-string that does not have any interpolated variables",
"f-string-without-interpolation",
"Used when we detect an f-string that does not use any interpolation variables, "
"in which case it can be either a normal string or a bug in the code.",
),
"W1310": (
"Using formatting for a string that does not have any interpolated variables",
"format-string-without-interpolation",
"Used when we detect a string that does not have any interpolation variables, "
"in which case it can be either a normal string without formatting or a bug in the code.",
),
"W1311": (
"F-strings are not supported by all versions included in the py-version setting",
"using-f-string-in-unsupported-version",
"Used when the py-version set by the user is lower than 3.6 and pylint encounters "
"a f-string.",
),
}
OTHER_NODES = (
nodes.Const,
nodes.List,
nodes.Lambda,
nodes.FunctionDef,
nodes.ListComp,
nodes.SetComp,
nodes.GeneratorExp,
)
def get_access_path(key, parts):
"""Given a list of format specifiers, returns
the final access path (e.g. a.b.c[0][1]).
"""
path = []
for is_attribute, specifier in parts:
if is_attribute:
path.append(f".{specifier}")
else:
path.append(f"[{specifier!r}]")
return str(key) + "".join(path)
def arg_matches_format_type(arg_type, format_type):
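    # An int instance is accepted by every conversion type, a float only by the
    # numeric ones (besides "s" and "r"), a str only by "s", "r" and "c", and
    # any other instance only by "s" and "r".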
if format_type in "sr":
# All types can be printed with %s and %r
return True
if isinstance(arg_type, astroid.Instance):
arg_type = arg_type.pytype()
if arg_type == "builtins.str":
return format_type == "c"
if arg_type == "builtins.float":
return format_type in "deEfFgGn%"
if arg_type == "builtins.int":
# Integers allow all types
return True
return False
return True
class StringFormatChecker(BaseChecker):
"""Checks string formatting operations to ensure that the format string
is valid and the arguments match the format string.
"""
__implements__ = (IAstroidChecker,)
name = "string"
msgs = MSGS
# pylint: disable=too-many-branches
@check_messages(
"bad-format-character",
"truncated-format-string",
"mixed-format-string",
"bad-format-string-key",
"missing-format-string-key",
"unused-format-string-key",
"bad-string-format-type",
"format-needs-mapping",
"too-many-format-args",
"too-few-format-args",
"bad-string-format-type",
"format-string-without-interpolation",
)
def visit_binop(self, node: nodes.BinOp) -> None:
if node.op != "%":
return
left = node.left
args = node.right
if not (isinstance(left, nodes.Const) and isinstance(left.value, str)):
return
format_string = left.value
try:
(
required_keys,
required_num_args,
required_key_types,
required_arg_types,
) = utils.parse_format_string(format_string)
except utils.UnsupportedFormatCharacter as exc:
formatted = format_string[exc.index]
self.add_message(
"bad-format-character",
node=node,
args=(formatted, ord(formatted), exc.index),
)
return
except utils.IncompleteFormatString:
self.add_message("truncated-format-string", node=node)
return
if not required_keys and not required_num_args:
self.add_message("format-string-without-interpolation", node=node)
return
if required_keys and required_num_args:
# The format string uses both named and unnamed format
# specifiers.
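            # e.g. "%(foo)s %s" combines a named and an unnamed specifier.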
self.add_message("mixed-format-string", node=node)
elif required_keys:
# The format string uses only named format specifiers.
# Check that the RHS of the % operator is a mapping object
# that contains precisely the set of keys required by the
# format string.
if isinstance(args, nodes.Dict):
keys = set()
unknown_keys = False
for k, _ in args.items:
if isinstance(k, nodes.Const):
key = k.value
if isinstance(key, str):
keys.add(key)
else:
self.add_message(
"bad-format-string-key", node=node, args=key
)
else:
# One of the keys was something other than a
# constant. Since we can't tell what it is,
# suppress checks for missing keys in the
# dictionary.
unknown_keys = True
if not unknown_keys:
for key in required_keys:
if key not in keys:
self.add_message(
"missing-format-string-key", node=node, args=key
)
for key in keys:
if key not in required_keys:
self.add_message(
"unused-format-string-key", node=node, args=key
)
for key, arg in args.items:
if not isinstance(key, nodes.Const):
continue
format_type = required_key_types.get(key.value, None)
arg_type = utils.safe_infer(arg)
if (
format_type is not None
and arg_type
and arg_type != astroid.Uninferable
and not arg_matches_format_type(arg_type, format_type)
):
self.add_message(
"bad-string-format-type",
node=node,
args=(arg_type.pytype(), format_type),
)
elif isinstance(args, (OTHER_NODES, nodes.Tuple)):
type_name = type(args).__name__
self.add_message("format-needs-mapping", node=node, args=type_name)
# else:
# The RHS of the format specifier is a name or
# expression. It may be a mapping object, so
# there's nothing we can check.
else:
# The format string uses only unnamed format specifiers.
# Check that the number of arguments passed to the RHS of
# the % operator matches the number required by the format
# string.
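            # e.g. "%s %s" % (a, b, c) is too-many-format-args and
            # "%s %s" % (a,) is too-few-format-args.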
args_elts = []
if isinstance(args, nodes.Tuple):
rhs_tuple = utils.safe_infer(args)
num_args = None
if isinstance(rhs_tuple, nodes.BaseContainer):
args_elts = rhs_tuple.elts
num_args = len(args_elts)
elif isinstance(args, (OTHER_NODES, (nodes.Dict, nodes.DictComp))):
args_elts = [args]
num_args = 1
else:
# The RHS of the format specifier is a name or
# expression. It could be a tuple of unknown size, so
# there's nothing we can check.
num_args = None
if num_args is not None:
if num_args > required_num_args:
self.add_message("too-many-format-args", node=node)
elif num_args < required_num_args:
self.add_message("too-few-format-args", node=node)
for arg, format_type in zip(args_elts, required_arg_types):
if not arg:
continue
arg_type = utils.safe_infer(arg)
if (
arg_type
and arg_type != astroid.Uninferable
and not arg_matches_format_type(arg_type, format_type)
):
self.add_message(
"bad-string-format-type",
node=node,
args=(arg_type.pytype(), format_type),
)
@check_messages("f-string-without-interpolation")
@check_messages("using-f-string-in-unsupported-version")
def visit_joinedstr(self, node: nodes.JoinedStr) -> None:
self._check_interpolation(node)
self._check_unsupported_version(node)
def _check_interpolation(self, node: nodes.JoinedStr) -> None:
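        # An f-string without any FormattedValue child, e.g. f"constant text",
        # interpolates nothing and is reported.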
if isinstance(node.parent, nodes.FormattedValue):
return
for value in node.values:
if isinstance(value, nodes.FormattedValue):
return
self.add_message("f-string-without-interpolation", node=node)
def _check_unsupported_version(self, node: nodes.JoinedStr) -> None:
if get_global_option(self, "py-version") < (3, 6):
self.add_message("using-f-string-in-unsupported-version", node=node)
@check_messages(*MSGS)
def visit_call(self, node: nodes.Call) -> None:
func = utils.safe_infer(node.func)
if (
isinstance(func, astroid.BoundMethod)
and isinstance(func.bound, astroid.Instance)
and func.bound.name in ("str", "unicode", "bytes")
):
if func.name in ("strip", "lstrip", "rstrip") and node.args:
arg = utils.safe_infer(node.args[0])
if not isinstance(arg, nodes.Const) or not isinstance(arg.value, str):
return
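                # A duplicate character in the strip argument usually means the
                # caller expected substring removal, e.g. "foo.txt".strip(".txt")
                # ("t" appears twice) strips a set of characters, not a suffix.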
if len(arg.value) != len(set(arg.value)):
self.add_message(
"bad-str-strip-call",
node=node,
args=(func.bound.name, func.name),
)
elif func.name == "format":
self._check_new_format(node, func)
def _detect_vacuous_formatting(self, node, positional_arguments):
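        # e.g. "{} {}".format(value, value) passes the same name twice and is
        # reported; passing it once as a keyword argument avoids the repetition.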
counter = collections.Counter(
arg.name for arg in positional_arguments if isinstance(arg, nodes.Name)
)
for name, count in counter.items():
if count == 1:
continue
self.add_message(
"duplicate-string-formatting-argument", node=node, args=(name,)
)
def _check_new_format(self, node, func):
"""Check the new string formatting."""
# Skip format nodes which don't have an explicit string on the
# left side of the format operation.
# We do this because our inference engine can't properly handle
# redefinitions of the original string.
# Note that there may not be any left side at all, if the format method
# has been assigned to another variable. See issue 351. For example:
#
# fmt = 'some string {}'.format
# fmt('arg')
if isinstance(node.func, nodes.Attribute) and not isinstance(
node.func.expr, nodes.Const
):
return
if node.starargs or node.kwargs:
return
try:
strnode = next(func.bound.infer())
except astroid.InferenceError:
return
if not (isinstance(strnode, nodes.Const) and isinstance(strnode.value, str)):
return
try:
call_site = astroid.arguments.CallSite.from_call(node)
except astroid.InferenceError:
return
try:
fields, num_args, manual_pos = utils.parse_format_method_string(
strnode.value
)
except utils.IncompleteFormatString:
self.add_message("bad-format-string", node=node)
return
positional_arguments = call_site.positional_arguments
named_arguments = call_site.keyword_arguments
named_fields = {field[0] for field in fields if isinstance(field[0], str)}
if num_args and manual_pos:
self.add_message("format-combined-specification", node=node)
return
check_args = False
# Consider "{[0]} {[1]}" as num_args.
num_args += sum(1 for field in named_fields if field == "")
if named_fields:
for field in named_fields:
if field and field not in named_arguments:
self.add_message(
"missing-format-argument-key", node=node, args=(field,)
)
for field in named_arguments:
if field not in named_fields:
self.add_message(
"unused-format-string-argument", node=node, args=(field,)
)
# num_args can be 0 if manual_pos is not.
num_args = num_args or manual_pos
if positional_arguments or num_args:
                empty = any(field == "" for field in named_fields)
if named_arguments or empty:
# Verify the required number of positional arguments
# only if the .format got at least one keyword argument.
# This means that the format strings accepts both
# positional and named fields and we should warn
# when one of the them is missing or is extra.
check_args = True
else:
check_args = True
if check_args:
            # num_args can be 0 if manual_pos is not. This is repeated here
            # because the identical assignment above only runs when the format
            # string has named fields.
num_args = num_args or manual_pos
if not num_args:
self.add_message("format-string-without-interpolation", node=node)
return
if len(positional_arguments) > num_args:
self.add_message("too-many-format-args", node=node)
elif len(positional_arguments) < num_args:
self.add_message("too-few-format-args", node=node)
self._detect_vacuous_formatting(node, positional_arguments)
self._check_new_format_specifiers(node, fields, named_arguments)
def _check_new_format_specifiers(self, node, fields, named):
"""
Check attribute and index access in the format
string ("{0.a}" and "{0[a]}").
"""
for key, specifiers in fields:
# Obtain the argument. If it can't be obtained
# or inferred, skip this check.
if key == "":
# {[0]} will have an unnamed argument, defaulting
# to 0. It will not be present in `named`, so use the value
# 0 for it.
key = 0
if isinstance(key, numbers.Number):
try:
argname = utils.get_argument_from_call(node, key)
except utils.NoSuchArgumentError:
continue
else:
if key not in named:
continue
argname = named[key]
if argname in (astroid.Uninferable, None):
continue
try:
argument = utils.safe_infer(argname)
except astroid.InferenceError:
continue
if not specifiers or not argument:
# No need to check this key if it doesn't
# use attribute / item access
continue
if argument.parent and isinstance(argument.parent, nodes.Arguments):
# Ignore any object coming from an argument,
# because we can't infer its value properly.
continue
previous = argument
parsed = []
for is_attribute, specifier in specifiers:
if previous is astroid.Uninferable:
break
parsed.append((is_attribute, specifier))
if is_attribute:
try:
previous = previous.getattr(specifier)[0]
except astroid.NotFoundError:
if (
hasattr(previous, "has_dynamic_getattr")
and previous.has_dynamic_getattr()
):
# Don't warn if the object has a custom __getattr__
break
path = get_access_path(key, parsed)
self.add_message(
"missing-format-attribute",
args=(specifier, path),
node=node,
)
break
else:
warn_error = False
if hasattr(previous, "getitem"):
try:
previous = previous.getitem(nodes.Const(specifier))
except (
astroid.AstroidIndexError,
astroid.AstroidTypeError,
astroid.AttributeInferenceError,
):
warn_error = True
except astroid.InferenceError:
break
if previous is astroid.Uninferable:
break
else:
try:
# Lookup __getitem__ in the current node,
# but skip further checks, because we can't
# retrieve the looked object
previous.getattr("__getitem__")
break
except astroid.NotFoundError:
warn_error = True
if warn_error:
path = get_access_path(key, parsed)
self.add_message(
"invalid-format-index", args=(specifier, path), node=node
)
break
try:
previous = next(previous.infer())
except astroid.InferenceError:
# can't check further if we can't infer it
break
class StringConstantChecker(BaseTokenChecker):
"""Check string literals"""
__implements__ = (IAstroidChecker, ITokenChecker, IRawChecker)
name = "string"
msgs = {
"W1401": (
"Anomalous backslash in string: '%s'. "
"String constant might be missing an r prefix.",
"anomalous-backslash-in-string",
"Used when a backslash is in a literal string but not as an escape.",
),
"W1402": (
"Anomalous Unicode escape in byte string: '%s'. "
"String constant might be missing an r or u prefix.",
"anomalous-unicode-escape-in-string",
"Used when an escape like \\u is encountered in a byte "
"string where it has no effect.",
),
"W1404": (
"Implicit string concatenation found in %s",
"implicit-str-concat",
"String literals are implicitly concatenated in a "
"literal iterable definition : "
"maybe a comma is missing ?",
{"old_names": [("W1403", "implicit-str-concat-in-sequence")]},
),
"W1405": (
"Quote delimiter %s is inconsistent with the rest of the file",
"inconsistent-quotes",
"Quote delimiters are not used consistently throughout a module "
"(with allowances made for avoiding unnecessary escaping).",
),
"W1406": (
"The u prefix for strings is no longer necessary in Python >=3.0",
"redundant-u-string-prefix",
"Used when we detect a string with a u prefix. These prefixes were necessary "
"in Python 2 to indicate a string was Unicode, but since Python 3.0 strings "
"are Unicode by default.",
),
}
options = (
(
"check-str-concat-over-line-jumps",
{
"default": False,
"type": "yn",
"metavar": "<y_or_n>",
"help": "This flag controls whether the "
"implicit-str-concat should generate a warning "
"on implicit string concatenation in sequences defined over "
"several lines.",
},
),
(
"check-quote-consistency",
{
"default": False,
"type": "yn",
"metavar": "<y_or_n>",
"help": "This flag controls whether inconsistent-quotes generates a "
"warning when the character used as a quote delimiter is used "
"inconsistently within a module.",
},
),
)
# Characters that have a special meaning after a backslash in either
# Unicode or byte strings.
ESCAPE_CHARACTERS = "abfnrtvx\n\r\t\\'\"01234567"
# Characters that have a special meaning after a backslash but only in
# Unicode strings.
UNICODE_ESCAPE_CHARACTERS = "uUN"
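    # e.g. "\d" is reported as an anomalous backslash ("d" has no special
    # meaning after a backslash), while "\n" and "\\d" are fine; in a bytes
    # literal, "\u0041" is reported as an anomalous Unicode escape.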
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.string_tokens = {} # token position -> (token value, next token)
def process_module(self, node: nodes.Module) -> None:
self._unicode_literals = "unicode_literals" in node.future_imports
def process_tokens(self, tokens):
encoding = "ascii"
for i, (tok_type, token, start, _, line) in enumerate(tokens):
if tok_type == tokenize.ENCODING:
# this is always the first token processed
encoding = token
elif tok_type == tokenize.STRING:
# 'token' is the whole un-parsed token; we can look at the start
# of it to see whether it's a raw or unicode string etc.
self.process_string_token(token, start[0], start[1])
                # We look ahead to the next token, ignoring comments & newlines:
j = i + 1
while j < len(tokens) and tokens[j].type in (
tokenize.NEWLINE,
tokenize.NL,
tokenize.COMMENT,
):
j += 1
next_token = tokens[j] if j < len(tokens) else None
if encoding != "ascii":
# We convert `tokenize` character count into a byte count,
# to match with astroid `.col_offset`
start = (start[0], len(line[: start[1]].encode(encoding)))
self.string_tokens[start] = (str_eval(token), next_token)
if self.config.check_quote_consistency:
self.check_for_consistent_string_delimiters(tokens)
@check_messages("implicit-str-concat")
def visit_list(self, node: nodes.List) -> None:
self.check_for_concatenated_strings(node.elts, "list")
@check_messages("implicit-str-concat")
def visit_set(self, node: nodes.Set) -> None:
self.check_for_concatenated_strings(node.elts, "set")
@check_messages("implicit-str-concat")
def visit_tuple(self, node: nodes.Tuple) -> None:
self.check_for_concatenated_strings(node.elts, "tuple")
def visit_assign(self, node: nodes.Assign) -> None:
if isinstance(node.value, nodes.Const) and isinstance(node.value.value, str):
self.check_for_concatenated_strings([node.value], "assignment")
def check_for_consistent_string_delimiters(
self, tokens: Iterable[tokenize.TokenInfo]
) -> None:
"""Adds a message for each string using inconsistent quote delimiters.
Quote delimiters are used inconsistently if " and ' are mixed in a module's
shortstrings without having done so to avoid escaping an internal quote
character.
Args:
tokens: The tokens to be checked against for consistent usage.
"""
# typing.Counter added in Python 3.6.1 so this type hint must be a comment
string_delimiters = collections.Counter() # type: Counter[str]
# First, figure out which quote character predominates in the module
for tok_type, token, _, _, _ in tokens:
if tok_type == tokenize.STRING and _is_quote_delimiter_chosen_freely(token):
string_delimiters[_get_quote_delimiter(token)] += 1
if len(string_delimiters) > 1:
# Ties are broken arbitrarily
most_common_delimiter = string_delimiters.most_common(1)[0][0]
for tok_type, token, start, _, _ in tokens:
if tok_type != tokenize.STRING:
continue
quote_delimiter = _get_quote_delimiter(token)
if (
_is_quote_delimiter_chosen_freely(token)
and quote_delimiter != most_common_delimiter
):
self.add_message(
"inconsistent-quotes", line=start[0], args=(quote_delimiter,)
)
def check_for_concatenated_strings(self, elements, iterable_type):
for elt in elements:
if not (
isinstance(elt, nodes.Const) and elt.pytype() in _AST_NODE_STR_TYPES
):
continue
if elt.col_offset < 0:
# This can happen in case of escaped newlines
continue
if (elt.lineno, elt.col_offset) not in self.string_tokens:
# This may happen with Latin1 encoding
# cf. https://github.com/PyCQA/pylint/issues/2610
continue
matching_token, next_token = self.string_tokens[
(elt.lineno, elt.col_offset)
]
# We detect string concatenation: the AST Const is the
# combination of 2 string tokens
if matching_token != elt.value and next_token is not None:
if next_token.type == tokenize.STRING and (
next_token.start[0] == elt.lineno
or self.config.check_str_concat_over_line_jumps
):
self.add_message(
"implicit-str-concat", line=elt.lineno, args=(iterable_type,)
)
def process_string_token(self, token, start_row, start_col):
quote_char = None
index = None
for index, char in enumerate(token):
if char in "'\"":
quote_char = char
break
if quote_char is None:
return
prefix = token[:index].lower() # markers like u, b, r.
after_prefix = token[index:]
# Chop off quotes
quote_length = (
3 if after_prefix[:3] == after_prefix[-3:] == 3 * quote_char else 1
)
string_body = after_prefix[quote_length:-quote_length]
# No special checks on raw strings at the moment.
if "r" not in prefix:
self.process_non_raw_string_token(
prefix,
string_body,
start_row,
start_col + len(prefix) + quote_length,
)
def process_non_raw_string_token(
self, prefix, string_body, start_row, string_start_col
):
"""check for bad escapes in a non-raw string.
prefix: lowercase string of eg 'ur' string prefix markers.
string_body: the un-parsed body of the string, not including the quote
marks.
start_row: integer line number in the source.
string_start_col: integer col number of the string start in the source.
"""
# Walk through the string; if we see a backslash then escape the next
# character, and skip over it. If we see a non-escaped character,
# alert, and continue.
#
# Accept a backslash when it escapes a backslash, or a quote, or
# end-of-line, or one of the letters that introduce a special escape
# sequence <https://docs.python.org/reference/lexical_analysis.html>
#
index = 0
while True:
index = string_body.find("\\", index)
if index == -1:
break
# There must be a next character; having a backslash at the end
# of the string would be a SyntaxError.
next_char = string_body[index + 1]
match = string_body[index : index + 2]
# The column offset will vary depending on whether the string token
# is broken across lines. Calculate relative to the nearest line
# break or relative to the start of the token's line.
last_newline = string_body.rfind("\n", 0, index)
if last_newline == -1:
line = start_row
col_offset = index + string_start_col
else:
line = start_row + string_body.count("\n", 0, index)
col_offset = index - last_newline - 1
if next_char in self.UNICODE_ESCAPE_CHARACTERS:
if "u" in prefix:
pass
elif "b" not in prefix:
pass # unicode by default
else:
self.add_message(
"anomalous-unicode-escape-in-string",
line=line,
args=(match,),
col_offset=col_offset,
)
elif next_char not in self.ESCAPE_CHARACTERS:
self.add_message(
"anomalous-backslash-in-string",
line=line,
args=(match,),
col_offset=col_offset,
)
# Whether it was a valid escape or not, backslash followed by
# another character can always be consumed whole: the second
# character can never be the start of a new backslash escape.
index += 2
@check_messages("redundant-u-string-prefix")
def visit_const(self, node: nodes.Const) -> None:
if node.pytype() == "builtins.str" and not isinstance(
node.parent, nodes.JoinedStr
):
self._detect_u_string_prefix(node)
def _detect_u_string_prefix(self, node: nodes.Const):
"""Check whether strings include a 'u' prefix like u'String'"""
if node.kind == "u":
self.add_message(
"redundant-u-string-prefix",
line=node.lineno,
col_offset=node.col_offset,
)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(StringFormatChecker(linter))
linter.register_checker(StringConstantChecker(linter))
def str_eval(token):
"""
Mostly replicate `ast.literal_eval(token)` manually to avoid any performance hit.
This supports f-strings, contrary to `ast.literal_eval`.
We have to support all string literal notations:
https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals
"""
if token[0:2].lower() in ("fr", "rf"):
token = token[2:]
elif token[0].lower() in ("r", "u", "f"):
token = token[1:]
if token[0:3] in ('"""', "'''"):
return token[3:-3]
return token[1:-1]
def _is_long_string(string_token: str) -> bool:
"""Is this string token a "longstring" (is it triple-quoted)?
Long strings are triple-quoted as defined in
https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals
This function only checks characters up through the open quotes. Because it's meant
to be applied only to tokens that represent string literals, it doesn't bother to
check for close-quotes (demonstrating that the literal is a well-formed string).
Args:
string_token: The string token to be parsed.
Returns:
A boolean representing whether or not this token matches a longstring
regex.
"""
return bool(
SINGLE_QUOTED_REGEX.match(string_token)
or DOUBLE_QUOTED_REGEX.match(string_token)
)
def _get_quote_delimiter(string_token: str) -> str:
"""Returns the quote character used to delimit this token string.
This function does little checking for whether the token is a well-formed
string.
Args:
string_token: The token to be parsed.
Returns:
A string containing solely the first quote delimiter character in the passed
string.
Raises:
ValueError: No quote delimiter characters are present.
"""
match = QUOTE_DELIMITER_REGEX.match(string_token)
if not match:
raise ValueError(f"string token {string_token} is not a well-formed string")
return match.group(2)
def _is_quote_delimiter_chosen_freely(string_token: str) -> bool:
"""Was there a non-awkward option for the quote delimiter?
Args:
string_token: The quoted string whose delimiters are to be checked.
Returns:
Whether there was a choice in this token's quote character that would
not have involved backslash-escaping an interior quote character. Long
strings are excepted from this analysis under the assumption that their
quote characters are set by policy.
"""
quote_delimiter = _get_quote_delimiter(string_token)
unchosen_delimiter = '"' if quote_delimiter == "'" else "'"
return bool(
quote_delimiter
and not _is_long_string(string_token)
and unchosen_delimiter not in str_eval(string_token)
)
| 1 | 16,399 | This make me think that if the typing of a variable is inconsistent it could cause problem. For example here, if `field == 0`, the behavior is not exactly the same. A `0` and is implicitelty false. Well it works because summing 0 is neutral, but what if it was a multiplication ? | PyCQA-pylint | py |
@@ -4,12 +4,13 @@ import (
"bytes"
"errors"
"fmt"
- "github.com/libopenstorage/openstorage/api"
- "github.com/libopenstorage/openstorage/api/client"
- "github.com/libopenstorage/openstorage/volume"
"io"
"io/ioutil"
"strconv"
+
+ "github.com/libopenstorage/openstorage/api"
+ "github.com/libopenstorage/openstorage/api/client"
+ "github.com/libopenstorage/openstorage/volume"
)
const ( | 1 | package volume
import (
"bytes"
"errors"
"fmt"
"github.com/libopenstorage/openstorage/api"
"github.com/libopenstorage/openstorage/api/client"
"github.com/libopenstorage/openstorage/volume"
"io"
"io/ioutil"
"strconv"
)
const (
graphPath = "/graph"
volumePath = "/osd-volumes"
snapPath = "/osd-snapshot"
credsPath = "/osd-creds"
)
type volumeClient struct {
volume.IODriver
c *client.Client
}
func newVolumeClient(c *client.Client) volume.VolumeDriver {
return &volumeClient{volume.IONotSupported, c}
}
// String description of this driver.
func (v *volumeClient) Name() string {
return "VolumeDriver"
}
func (v *volumeClient) Type() api.DriverType {
// Block drivers implement the superset.
return api.DriverType_DRIVER_TYPE_BLOCK
}
func (v *volumeClient) GraphDriverCreate(id string, parent string) error {
response := ""
if err := v.c.Put().Resource(graphPath + "/create").Instance(id).Do().Unmarshal(&response); err != nil {
return err
}
if response != id {
return fmt.Errorf("Invalid response: %s", response)
}
return nil
}
func (v *volumeClient) GraphDriverRemove(id string) error {
response := ""
if err := v.c.Put().Resource(graphPath + "/remove").Instance(id).Do().Unmarshal(&response); err != nil {
return err
}
if response != id {
return fmt.Errorf("Invalid response: %s", response)
}
return nil
}
func (v *volumeClient) GraphDriverGet(id string, mountLabel string) (string, error) {
response := ""
if err := v.c.Get().Resource(graphPath + "/inspect").Instance(id).Do().Unmarshal(&response); err != nil {
return "", err
}
return response, nil
}
func (v *volumeClient) GraphDriverRelease(id string) error {
response := ""
if err := v.c.Put().Resource(graphPath + "/release").Instance(id).Do().Unmarshal(&response); err != nil {
return err
}
if response != id {
return fmt.Errorf("Invalid response: %v", response)
}
return nil
}
func (v *volumeClient) GraphDriverExists(id string) bool {
response := false
v.c.Get().Resource(graphPath + "/exists").Instance(id).Do().Unmarshal(&response)
return response
}
func (v *volumeClient) GraphDriverDiff(id string, parent string) io.Writer {
body, _ := v.c.Get().Resource(graphPath + "/diff?id=" + id + "&parent=" + parent).Do().Body()
return bytes.NewBuffer(body)
}
func (v *volumeClient) GraphDriverChanges(id string, parent string) ([]api.GraphDriverChanges, error) {
var changes []api.GraphDriverChanges
err := v.c.Get().Resource(graphPath + "/changes").Instance(id).Do().Unmarshal(&changes)
return changes, err
}
func (v *volumeClient) GraphDriverApplyDiff(id string, parent string, diff io.Reader) (int, error) {
b, err := ioutil.ReadAll(diff)
if err != nil {
return 0, err
}
response := 0
if err = v.c.Put().Resource(graphPath + "/diff?id=" + id + "&parent=" + parent).Instance(id).Body(b).Do().Unmarshal(&response); err != nil {
return 0, err
}
return response, nil
}
func (v *volumeClient) GraphDriverDiffSize(id string, parent string) (int, error) {
size := 0
err := v.c.Get().Resource(graphPath + "/diffsize").Instance(id).Do().Unmarshal(&size)
return size, err
}
// Create a new Vol for the specific volume spec.
// It returns a system generated VolumeID that uniquely identifies the volume
func (v *volumeClient) Create(locator *api.VolumeLocator, source *api.Source,
spec *api.VolumeSpec) (string, error) {
response := &api.VolumeCreateResponse{}
request := &api.VolumeCreateRequest{
Locator: locator,
Source: source,
Spec: spec,
}
if err := v.c.Post().Resource(volumePath).Body(request).Do().Unmarshal(response); err != nil {
return "", err
}
if response.VolumeResponse != nil && response.VolumeResponse.Error != "" {
return "", errors.New(response.VolumeResponse.Error)
}
return response.Id, nil
}
// Status diagnostic information
func (v *volumeClient) Status() [][2]string {
return [][2]string{}
}
// Inspect specified volumes.
// Errors ErrEnoEnt may be returned.
func (v *volumeClient) Inspect(ids []string) ([]*api.Volume, error) {
if len(ids) == 0 {
return nil, nil
}
var volumes []*api.Volume
request := v.c.Get().Resource(volumePath)
for _, id := range ids {
request.QueryOption(api.OptVolumeID, id)
}
if err := request.Do().Unmarshal(&volumes); err != nil {
return nil, err
}
return volumes, nil
}
// Delete volume.
// Errors ErrEnoEnt, ErrVolHasSnaps may be returned.
func (v *volumeClient) Delete(volumeID string) error {
response := &api.VolumeResponse{}
if err := v.c.Delete().Resource(volumePath).Instance(volumeID).Do().Unmarshal(response); err != nil {
return err
}
if response.Error != "" {
return errors.New(response.Error)
}
return nil
}
// Snap specified volume. IO to the underlying volume should be quiesced before
// calling this function.
// Errors ErrEnoEnt may be returned
func (v *volumeClient) Snapshot(volumeID string, readonly bool,
locator *api.VolumeLocator) (string, error) {
response := &api.SnapCreateResponse{}
request := &api.SnapCreateRequest{
Id: volumeID,
Readonly: readonly,
Locator: locator,
}
if err := v.c.Post().Resource(snapPath).Body(request).Do().Unmarshal(response); err != nil {
return "", err
}
// TODO(pedge): this probably should not be embedded in this way
if response.VolumeCreateResponse != nil &&
response.VolumeCreateResponse.VolumeResponse != nil &&
response.VolumeCreateResponse.VolumeResponse.Error != "" {
return "", errors.New(
response.VolumeCreateResponse.VolumeResponse.Error)
}
if response.VolumeCreateResponse != nil {
return response.VolumeCreateResponse.Id, nil
}
return "", nil
}
// Restore specified volume to given snapshot state
func (v *volumeClient) Restore(volumeID string, snapID string) error {
response := &api.VolumeResponse{}
req := v.c.Post().Resource(snapPath + "/restore").Instance(volumeID)
req.QueryOption(api.OptSnapID, snapID)
if err := req.Do().Unmarshal(response); err != nil {
return err
}
if response.Error != "" {
return errors.New(response.Error)
}
return nil
}
// Stats for specified volume.
// Errors ErrEnoEnt may be returned
func (v *volumeClient) Stats(
volumeID string,
cumulative bool,
) (*api.Stats, error) {
stats := &api.Stats{}
req := v.c.Get().Resource(volumePath + "/stats").Instance(volumeID)
req.QueryOption(api.OptCumulative, strconv.FormatBool(cumulative))
err := req.Do().Unmarshal(stats)
return stats, err
}
// UsedSize returns allocated volume size.
// Errors ErrEnoEnt may be returned
func (v *volumeClient) UsedSize(
volumeID string,
) (uint64, error) {
var usedSize uint64
req := v.c.Get().Resource(volumePath + "/usedsize").Instance(volumeID)
err := req.Do().Unmarshal(&usedSize)
return usedSize, err
}
// Active Requests on all volumes.
func (v *volumeClient) GetActiveRequests() (*api.ActiveRequests, error) {
requests := &api.ActiveRequests{}
resp := v.c.Get().Resource(volumePath + "/requests").Instance("vol_id").Do()
if resp.Error() != nil {
return nil, resp.FormatError()
}
if err := resp.Unmarshal(requests); err != nil {
return nil, err
}
return requests, nil
}
// Shutdown and cleanup.
func (v *volumeClient) Shutdown() {}
// Enumerate volumes that map to the volumeLocator. Locator fields may be regexp.
// If locator fields are left blank, this will return all volumes.
func (v *volumeClient) Enumerate(locator *api.VolumeLocator,
labels map[string]string) ([]*api.Volume, error) {
var volumes []*api.Volume
req := v.c.Get().Resource(volumePath)
if locator.Name != "" {
req.QueryOption(api.OptName, locator.Name)
}
if len(locator.VolumeLabels) != 0 {
req.QueryOptionLabel(api.OptLabel, locator.VolumeLabels)
}
if len(labels) != 0 {
req.QueryOptionLabel(api.OptConfigLabel, labels)
}
resp := req.Do()
if resp.Error() != nil {
return nil, resp.FormatError()
}
if err := resp.Unmarshal(&volumes); err != nil {
return nil, err
}
return volumes, nil
}
// Enumerate snaps for specified volume
// Count indicates the number of snaps populated.
func (v *volumeClient) SnapEnumerate(ids []string,
snapLabels map[string]string) ([]*api.Volume, error) {
var volumes []*api.Volume
request := v.c.Get().Resource(snapPath)
for _, id := range ids {
request.QueryOption(api.OptVolumeID, id)
}
if len(snapLabels) != 0 {
request.QueryOptionLabel(api.OptLabel, snapLabels)
}
if err := request.Do().Unmarshal(&volumes); err != nil {
return nil, err
}
return volumes, nil
}
// Attach map device to the host.
// On success the devicePath specifies location where the device is exported
// Errors ErrEnoEnt, ErrVolAttached may be returned.
func (v *volumeClient) Attach(volumeID string, attachOptions map[string]string) (string, error) {
response, err := v.doVolumeSetGetResponse(
volumeID,
&api.VolumeSetRequest{
Action: &api.VolumeStateAction{
Attach: api.VolumeActionParam_VOLUME_ACTION_PARAM_ON,
},
Options: attachOptions,
},
)
if err != nil {
return "", err
}
if response.Volume != nil {
if response.Volume.Spec.Encrypted {
return response.Volume.SecureDevicePath, nil
} else {
return response.Volume.DevicePath, nil
}
}
return "", nil
}
// Detach device from the host.
// Errors ErrEnoEnt, ErrVolDetached may be returned.
func (v *volumeClient) Detach(volumeID string, options map[string]string) error {
return v.doVolumeSet(
volumeID,
&api.VolumeSetRequest{
Action: &api.VolumeStateAction{
Attach: api.VolumeActionParam_VOLUME_ACTION_PARAM_OFF,
},
Options: options,
},
)
}
func (v *volumeClient) MountedAt(mountPath string) string {
return ""
}
// Mount volume at specified path
// Errors ErrEnoEnt, ErrVolDetached may be returned.
func (v *volumeClient) Mount(volumeID string, mountPath string, options map[string]string) error {
return v.doVolumeSet(
volumeID,
&api.VolumeSetRequest{
Action: &api.VolumeStateAction{
Mount: api.VolumeActionParam_VOLUME_ACTION_PARAM_ON,
MountPath: mountPath,
},
Options: options,
},
)
}
// Unmount volume at specified path
// Errors ErrEnoEnt, ErrVolDetached may be returned.
func (v *volumeClient) Unmount(volumeID string, mountPath string, options map[string]string) error {
return v.doVolumeSet(
volumeID,
&api.VolumeSetRequest{
Action: &api.VolumeStateAction{
Mount: api.VolumeActionParam_VOLUME_ACTION_PARAM_OFF,
MountPath: mountPath,
},
Options: options,
},
)
}
// Update volume
func (v *volumeClient) Set(volumeID string, locator *api.VolumeLocator,
spec *api.VolumeSpec) error {
return v.doVolumeSet(
volumeID,
&api.VolumeSetRequest{
Locator: locator,
Spec: spec,
},
)
}
func (v *volumeClient) doVolumeSet(volumeID string,
request *api.VolumeSetRequest) error {
_, err := v.doVolumeSetGetResponse(volumeID, request)
return err
}
func (v *volumeClient) doVolumeSetGetResponse(volumeID string,
request *api.VolumeSetRequest) (*api.VolumeSetResponse, error) {
response := &api.VolumeSetResponse{}
if err := v.c.Put().Resource(volumePath).Instance(volumeID).Body(request).Do().Unmarshal(response); err != nil {
return nil, err
}
if response.VolumeResponse != nil && response.VolumeResponse.Error != "" {
return nil, errors.New(response.VolumeResponse.Error)
}
return response, nil
}
// Quiesce quiesces volume i/o
func (v *volumeClient) Quiesce(
volumeID string,
timeoutSec uint64,
quiesceID string,
) error {
response := &api.VolumeResponse{}
req := v.c.Post().Resource(volumePath + "/quiesce").Instance(volumeID)
req.QueryOption(api.OptTimeoutSec, strconv.FormatUint(timeoutSec, 10))
req.QueryOption(api.OptQuiesceID, quiesceID)
if err := req.Do().Unmarshal(response); err != nil {
return err
}
if response.Error != "" {
return errors.New(response.Error)
}
return nil
}
// Unquiesce un-quiesces volume i/o
func (v *volumeClient) Unquiesce(volumeID string) error {
response := &api.VolumeResponse{}
req := v.c.Post().Resource(volumePath + "/unquiesce").Instance(volumeID)
if err := req.Do().Unmarshal(response); err != nil {
return err
}
if response.Error != "" {
return errors.New(response.Error)
}
return nil
}
func (v *volumeClient) CredsEnumerate() (map[string]interface{}, error) {
creds := make(map[string]interface{}, 0)
err := v.c.Get().Resource(credsPath).Do().Unmarshal(&creds)
return creds, err
}
func (v *volumeClient) CredsCreate(params map[string]string) (string, error) {
response := api.CredCreateResponse{}
request := &api.CredCreateRequest{
InputParams: params,
}
err := v.c.Post().Resource(credsPath).Body(request).Do().Unmarshal(&response)
if err == nil {
if response.CredErr != "" {
err = errors.New(response.CredErr)
}
}
return response.UUID, err
}
func (v *volumeClient) CredsDelete(uuid string) error {
response := &api.VolumeResponse{}
req := v.c.Delete().Resource(credsPath).Instance(uuid)
err := req.Do().Unmarshal(&response)
if err != nil {
return err
}
if response.Error != "" {
return errors.New(response.Error)
}
return nil
}
func (v *volumeClient) CredsValidate(uuid string) error {
response := &api.VolumeResponse{}
req := v.c.Post().Resource(credsPath + "/validate").Instance(uuid)
err := req.Do().Unmarshal(&response)
if err != nil {
return err
}
if response.Error != "" {
return errors.New(response.Error)
}
return nil
}
| 1 | 6,419 | Remove this file from the PR | libopenstorage-openstorage | go |
@@ -35,9 +35,9 @@ namespace Nethermind.Precompiles.Benchmark
// BenchmarkRunner.Run<Bn256MulBenchmark>();
// BenchmarkRunner.Run<Bn256PairingBenchmark>();
BenchmarkRunner.Run<ModExp2565Benchmark>();
- BenchmarkRunner.Run<ModExpBenchmark>();
+ // BenchmarkRunner.Run<ModExpBenchmark>();
Console.ReadLine();
}
#endif
}
-}
+} | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using BenchmarkDotNet.Configs;
using BenchmarkDotNet.Running;
namespace Nethermind.Precompiles.Benchmark
{
public class Program
{
public static void Main(string[] args)
#if DEBUG
=> BenchmarkSwitcher.FromAssembly(typeof(Program).Assembly).Run(args, new DebugInProcessConfig());
#else
{
// BenchmarkRunner.Run<Sha256Benchmark>();
// BenchmarkRunner.Run<RipEmdBenchmark>();
// BenchmarkRunner.Run<Blake2fBenchmark>();
// BenchmarkRunner.Run<KeccakBenchmark>();
// BenchmarkRunner.Run<Bn256AddBenchmark>();
// BenchmarkRunner.Run<Bn256MulBenchmark>();
// BenchmarkRunner.Run<Bn256PairingBenchmark>();
BenchmarkRunner.Run<ModExp2565Benchmark>();
BenchmarkRunner.Run<ModExpBenchmark>();
Console.ReadLine();
}
#endif
}
} | 1 | 25,166 | I cannot find the changes / results of the eip2565 benchmark | NethermindEth-nethermind | .cs |
@@ -151,7 +151,7 @@ const withData = (
'googlesitekit.moduleDataReset',
'googlesitekit.moduleDataResetHandler',
() => {
- this.setState( { data: false } );
+ this.setState( { data: false, zeroData: false } );
}
);
| 1 | /**
* withData higher-order component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import { each } from 'lodash';
/**
* WordPress dependencies
*/
import { addFilter, addAction } from '@wordpress/hooks';
import { Component } from '@wordpress/element';
import { __ } from '@wordpress/i18n';
/**
* Internal dependencies
*/
import { getModulesData } from '../../util';
import getNoDataComponent from '../notifications/nodata';
import getDataErrorComponent from '../notifications/data-error';
import getSetupIncompleteComponent from '../notifications/setup-incomplete';
/**
* A Higher order Component that provides data functionality to Components.
*
* This function takes a React Component that is data dependent, resolving via the data API.
*
* Automatically detects data errors, displaying an error CTA Component. Components can extend the default
* error handling to enable custom error messaging or data shapes.
*
* Components can provide a callback that checks if the data is "zero" - typically when an account is newly established and not yet providing data. In most cases the API returns all 0s, however some APIs may return empty strings or null.
*
* Components can optionally include `handleDataError` and `handleDataSuccess` function as props. `handleDataError` will be
* called with the error message string if there is a data error and called with no string if the data is empty.
* `handleDataSuccess` will be called when data resolves correctly.
*
* @param {WPElement} DataDependentComponent The React Component to render once we have its required data.
* @param {Array} selectData An array of data objects to resolve.
* Each object includes the following properties:
* {string} type The data type. Either 'core' or 'modules'.
* {string} identifier The data identifier, for example a module slug.
* {string} datapoint The datapoint.
* {Object?} data Optional arguments to pass along.
* {number} priority The data request priority, used for batching.
* {number} maxAge How long to cache the data results.
* {string | array} context The context(s) to resolve data, eg 'Dashboard'.
*
* @param {WPElement} loadingComponent A React Component to render while the data is resolving.
* @param {Object} layoutOptions An object with layout options that are passed to the getNoDataComponent and getDataErrorComponent components.
* @param {Function} isDataZero A callback function that is passed the resolved data and returns true
* if the data is "zero".
* @param {Function} getDataError A callback function that is passed the resolved data and returns the
* error message.
*
* @return {WPElement} Component Returns React.Components based on data and state.
* If has data Return DataDependentComponent with data.
* has no data Fallback message when no data.
* in loading state Return loadingComponent.
* has an error Returns error.
*
*/
const withData = (
DataDependentComponent,
selectData,
loadingComponent = null,
layoutOptions = {
inGrid: false,
fullWidth: false,
createGrid: false,
},
// The default isDataZero handler always returns false, Components must define when data is zero.
	// `isDataZero` is passed `returnedData` and `datapoint`.
isDataZero = () => {
return false;
},
// The default getDataError handler detects data.error and extracts the message from data.error.message or data.error.errors[0].message.
getDataError = ( data ) => {
if ( data && data.error ) {
if ( data.error.message ) {
return data.error.message;
}
if ( data.error.errors && data.error.errors[ 0 ] && data.error.errors[ 0 ].message ) {
return data.error.errors[ 0 ].message;
}
return __( 'Unidentified error', 'google-site-kit' );
}
if ( data && data.errors && data.errors[ 0 ] && data.errors[ 0 ].message ) {
return data.errors[ 0 ].message;
}
if ( data && data.error_data ) {
const errors = Object.values( data.error_data );
// Catch RateLimitExceeded specifically.
if ( errors[ 0 ] && 'RateLimitExceeded' === errors[ 0 ].reason ) {
return __( 'Too many requests have been sent within a given time span. Please reload this page again in a few seconds', 'google-site-kit' );
}
}
if ( data && data.errors ) {
const errors = Object.values( data.errors );
if ( errors[ 0 ] && errors[ 0 ][ 0 ] ) {
return errors[ 0 ][ 0 ];
}
}
// If error is the root of the response, ensure all expected parts are
// present, just to "be sure" that it is an error. All above error
// handlers are legacy and are likely never hit, but let's keep them
// because nobody will ever know.
if ( data.code && data.message && data.data && data.data.status ) {
return data.message;
}
// No error.
return false;
}
) => {
// ...and returns another component...
return class NewComponent extends Component {
constructor( props ) {
super( props );
this.state = {
data: false,
zeroData: false,
error: false,
};
addAction(
'googlesitekit.moduleDataReset',
'googlesitekit.moduleDataResetHandler',
() => {
this.setState( { data: false } );
}
);
/**
* Handle a single datapoint returned from the data API.
*
* Each resolved data point is passed thru this handler to detect errors and zero data conditions, and
* to trigger `handleDataError` and `handleDataSuccess` helpers.
*
* @param {Object} returnedData The data returned from the API.
* @param {Object} requestData The data object for the request.
*/
const handleReturnedData = ( returnedData, requestData ) => {
// If available, `handleDataError` will be called for errors (with a string) and empty data.
const {
handleDataError,
handleDataSuccess,
} = this.props;
const { datapoint, identifier, toState } = requestData;
// Check to see if the returned data is an error. If so, getDataError will return a string.
const error = getDataError( returnedData );
if ( error ) {
// Set an error state on the Component.
this.setState( {
error,
module: identifier,
} );
// If the Component included a `handleDataError` helper, pass it the error message.
if ( handleDataError ) {
handleDataError( error, returnedData );
}
} else if ( isDataZero( returnedData, datapoint, requestData ) ) { // No data error, next check for zero data.
// If we have a `handleDataError` call it without any parameters (indicating empty data).
if ( handleDataError ) {
handleDataError( error, returnedData );
}
// Set a zeroData state on the Component.
this.setState( { zeroData: true } );
} else if ( handleDataSuccess ) {
// Success! `handleDataSuccess` will be called (ie. not error or zero).
handleDataSuccess();
}
				// Resolve the returned data by setting state on the Component.
this.setState( {
requestDataToState: toState,
data: returnedData,
datapoint,
module: identifier,
} );
};
// Resolve all selectedData.
each( selectData, ( data ) => {
// Handle single contexts, or arrays of contexts.
if ( Array.isArray( data.context ) ) {
each( data.context, ( acontext ) => {
/**
* Request data for the context.
*/
addFilter( `googlesitekit.module${ acontext }DataRequest`,
`googlesitekit.data${ acontext }`, ( moduleData ) => {
data.callback = ( returnedData ) => {
handleReturnedData( returnedData, data );
};
moduleData.push( data );
return moduleData;
} );
} );
} else {
/**
* Request data for the context.
*/
addFilter( `googlesitekit.module${ data.context }DataRequest`,
`googlesitekit.data${ data.context }`, ( moduleData ) => {
data.callback = ( returnedData ) => {
handleReturnedData( returnedData, data );
};
moduleData.push( data );
return moduleData;
} );
}
} );
}
render() {
const {
data,
datapoint,
module,
zeroData,
error,
requestDataToState,
} = this.state;
// Render the loading component until we have data.
if ( ! data ) {
return loadingComponent;
}
const modulesData = getModulesData();
const moduleName = module ? modulesData[ module ].name : __( 'Site Kit', 'google-site-kit' );
// If module is active but setup not complete.
if ( module && modulesData[ module ].active && ! modulesData[ module ].setupComplete ) {
return getSetupIncompleteComponent( module, layoutOptions.inGrid, layoutOptions.fullWidth, layoutOptions.createGrid );
}
// If we have an error, display the DataErrorComponent.
if ( error ) {
return ( 'string' !== typeof error ) ? error : getDataErrorComponent( moduleName, error, layoutOptions.inGrid, layoutOptions.fullWidth, layoutOptions.createGrid, data );
}
// If we have zeroData, display the NoDataComponent.
if ( zeroData ) {
return getNoDataComponent( moduleName, layoutOptions.inGrid, layoutOptions.fullWidth, layoutOptions.createGrid );
}
// Render the Component when we have data, passing the datapoint.
return (
<DataDependentComponent
data={ data }
datapoint={ datapoint }
requestDataToState={ requestDataToState }
{ ...this.props }
/>
);
}
};
};
export default withData;
| 1 | 31,236 | @tofumatt now that I see this in context, we're now resetting 2/3 of the keys in the `NewComponent` state - the other being `error`. It seems that `error` should probably also be reset - essentially resetting all of the component state when the date range changes. What do you think? cc: @felixarntz @adamsilverstein | google-site-kit-wp | js |
@@ -32,7 +32,11 @@ namespace OpenTelemetry.Trace.Export
public override Task ShutdownAsync(CancellationToken cancellationToken)
{
+#if NET452
+ return Task.FromResult(0);
+#else
return Task.CompletedTask;
+#endif
}
}
} | 1 | // <copyright file="NoopActivityProcessor.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System.Diagnostics;
using System.Threading;
using System.Threading.Tasks;
namespace OpenTelemetry.Trace.Export
{
internal sealed class NoopActivityProcessor : ActivityProcessor
{
public override void OnStart(Activity activity)
{
}
public override void OnEnd(Activity activity)
{
}
public override Task ShutdownAsync(CancellationToken cancellationToken)
{
return Task.CompletedTask;
}
}
}
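// An illustrative sketch only, not part of this file: if a completed-task
// fallback like the NET452 branch in the diff above were ever needed on a hot
// path, the Task.FromResult(0) allocation could be avoided by caching the task
// in a static field. The helper class name below is hypothetical.
/*
using System.Threading.Tasks;

internal static class CompletedTasks
{
#if NET452
    internal static readonly Task Instance = Task.FromResult(0);
#else
    internal static readonly Task Instance = Task.CompletedTask;
#endif
}
*/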
| 1 | 14,202 | This one and the ones below are for shutdowns but if it appears in some method called frequently during the lifetime of the process it should be cached in a static. | open-telemetry-opentelemetry-dotnet | .cs |
@@ -5540,7 +5540,9 @@ describe('Model', function() {
return co(function*() {
const createdUser = yield User.create({ name: 'Hafez' });
- let threw = false;
+
+ let err;
+
try {
yield User.bulkWrite([{
updateOne: { | 1 | 'use strict';
/**
* Test dependencies.
*/
const start = require('./common');
const assert = require('assert');
const co = require('co');
const random = require('../lib/utils').random;
const util = require('./util');
const Buffer = require('safe-buffer').Buffer;
const mongoose = start.mongoose;
const Schema = mongoose.Schema;
const ValidatorError = mongoose.Error.ValidatorError;
const ValidationError = mongoose.Error.ValidationError;
const ObjectId = Schema.Types.ObjectId;
const DocumentObjectId = mongoose.Types.ObjectId;
const EmbeddedDocument = mongoose.Types.Embedded;
const MongooseError = mongoose.Error;
describe('Model', function() {
let db;
let Comments;
let BlogPost;
beforeEach(() => db.deleteModel(/.*/));
beforeEach(function() {
Comments = new Schema;
Comments.add({
title: String,
date: Date,
body: String,
comments: [Comments]
});
BlogPost = new Schema({
title: String,
author: String,
slug: String,
date: Date,
meta: {
date: Date,
visitors: Number
},
published: Boolean,
mixed: {},
numbers: [Number],
owners: [ObjectId],
comments: [Comments],
nested: { array: [Number] }
});
BlogPost
.virtual('titleWithAuthor')
.get(function() {
return this.get('title') + ' by ' + this.get('author');
})
.set(function(val) {
const split = val.split(' by ');
this.set('title', split[0]);
this.set('author', split[1]);
});
BlogPost.method('cool', function() {
return this;
});
BlogPost.static('woot', function() {
return this;
});
BlogPost = db.model('BlogPost', BlogPost);
});
before(function() {
db = start();
});
after(function() {
db.close();
});
afterEach(() => util.clearTestData(db));
it('can be created using _id as embedded document', function(done) {
const Test = db.model('Test', Schema({
_id: { first_name: String, age: Number },
last_name: String,
doc_embed: {
some: String
}
}));
const t = new Test({
_id: {
first_name: 'Daniel',
age: 21
},
last_name: 'Alabi',
doc_embed: {
some: 'a'
}
});
t.save(function(err) {
assert.ifError(err);
Test.findOne({}, function(err, doc) {
assert.ifError(err);
assert.ok('last_name' in doc);
assert.ok('_id' in doc);
assert.ok('first_name' in doc._id);
assert.equal(doc._id.first_name, 'Daniel');
assert.ok('age' in doc._id);
assert.equal(doc._id.age, 21);
assert.ok('doc_embed' in doc);
assert.ok('some' in doc.doc_embed);
assert.equal(doc.doc_embed.some, 'a');
done();
});
});
});
describe('constructor', function() {
it('works without "new" keyword', function(done) {
const B = BlogPost;
let b = B();
assert.ok(b instanceof B);
b = B();
assert.ok(b instanceof B);
done();
});
it('works "new" keyword', function(done) {
const B = BlogPost;
let b = new B();
assert.ok(b instanceof B);
b = new B();
assert.ok(b instanceof B);
done();
});
});
describe('isNew', function() {
it('is true on instantiation', function(done) {
const post = new BlogPost;
assert.equal(post.isNew, true);
done();
});
});
it('gh-2140', function(done) {
db.deleteModel(/Test/);
const S = new Schema({
field: [{ text: String }]
});
const Model = db.model('Test', S);
const s = new Model();
s.field = [null];
s.field = [{ text: 'text' }];
assert.ok(s.field[0]);
done();
});
describe('schema', function() {
it('should exist', function(done) {
assert.ok(BlogPost.schema instanceof Schema);
assert.ok(BlogPost.prototype.schema instanceof Schema);
done();
});
it('emits init event', function(done) {
const schema = new Schema({ name: String });
let model;
schema.on('init', function(model_) {
model = model_;
});
db.deleteModel(/Test/);
const Named = db.model('Test', schema);
assert.equal(model, Named);
done();
});
});
describe('structure', function() {
it('default when instantiated', function(done) {
const post = new BlogPost;
assert.equal(post.db.model('BlogPost').modelName, 'BlogPost');
assert.equal(post.constructor.modelName, 'BlogPost');
assert.ok(post.get('_id') instanceof DocumentObjectId);
assert.equal(post.get('title'), undefined);
assert.equal(post.get('slug'), undefined);
assert.equal(post.get('date'), undefined);
assert.equal(typeof post.get('meta'), 'object');
assert.deepEqual(post.get('meta'), {});
assert.equal(post.get('meta.date'), undefined);
assert.equal(post.get('meta.visitors'), undefined);
assert.equal(post.get('published'), undefined);
assert.equal(Object.keys(post.get('nested')).length, 1);
assert.ok(Array.isArray(post.get('nested').array));
assert.ok(post.get('numbers').isMongooseArray);
assert.ok(post.get('owners').isMongooseArray);
assert.ok(post.get('comments').isMongooseDocumentArray);
assert.ok(post.get('nested.array').isMongooseArray);
done();
});
describe('array', function() {
describe('defaults', function() {
it('to a non-empty array', function(done) {
const DefaultArraySchema = new Schema({
arr: { type: Array, cast: String, default: ['a', 'b', 'c'] },
single: { type: Array, cast: String, default: ['a'] }
});
const DefaultArray = db.model('Test', DefaultArraySchema);
const arr = new DefaultArray;
assert.equal(arr.get('arr').length, 3);
assert.equal(arr.get('arr')[0], 'a');
assert.equal(arr.get('arr')[1], 'b');
assert.equal(arr.get('arr')[2], 'c');
assert.equal(arr.get('single').length, 1);
assert.equal(arr.get('single')[0], 'a');
done();
});
it('empty', function(done) {
const DefaultZeroCardArraySchema = new Schema({
arr: { type: Array, cast: String, default: [] },
auto: [Number]
});
const DefaultZeroCardArray = db.model('Test', DefaultZeroCardArraySchema);
const arr = new DefaultZeroCardArray();
assert.equal(arr.get('arr').length, 0);
assert.equal(arr.arr.length, 0);
assert.equal(arr.auto.length, 0);
done();
});
});
});
it('a hash with one null value', function(done) {
const post = new BlogPost({
title: null
});
assert.strictEqual(null, post.title);
done();
});
it('when saved', function(done) {
let pending = 2;
function cb() {
if (--pending) {
return;
}
done();
}
const post = new BlogPost();
post.on('save', function(post) {
assert.ok(post.get('_id') instanceof DocumentObjectId);
assert.equal(post.get('title'), undefined);
assert.equal(post.get('slug'), undefined);
assert.equal(post.get('date'), undefined);
assert.equal(post.get('published'), undefined);
assert.equal(typeof post.get('meta'), 'object');
assert.deepEqual(post.get('meta'), {});
assert.equal(post.get('meta.date'), undefined);
assert.equal(post.get('meta.visitors'), undefined);
assert.ok(post.get('owners').isMongooseArray);
assert.ok(post.get('comments').isMongooseDocumentArray);
cb();
});
post.save(function(err, post) {
assert.ifError(err);
assert.ok(post.get('_id') instanceof DocumentObjectId);
assert.equal(post.get('title'), undefined);
assert.equal(post.get('slug'), undefined);
assert.equal(post.get('date'), undefined);
assert.equal(post.get('published'), undefined);
assert.equal(typeof post.get('meta'), 'object');
assert.deepEqual(post.get('meta'), {});
assert.equal(post.get('meta.date'), undefined);
assert.equal(post.get('meta.visitors'), undefined);
assert.ok(post.get('owners').isMongooseArray);
assert.ok(post.get('comments').isMongooseDocumentArray);
cb();
});
});
describe('init', function() {
it('works', function(done) {
const post = new BlogPost();
post.init({
title: 'Test',
slug: 'test',
date: new Date,
meta: {
date: new Date,
visitors: 5
},
published: true,
owners: [new DocumentObjectId, new DocumentObjectId],
comments: [
{ title: 'Test', date: new Date, body: 'Test' },
{ title: 'Super', date: new Date, body: 'Cool' }
]
});
assert.equal(post.get('title'), 'Test');
assert.equal(post.get('slug'), 'test');
assert.ok(post.get('date') instanceof Date);
assert.equal(typeof post.get('meta'), 'object');
assert.ok(post.get('meta').date instanceof Date);
assert.equal(typeof post.get('meta').visitors, 'number');
assert.equal(post.get('published'), true);
assert.equal(post.title, 'Test');
assert.equal(post.slug, 'test');
assert.ok(post.date instanceof Date);
assert.equal(typeof post.meta, 'object');
assert.ok(post.meta.date instanceof Date);
assert.equal(typeof post.meta.visitors, 'number');
assert.equal(post.published, true);
assert.ok(post.get('owners').isMongooseArray);
assert.ok(post.get('owners')[0] instanceof DocumentObjectId);
assert.ok(post.get('owners')[1] instanceof DocumentObjectId);
assert.ok(post.owners.isMongooseArray);
assert.ok(post.owners[0] instanceof DocumentObjectId);
assert.ok(post.owners[1] instanceof DocumentObjectId);
assert.ok(post.get('comments').isMongooseDocumentArray);
assert.ok(post.get('comments')[0] instanceof EmbeddedDocument);
assert.ok(post.get('comments')[1] instanceof EmbeddedDocument);
assert.ok(post.comments.isMongooseDocumentArray);
assert.ok(post.comments[0] instanceof EmbeddedDocument);
assert.ok(post.comments[1] instanceof EmbeddedDocument);
done();
});
it('partially', function(done) {
const post = new BlogPost;
post.init({
title: 'Test',
slug: 'test',
date: new Date
});
assert.equal(post.get('title'), 'Test');
assert.equal(post.get('slug'), 'test');
assert.ok(post.get('date') instanceof Date);
assert.equal(typeof post.get('meta'), 'object');
assert.deepEqual(post.get('meta'), {});
assert.equal(post.get('meta.date'), undefined);
assert.equal(post.get('meta.visitors'), undefined);
assert.equal(post.get('published'), undefined);
assert.ok(post.get('owners').isMongooseArray);
assert.ok(post.get('comments').isMongooseDocumentArray);
done();
});
it('with partial hash', function(done) {
const post = new BlogPost({
meta: {
date: new Date,
visitors: 5
}
});
assert.equal(post.get('meta.visitors').valueOf(), 5);
done();
});
it('isNew on embedded documents', function(done) {
const post = new BlogPost();
post.init({
title: 'Test',
slug: 'test',
comments: [{ title: 'Test', date: new Date, body: 'Test' }]
});
assert.equal(post.get('comments')[0].isNew, false);
done();
});
it('isNew on embedded documents after saving', function(done) {
const post = new BlogPost({ title: 'hocus pocus' });
post.comments.push({ title: 'Humpty Dumpty', comments: [{ title: 'nested' }] });
assert.equal(post.get('comments')[0].isNew, true);
assert.equal(post.get('comments')[0].comments[0].isNew, true);
post.invalidate('title'); // force error
post.save(function() {
assert.equal(post.isNew, true);
assert.equal(post.get('comments')[0].isNew, true);
assert.equal(post.get('comments')[0].comments[0].isNew, true);
post.save(function(err) {
assert.strictEqual(null, err);
assert.equal(post.isNew, false);
assert.equal(post.get('comments')[0].isNew, false);
assert.equal(post.get('comments')[0].comments[0].isNew, false);
done();
});
});
});
});
});
it('collection name can be specified through schema', function(done) {
const schema = new Schema({ name: String }, { collection: 'tests' });
const Named = mongoose.model('CollectionNamedInSchema1', schema);
assert.equal(Named.prototype.collection.name, 'tests');
const users2schema = new Schema({ name: String }, { collection: 'tests' });
const Named2 = db.model('FooBar', users2schema);
assert.equal(Named2.prototype.collection.name, 'tests');
done();
});
it('saving a model with a null value should perpetuate that null value to the db', function(done) {
const post = new BlogPost({
title: null
});
assert.strictEqual(null, post.title);
post.save(function(err) {
assert.strictEqual(err, null);
BlogPost.findById(post.id, function(err, found) {
assert.strictEqual(err, null);
assert.strictEqual(found.title, null);
done();
});
});
});
it('saves subdocuments middleware correctly', function(done) {
let child_hook;
let parent_hook;
const childSchema = new Schema({
name: String
});
childSchema.pre('save', function(next) {
child_hook = this.name;
next();
});
const parentSchema = new Schema({
name: String,
children: [childSchema]
});
parentSchema.pre('save', function(next) {
parent_hook = this.name;
next();
});
const Parent = db.model('Parent', parentSchema);
const parent = new Parent({
name: 'Bob',
children: [{
name: 'Mary'
}]
});
parent.save(function(err, parent) {
assert.equal(parent_hook, 'Bob');
assert.equal(child_hook, 'Mary');
assert.ifError(err);
parent.children[0].name = 'Jane';
parent.save(function(err) {
assert.equal(child_hook, 'Jane');
assert.ifError(err);
done();
});
});
});
it('instantiating a model with a hash that maps to at least 1 undefined value', function(done) {
const post = new BlogPost({
title: undefined
});
assert.strictEqual(undefined, post.title);
post.save(function(err) {
assert.strictEqual(null, err);
BlogPost.findById(post.id, function(err, found) {
assert.strictEqual(err, null);
assert.strictEqual(found.title, undefined);
done();
});
});
});
it('modified nested objects which contain MongoseNumbers should not cause a RangeError on save (gh-714)', function(done) {
const schema = new Schema({
nested: {
num: Number
}
});
const M = db.model('Test', schema);
const m = new M;
m.nested = null;
m.save(function(err) {
assert.ifError(err);
M.findById(m, function(err, m) {
assert.ifError(err);
m.nested.num = 5;
m.save(function(err) {
assert.ifError(err);
done();
});
});
});
});
it('no RangeError on remove() of a doc with Number _id (gh-714)', function(done) {
const MySchema = new Schema({
_id: { type: Number },
name: String
});
const MyModel = db.model('Test', MySchema);
const instance = new MyModel({
name: 'test',
_id: 35
});
instance.save(function(err) {
assert.ifError(err);
MyModel.findById(35, function(err, doc) {
assert.ifError(err);
doc.remove({}, function(err) {
assert.ifError(err);
done();
});
});
});
});
it('over-writing a number should persist to the db (gh-342)', function(done) {
const post = new BlogPost({
meta: {
date: new Date,
visitors: 10
}
});
post.save(function(err) {
assert.ifError(err);
post.set('meta.visitors', 20);
post.save(function(err) {
assert.ifError(err);
BlogPost.findById(post.id, function(err, found) {
assert.ifError(err);
assert.equal(found.get('meta.visitors').valueOf(), 20);
done();
});
});
});
});
describe('methods', function() {
it('can be defined', function(done) {
const post = new BlogPost();
assert.equal(post.cool(), post);
done();
});
it('can be defined on embedded documents', function(done) {
const ChildSchema = new Schema({ name: String });
ChildSchema.method('talk', function() {
return 'gaga';
});
const ParentSchema = new Schema({
children: [ChildSchema]
});
const ChildA = db.model('Child', ChildSchema);
const ParentA = db.model('Parent', ParentSchema);
const c = new ChildA;
assert.equal(typeof c.talk, 'function');
const p = new ParentA();
p.children.push({});
assert.equal(typeof p.children[0].talk, 'function');
done();
});
it('can be defined with nested key', function(done) {
const NestedKeySchema = new Schema({});
NestedKeySchema.method('foo', {
bar: function() {
return this;
}
});
const NestedKey = db.model('Test', NestedKeySchema);
const n = new NestedKey();
assert.equal(n.foo.bar(), n);
done();
});
});
describe('statics', function() {
it('can be defined', function(done) {
assert.equal(BlogPost.woot(), BlogPost);
done();
});
});
describe('casting as validation errors', function() {
it('error', function(done) {
let threw = false;
let post;
try {
post = new BlogPost({ date: 'Test', meta: { date: 'Test' } });
} catch (e) {
threw = true;
}
assert.equal(threw, false);
try {
post.set('title', 'Test');
} catch (e) {
threw = true;
}
assert.equal(threw, false);
post.save(function(err) {
assert.ok(err instanceof MongooseError);
assert.ok(err instanceof ValidationError);
assert.equal(Object.keys(err.errors).length, 2);
post.date = new Date;
post.meta.date = new Date;
post.save(function(err) {
assert.ifError(err);
done();
});
});
});
it('nested error', function(done) {
let threw = false;
const post = new BlogPost;
try {
post.init({
meta: {
date: 'Test'
}
});
} catch (e) {
threw = true;
}
assert.equal(threw, false);
try {
post.set('meta.date', 'Test');
} catch (e) {
threw = true;
}
assert.equal(threw, false);
post.save(function(err) {
assert.ok(err instanceof MongooseError);
assert.ok(err instanceof ValidationError);
done();
});
});
it('subdocument cast error', function(done) {
const post = new BlogPost({
title: 'Test',
slug: 'test',
comments: [{ title: 'Test', date: new Date, body: 'Test' }]
});
post.get('comments')[0].set('date', 'invalid');
post.save(function(err) {
assert.ok(err instanceof MongooseError);
assert.ok(err instanceof ValidationError);
done();
});
});
it('subdocument validation error', function(done) {
function failingvalidator() {
return false;
}
db.deleteModel(/BlogPost/);
const subs = new Schema({
str: {
type: String, validate: failingvalidator
}
});
const BlogPost = db.model('BlogPost', { subs: [subs] });
const post = new BlogPost();
post.init({
subs: [{ str: 'gaga' }]
});
post.save(function(err) {
assert.ok(err instanceof ValidationError);
done();
});
});
it('subdocument error when adding a subdoc', function(done) {
let threw = false;
const post = new BlogPost();
try {
post.get('comments').push({
date: 'Bad date'
});
} catch (e) {
threw = true;
}
assert.equal(threw, false);
post.save(function(err) {
assert.ok(err instanceof MongooseError);
assert.ok(err instanceof ValidationError);
done();
});
});
it('updates', function(done) {
const post = new BlogPost();
post.set('title', '1');
const id = post.get('_id');
post.save(function(err) {
assert.ifError(err);
BlogPost.updateOne({ title: 1, _id: id }, { title: 2 }, function(err) {
assert.ifError(err);
BlogPost.findOne({ _id: post.get('_id') }, function(err, doc) {
assert.ifError(err);
assert.equal(doc.get('title'), '2');
done();
});
});
});
});
it('$pull', function(done) {
const post = new BlogPost();
post.get('numbers').push('3');
assert.equal(post.get('numbers')[0], 3);
done();
});
it('$push', function(done) {
const post = new BlogPost();
post.get('numbers').push(1, 2, 3, 4);
post.save(function() {
BlogPost.findById(post.get('_id'), function(err, found) {
assert.equal(found.get('numbers').length, 4);
found.get('numbers').pull('3');
found.save(function() {
BlogPost.findById(found.get('_id'), function(err, found2) {
assert.ifError(err);
assert.equal(found2.get('numbers').length, 3);
done();
});
});
});
});
});
it('Number arrays', function(done) {
const post = new BlogPost();
post.numbers.push(1, '2', 3);
post.save(function(err) {
assert.strictEqual(err, null);
BlogPost.findById(post._id, function(err, doc) {
assert.ifError(err);
assert.ok(~doc.numbers.indexOf(1));
assert.ok(~doc.numbers.indexOf(2));
assert.ok(~doc.numbers.indexOf(3));
done();
});
});
});
it('date casting compat with datejs (gh-502)', function(done) {
Date.prototype.toObject = function() {
return {
millisecond: 86,
second: 42,
minute: 47,
hour: 17,
day: 13,
week: 50,
month: 11,
year: 2011
};
};
const S = new Schema({
name: String,
description: String,
sabreId: String,
data: {
lastPrice: Number,
comm: String,
curr: String,
rateName: String
},
created: { type: Date, default: Date.now },
valid: { type: Boolean, default: true }
});
const M = db.model('Test', S);
const m = new M;
m.save(function(err) {
assert.ifError(err);
M.findById(m._id, function(err, m) {
assert.ifError(err);
m.save(function(err) {
assert.ifError(err);
M.deleteOne({}, function(err) {
delete Date.prototype.toObject;
assert.ifError(err);
done();
});
});
});
});
});
});
describe('validation', function() {
it('works', function(done) {
function dovalidate() {
assert.equal(this.asyncScope, 'correct');
return true;
}
function dovalidateAsync() {
assert.equal(this.scope, 'correct');
return global.Promise.resolve(true);
}
const TestValidation = db.model('Test', new Schema({
simple: { type: String, required: true },
scope: { type: String, validate: [dovalidate, 'scope failed'], required: true },
asyncScope: { type: String, validate: [dovalidateAsync, 'async scope failed'], required: true }
}));
const post = new TestValidation();
post.set('simple', '');
post.set('scope', 'correct');
post.set('asyncScope', 'correct');
post.save(function(err) {
assert.ok(err instanceof MongooseError);
assert.ok(err instanceof ValidationError);
post.set('simple', 'here');
post.save(function(err) {
assert.ifError(err);
done();
});
});
});
it('custom messaging', function(done) {
function validate(val) {
return val === 'abc';
}
const TestValidationMessage = db.model('Test', new Schema({
simple: { type: String, validate: [validate, 'must be abc'] }
}));
const post = new TestValidationMessage();
post.set('simple', '');
post.save(function(err) {
assert.ok(err instanceof MongooseError);
assert.ok(err instanceof ValidationError);
assert.ok(err.errors.simple instanceof ValidatorError);
assert.equal(err.errors.simple.message, 'must be abc');
assert.equal(post.errors.simple.message, 'must be abc');
post.set('simple', 'abc');
post.save(function(err) {
assert.ifError(err);
done();
});
});
});
it('with Model.schema.path introspection (gh-272)', function(done) {
const IntrospectionValidationSchema = new Schema({
name: String
});
const IntrospectionValidation = db.model('Test', IntrospectionValidationSchema);
IntrospectionValidation.schema.path('name').validate(function(value) {
return value.length < 2;
}, 'Name cannot be greater than 1 character for path "{PATH}" with value `{VALUE}`');
const doc = new IntrospectionValidation({ name: 'hi' });
doc.save(function(err) {
assert.equal(err.errors.name.message, 'Name cannot be greater than 1 character for path "name" with value `hi`');
assert.equal(err.name, 'ValidationError');
assert.ok(err.message.indexOf('Test validation failed') !== -1, err.message);
done();
});
});
it('of required undefined values', function(done) {
const TestUndefinedValidation = db.model('Test', new Schema({
simple: { type: String, required: true }
}));
const post = new TestUndefinedValidation;
post.save(function(err) {
assert.ok(err instanceof MongooseError);
assert.ok(err instanceof ValidationError);
post.set('simple', 'here');
post.save(function(err) {
assert.ifError(err);
done();
});
});
});
it('save callback should only execute once (gh-319)', function(done) {
const D = db.model('Test', new Schema({
username: { type: String, validate: /^[a-z]{6}$/i },
email: { type: String, validate: /^[a-z]{6}$/i },
password: { type: String, validate: /^[a-z]{6}$/i }
}));
const post = new D({
username: 'nope',
email: 'too',
password: 'short'
});
let timesCalled = 0;
post.save(function(err) {
assert.ok(err instanceof MongooseError);
assert.ok(err instanceof ValidationError);
assert.equal(++timesCalled, 1);
assert.equal(Object.keys(err.errors).length, 3);
assert.ok(err.errors.password instanceof ValidatorError);
assert.ok(err.errors.email instanceof ValidatorError);
assert.ok(err.errors.username instanceof ValidatorError);
assert.equal(err.errors.password.message, 'Validator failed for path `password` with value `short`');
assert.equal(err.errors.email.message, 'Validator failed for path `email` with value `too`');
assert.equal(err.errors.username.message, 'Validator failed for path `username` with value `nope`');
assert.equal(Object.keys(post.errors).length, 3);
assert.ok(post.errors.password instanceof ValidatorError);
assert.ok(post.errors.email instanceof ValidatorError);
assert.ok(post.errors.username instanceof ValidatorError);
assert.equal(post.errors.password.message, 'Validator failed for path `password` with value `short`');
assert.equal(post.errors.email.message, 'Validator failed for path `email` with value `too`');
assert.equal(post.errors.username.message, 'Validator failed for path `username` with value `nope`');
done();
});
});
it('query result', function(done) {
const TestV = db.model('Test', new Schema({
resultv: { type: String, required: true }
}));
const post = new TestV;
post.validate(function(err) {
assert.ok(err instanceof MongooseError);
assert.ok(err instanceof ValidationError);
post.resultv = 'yeah';
post.save(function(err) {
assert.ifError(err);
TestV.findOne({ _id: post.id }, function(err, found) {
assert.ifError(err);
assert.equal(found.resultv, 'yeah');
found.save(function(err) {
assert.ifError(err);
done();
});
});
});
});
});
it('of required previously existing null values', function(done) {
const TestP = db.model('Test', new Schema({
previous: { type: String, required: true },
a: String
}));
TestP.collection.insertOne({ a: null, previous: null }, {}, function(err, f) {
assert.ifError(err);
TestP.findOne({ _id: f.ops[0]._id }, function(err, found) {
assert.ifError(err);
assert.equal(found.isNew, false);
assert.strictEqual(found.get('previous'), null);
found.validate(function(err) {
assert.ok(err instanceof MongooseError);
assert.ok(err instanceof ValidationError);
found.set('previous', 'yoyo');
found.save(function(err) {
assert.strictEqual(err, null);
done();
});
});
});
});
});
it('nested', function(done) {
const TestNestedValidation = db.model('Test', new Schema({
nested: {
required: { type: String, required: true }
}
}));
const post = new TestNestedValidation();
post.set('nested.required', null);
post.save(function(err) {
assert.ok(err instanceof MongooseError);
assert.ok(err instanceof ValidationError);
post.set('nested.required', 'here');
post.save(function(err) {
assert.ifError(err);
done();
});
});
});
it('of nested subdocuments', function(done) {
const Subsubdocs = new Schema({ required: { type: String, required: true } });
const Subdocs = new Schema({
required: { type: String, required: true },
subs: [Subsubdocs]
});
const TestSubdocumentsValidation = db.model('Test', new Schema({
items: [Subdocs]
}));
const post = new TestSubdocumentsValidation();
post.get('items').push({ required: '', subs: [{ required: '' }] });
post.save(function(err) {
assert.ok(err instanceof MongooseError);
assert.ok(err instanceof ValidationError);
assert.ok(err.errors['items.0.subs.0.required'] instanceof ValidatorError);
assert.equal(err.errors['items.0.subs.0.required'].message, 'Path `required` is required.');
assert.ok(post.errors['items.0.subs.0.required'] instanceof ValidatorError);
assert.equal(post.errors['items.0.subs.0.required'].message, 'Path `required` is required.');
assert.ok(err.errors['items.0.required']);
assert.ok(post.errors['items.0.required']);
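// The boolean `true` is cast to the string 'true' on this String path, which satisfies the subdoc's required validator.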
post.items[0].subs[0].set('required', true);
assert.equal(post.$__.validationError, undefined);
post.save(function(err) {
assert.ok(err);
assert.ok(err.errors);
assert.ok(err.errors['items.0.required'] instanceof ValidatorError);
assert.equal(err.errors['items.0.required'].message, 'Path `required` is required.');
assert.ok(!err.errors['items.0.subs.0.required']);
assert.ok(!post.errors['items.0.subs.0.required']);
post.get('items')[0].set('required', true);
post.save(function(err) {
assert.ok(!post.errors);
assert.ifError(err);
done();
});
});
});
});
it('without saving', function(done) {
const TestCallingValidation = db.model('Test', new Schema({
item: { type: String, required: true }
}));
const post = new TestCallingValidation;
assert.equal(post.schema.path('item').isRequired, true);
assert.strictEqual(post.isNew, true);
post.validate(function(err) {
assert.ok(err instanceof MongooseError);
assert.ok(err instanceof ValidationError);
assert.strictEqual(post.isNew, true);
post.item = 'yo';
post.validate(function(err) {
assert.equal(err, null);
assert.strictEqual(post.isNew, true);
done();
});
});
});
it('when required is set to false', function(done) {
function validator() {
return true;
}
const TestV = db.model('Test', new Schema({
result: { type: String, validate: [validator, 'chump validator'], required: false }
}));
const post = new TestV;
assert.equal(post.schema.path('result').isRequired, false);
done();
});
describe('middleware', function() {
it('works', function(done) {
let ValidationMiddlewareSchema = null;
let Post = null;
let post = null;
ValidationMiddlewareSchema = new Schema({
baz: { type: String }
});
ValidationMiddlewareSchema.pre('validate', function(next) {
if (this.get('baz') === 'bad') {
this.invalidate('baz', 'bad');
}
next();
});
Post = db.model('Test', ValidationMiddlewareSchema);
post = new Post();
post.set({ baz: 'bad' });
post.save(function(err) {
assert.ok(err instanceof MongooseError);
assert.ok(err instanceof ValidationError);
assert.equal(err.errors.baz.kind, 'user defined');
assert.equal(err.errors.baz.path, 'baz');
post.set('baz', 'good');
post.save(function(err) {
assert.ifError(err);
done();
});
});
});
it('async', function(done) {
let AsyncValidationMiddlewareSchema = null;
let Post = null;
let post = null;
AsyncValidationMiddlewareSchema = new Schema({
prop: { type: String }
});
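// Legacy parallel middleware: passing `true` lets next() continue the chain immediately, while done() marks this async step complete.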
AsyncValidationMiddlewareSchema.pre('validate', true, function(next, done) {
const _this = this;
setTimeout(function() {
if (_this.get('prop') === 'bad') {
_this.invalidate('prop', 'bad');
}
done();
}, 5);
next();
});
Post = db.model('Test', AsyncValidationMiddlewareSchema);
post = new Post();
post.set({ prop: 'bad' });
post.save(function(err) {
assert.ok(err instanceof MongooseError);
assert.ok(err instanceof ValidationError);
assert.equal(err.errors.prop.kind, 'user defined');
assert.equal(err.errors.prop.path, 'prop');
post.set('prop', 'good');
post.save(function(err) {
assert.ifError(err);
done();
});
});
});
it('complex', function(done) {
let ComplexValidationMiddlewareSchema = null;
let Post = null;
let post = null;
const abc = v => v === 'abc';
ComplexValidationMiddlewareSchema = new Schema({
baz: { type: String },
abc: { type: String, validate: [abc, 'must be abc'] },
test: { type: String, validate: [/test/, 'must also be abc'] },
required: { type: String, required: true }
});
ComplexValidationMiddlewareSchema.pre('validate', true, function(next, done) {
const _this = this;
setTimeout(function() {
if (_this.get('baz') === 'bad') {
_this.invalidate('baz', 'bad');
}
done();
}, 5);
next();
});
Post = db.model('Test', ComplexValidationMiddlewareSchema);
post = new Post();
post.set({
baz: 'bad',
abc: 'not abc',
test: 'fail'
});
post.save(function(err) {
assert.ok(err instanceof MongooseError);
assert.ok(err instanceof ValidationError);
assert.equal(Object.keys(err.errors).length, 4);
assert.ok(err.errors.baz instanceof ValidatorError);
assert.equal(err.errors.baz.kind, 'user defined');
assert.equal(err.errors.baz.path, 'baz');
assert.ok(err.errors.abc instanceof ValidatorError);
assert.equal(err.errors.abc.kind, 'user defined');
assert.equal(err.errors.abc.message, 'must be abc');
assert.equal(err.errors.abc.path, 'abc');
assert.ok(err.errors.test instanceof ValidatorError);
assert.equal(err.errors.test.message, 'must also be abc');
assert.equal(err.errors.test.kind, 'user defined');
assert.equal(err.errors.test.path, 'test');
assert.ok(err.errors.required instanceof ValidatorError);
assert.equal(err.errors.required.kind, 'required');
assert.equal(err.errors.required.path, 'required');
post.set({
baz: 'good',
abc: 'abc',
test: 'test',
required: 'here'
});
post.save(function(err) {
assert.ifError(err);
done();
});
});
});
});
});
describe('defaults application', function() {
it('works', function(done) {
const now = Date.now();
const TestDefaults = db.model('Test', new Schema({
date: { type: Date, default: now }
}));
const post = new TestDefaults;
assert.ok(post.get('date') instanceof Date);
assert.equal(+post.get('date'), now);
done();
});
it('nested', function(done) {
const now = Date.now();
const TestDefaults = db.model('Test', new Schema({
nested: {
date: { type: Date, default: now }
}
}));
const post = new TestDefaults();
assert.ok(post.get('nested.date') instanceof Date);
assert.equal(+post.get('nested.date'), now);
done();
});
it('subdocument', function(done) {
const now = Date.now();
const Items = new Schema({
date: { type: Date, default: now }
});
const TestSubdocumentsDefaults = db.model('Test', new Schema({
items: [Items]
}));
const post = new TestSubdocumentsDefaults();
post.get('items').push({});
assert.ok(post.get('items')[0].get('date') instanceof Date);
assert.equal(+post.get('items')[0].get('date'), now);
done();
});
it('allows nulls', function(done) {
const T = db.model('Test', new Schema({ name: { type: String, default: null } }));
const t = new T();
assert.strictEqual(null, t.name);
t.save(function(err) {
assert.ifError(err);
T.findById(t._id, function(err, t) {
assert.ifError(err);
assert.strictEqual(null, t.name);
done();
});
});
});
});
describe('virtuals', function() {
it('getters', function(done) {
const post = new BlogPost({
title: 'Letters from Earth',
author: 'Mark Twain'
});
assert.equal(post.get('titleWithAuthor'), 'Letters from Earth by Mark Twain');
assert.equal(post.titleWithAuthor, 'Letters from Earth by Mark Twain');
done();
});
it('set()', function(done) {
const post = new BlogPost();
post.set('titleWithAuthor', 'Huckleberry Finn by Mark Twain');
assert.equal(post.get('title'), 'Huckleberry Finn');
assert.equal(post.get('author'), 'Mark Twain');
done();
});
it('should not be saved to the db', function(done) {
const post = new BlogPost();
post.set('titleWithAuthor', 'Huckleberry Finn by Mark Twain');
post.save(function(err) {
assert.ifError(err);
BlogPost.findById(post.get('_id'), function(err, found) {
assert.ifError(err);
assert.equal(found.get('title'), 'Huckleberry Finn');
assert.equal(found.get('author'), 'Mark Twain');
assert.ok(!('titleWithAuthor' in found.toObject()));
done();
});
});
});
it('nested', function(done) {
const PersonSchema = new Schema({
name: {
first: String,
last: String
}
});
PersonSchema.
virtual('name.full').
get(function() {
return this.get('name.first') + ' ' + this.get('name.last');
}).
set(function(fullName) {
const split = fullName.split(' ');
this.set('name.first', split[0]);
this.set('name.last', split[1]);
});
const Person = db.model('Person', PersonSchema);
const person = new Person({
name: {
first: 'Michael',
last: 'Sorrentino'
}
});
assert.equal(person.get('name.full'), 'Michael Sorrentino');
person.set('name.full', 'The Situation');
assert.equal(person.get('name.first'), 'The');
assert.equal(person.get('name.last'), 'Situation');
assert.equal(person.name.full, 'The Situation');
person.name.full = 'Michael Sorrentino';
assert.equal(person.name.first, 'Michael');
assert.equal(person.name.last, 'Sorrentino');
done();
});
});
describe('.remove()', function() {
it('works', function(done) {
BlogPost.create({ title: 1 }, { title: 2 }, function(err) {
assert.ifError(err);
BlogPost.remove({ title: 1 }, function(err) {
assert.ifError(err);
BlogPost.find({}, function(err, found) {
assert.ifError(err);
assert.equal(found.length, 1);
assert.equal(found[0].title, '2');
done();
});
});
});
});
it('errors when id deselected (gh-3118)', function(done) {
BlogPost.create({ title: 1 }, { title: 2 }, function(err) {
assert.ifError(err);
BlogPost.findOne({ title: 1 }, { _id: 0 }, function(error, doc) {
assert.ifError(error);
doc.remove(function(err) {
assert.ok(err);
assert.equal(err.message, 'No _id found on document!');
done();
});
});
});
});
it('should not remove any records when deleting by id undefined', function(done) {
BlogPost.create({ title: 1 }, { title: 2 }, function(err) {
assert.ifError(err);
BlogPost.remove({ _id: undefined }, function(err) {
assert.ifError(err);
BlogPost.find({}, function(err, found) {
assert.equal(found.length, 2, 'Should not remove any records');
done();
});
});
});
});
it('should not remove all documents in the collection (gh-3326)', function(done) {
BlogPost.create({ title: 1 }, { title: 2 }, function(err) {
assert.ifError(err);
BlogPost.findOne({ title: 1 }, function(error, doc) {
assert.ifError(error);
doc.remove(function(err) {
assert.ifError(err);
BlogPost.find(function(err, found) {
assert.ifError(err);
assert.equal(found.length, 1);
assert.equal(found[0].title, '2');
done();
});
});
});
});
});
});
describe('#remove()', function() {
it('passes the removed document (gh-1419)', function(done) {
BlogPost.create({}, function(err, post) {
assert.ifError(err);
BlogPost.findById(post, function(err, found) {
assert.ifError(err);
found.remove(function(err, doc) {
assert.ifError(err);
assert.ok(doc);
assert.ok(doc.equals(found));
done();
});
});
});
});
it('works as a promise', function(done) {
BlogPost.create({}, function(err, post) {
assert.ifError(err);
BlogPost.findById(post, function(err, found) {
assert.ifError(err);
found.remove().then(function(doc) {
assert.ok(doc);
assert.ok(doc.equals(found));
done();
}).catch(done);
});
});
});
it('works as a promise with a hook', function(done) {
let called = 0;
const RHS = new Schema({
name: String
});
RHS.pre('remove', function(next) {
called++;
return next();
});
const RH = db.model('Test', RHS);
RH.create({ name: 'to be removed' }, function(err, post) {
assert.ifError(err);
assert.ok(post);
RH.findById(post, function(err, found) {
assert.ifError(err);
assert.ok(found);
found.remove().then(function(doc) {
assert.ifError(err);
assert.equal(called, 1);
assert.ok(doc);
assert.ok(doc.equals(found));
done();
}).catch(done);
});
});
});
it('handles query vs document middleware (gh-3054)', function() {
const schema = new Schema({ name: String });
let docMiddleware = 0;
let queryMiddleware = 0;
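// Register the same 'remove' hook twice; the options object controls whether it fires for query middleware (Model.remove) or document middleware (doc.remove).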
schema.pre('remove', { query: true }, function() {
assert.ok(this instanceof Model.Query);
++queryMiddleware;
});
schema.pre('remove', { document: true }, function() {
assert.ok(this instanceof Model);
++docMiddleware;
});
const Model = db.model('Test', schema);
return co(function*() {
const doc = yield Model.create({ name: String });
assert.equal(docMiddleware, 0);
assert.equal(queryMiddleware, 0);
yield doc.remove();
assert.equal(docMiddleware, 1);
assert.equal(queryMiddleware, 0);
yield Model.remove({});
assert.equal(docMiddleware, 1);
assert.equal(queryMiddleware, 1);
});
});
describe('when called multiple times', function() {
it('always executes the passed callback gh-1210', function(done) {
const post = new BlogPost();
post.save(function(err) {
assert.ifError(err);
let pending = 2;
post.remove(function() {
if (--pending) {
return;
}
done();
});
post.remove(function() {
if (--pending) {
return;
}
done();
});
});
});
});
});
describe('getters', function() {
it('with same name on embedded docs do not clash', function(done) {
const Post = new Schema({
title: String,
author: { name: String },
subject: { name: String }
});
db.deleteModel(/BlogPost/);
const PostModel = db.model('BlogPost', Post);
const post = new PostModel({
title: 'Test',
author: { name: 'A' },
subject: { name: 'B' }
});
assert.equal(post.author.name, 'A');
assert.equal(post.subject.name, 'B');
assert.equal(post.author.name, 'A');
done();
});
it('should not be triggered at construction (gh-685)', function(done) {
let called = false;
const schema = new mongoose.Schema({
number: {
type: Number,
set: function(x) {
return x / 2;
},
get: function(x) {
called = true;
return x * 2;
}
}
});
const A = db.model('Test', schema);
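// The setter halves values on the way in and the getter doubles them on the way out, so the raw stored value is 50 while `number` reads back as 100.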
const a = new A({ number: 100 });
assert.equal(called, false);
let num = a.number;
assert.equal(called, true);
assert.equal(num.valueOf(), 100);
assert.equal(a.$__getValue('number').valueOf(), 50);
called = false;
const b = new A;
b.init({ number: 50 });
assert.equal(called, false);
num = b.number;
assert.equal(called, true);
assert.equal(num.valueOf(), 100);
assert.equal(b.$__getValue('number').valueOf(), 50);
done();
});
it('with type defined with { type: Native } (gh-190)', function(done) {
const schema = new Schema({ date: { type: Date } });
const ShortcutGetter = db.model('Test', schema);
const post = new ShortcutGetter();
post.set('date', Date.now());
assert.ok(post.date instanceof Date);
done();
});
describe('nested', function() {
it('works', function(done) {
const schema = new Schema({
first: {
second: [Number]
}
});
const ShortcutGetterNested = db.model('Test', schema);
const doc = new ShortcutGetterNested();
assert.equal(typeof doc.first, 'object');
assert.ok(doc.first.second.isMongooseArray);
done();
});
it('works with object literals', function(done) {
const date = new Date;
const meta = {
date: date,
visitors: 5
};
const post = new BlogPost();
post.init({
meta: meta
});
assert.ok(post.get('meta').date instanceof Date);
assert.ok(post.meta.date instanceof Date);
let threw = false;
let getter1;
let getter2;
try {
JSON.stringify(meta);
getter1 = JSON.stringify(post.get('meta'));
getter2 = JSON.stringify(post.meta);
} catch (err) {
threw = true;
}
assert.equal(threw, false);
getter1 = JSON.parse(getter1);
getter2 = JSON.parse(getter2);
assert.equal(getter1.visitors, 5);
assert.equal(getter2.visitors, 5);
assert.equal(getter1.date, getter2.date);
post.meta.date = new Date - 1000;
assert.ok(post.meta.date instanceof Date);
assert.ok(post.get('meta').date instanceof Date);
post.meta.visitors = 2;
assert.equal(typeof post.get('meta').visitors, 'number');
assert.equal(typeof post.meta.visitors, 'number');
const newmeta = {
date: date - 2000,
visitors: 234
};
post.set(newmeta, 'meta');
assert.ok(post.meta.date instanceof Date);
assert.ok(post.get('meta').date instanceof Date);
assert.equal(typeof post.meta.visitors, 'number');
assert.equal(typeof post.get('meta').visitors, 'number');
assert.equal((+post.meta.date), date - 2000);
assert.equal((+post.get('meta').date), date - 2000);
assert.equal((+post.meta.visitors), 234);
assert.equal((+post.get('meta').visitors), 234);
// set object directly
post.meta = {
date: date - 3000,
visitors: 4815162342
};
assert.ok(post.meta.date instanceof Date);
assert.ok(post.get('meta').date instanceof Date);
assert.equal(typeof post.meta.visitors, 'number');
assert.equal(typeof post.get('meta').visitors, 'number');
assert.equal((+post.meta.date), date - 3000);
assert.equal((+post.get('meta').date), date - 3000);
assert.equal((+post.meta.visitors), 4815162342);
assert.equal((+post.get('meta').visitors), 4815162342);
done();
});
it('object property access works when root initd with null', function(done) {
const schema = new Schema({
nest: {
st: String
}
});
const T = db.model('Test', schema);
const t = new T({ nest: null });
assert.strictEqual(t.nest.st, undefined);
t.nest = { st: 'jsconf rules' };
assert.deepEqual(t.nest.toObject(), { st: 'jsconf rules' });
assert.equal(t.nest.st, 'jsconf rules');
t.save(function(err) {
assert.ifError(err);
done();
});
});
it('object property access works when root initd with undefined', function(done) {
const schema = new Schema({
nest: {
st: String
}
});
const T = db.model('Test', schema);
const t = new T({ nest: undefined });
assert.strictEqual(t.nest.st, undefined);
t.nest = { st: 'jsconf rules' };
assert.deepEqual(t.nest.toObject(), { st: 'jsconf rules' });
assert.equal(t.nest.st, 'jsconf rules');
t.save(function(err) {
assert.ifError(err);
done();
});
});
it('pre-existing null object re-save', function(done) {
const schema = new Schema({
nest: {
st: String,
yep: String
}
});
const T = db.model('Test', schema);
const t = new T({ nest: null });
t.save(function(err) {
assert.ifError(err);
t.nest = { st: 'jsconf rules', yep: 'it does' };
// check that entire `nest` object is being $set
const u = t.$__delta()[1];
assert.ok(u.$set);
assert.ok(u.$set.nest);
assert.equal(Object.keys(u.$set.nest).length, 2);
assert.ok(u.$set.nest.yep);
assert.ok(u.$set.nest.st);
t.save(function(err) {
assert.ifError(err);
T.findById(t.id, function(err, t) {
assert.ifError(err);
assert.equal(t.nest.st, 'jsconf rules');
assert.equal(t.nest.yep, 'it does');
t.nest = null;
t.save(function(err) {
assert.ifError(err);
assert.strictEqual(t._doc.nest, null);
done();
});
});
});
});
});
it('array of Mixed on existing doc can be pushed to', function(done) {
const DooDad = db.model('Test', new Schema({
nested: {
arrays: []
}
}));
const doodad = new DooDad({ nested: { arrays: [] } });
const date = 1234567890;
doodad.nested.arrays.push(['+10', 'yup', date]);
doodad.save(function(err) {
assert.ifError(err);
DooDad.findById(doodad._id, function(err, doodad) {
assert.ifError(err);
assert.deepEqual(doodad.nested.arrays.toObject(), [['+10', 'yup', date]]);
doodad.nested.arrays.push(['another', 1]);
doodad.save(function(err) {
assert.ifError(err);
DooDad.findById(doodad._id, function(err, doodad) {
assert.ifError(err);
assert.deepEqual(doodad.nested.arrays.toObject(), [['+10', 'yup', date], ['another', 1]]);
done();
});
});
});
});
});
it('props can be set directly when property was named "type"', function(done) {
function def() {
return [{ x: 1 }, { x: 2 }, { x: 3 }];
}
const DooDad = db.model('Test', new Schema({
nested: {
type: { type: String, default: 'yep' },
array: {
type: Array, default: def
}
}
}));
const doodad = new DooDad();
doodad.save(function(err) {
assert.ifError(err);
DooDad.findById(doodad._id, function(err, doodad) {
assert.ifError(err);
assert.equal(doodad.nested.type, 'yep');
assert.deepEqual(doodad.nested.array.toObject(), [{ x: 1 }, { x: 2 }, { x: 3 }]);
doodad.nested.type = 'nope';
doodad.nested.array = ['some', 'new', 'stuff'];
doodad.save(function(err) {
assert.ifError(err);
DooDad.findById(doodad._id, function(err, doodad) {
assert.ifError(err);
assert.equal(doodad.nested.type, 'nope');
assert.deepEqual(doodad.nested.array.toObject(), ['some', 'new', 'stuff']);
done();
});
});
});
});
});
});
});
describe('setters', function() {
it('are used on embedded docs (gh-365 gh-390 gh-422)', function(done) {
function setLat(val) {
return parseInt(val, 10);
}
let tick = 0;
function uptick() {
return ++tick;
}
let Location = new Schema({
lat: { type: Number, default: 0, set: setLat },
long: { type: Number, set: uptick }
});
let Deal = new Schema({
title: String,
locations: [Location]
});
Location = db.model('Location', Location);
Deal = db.model('Test', Deal);
const location = new Location({ lat: 1.2, long: 10 });
assert.equal(location.lat.valueOf(), 1);
assert.equal(location.long.valueOf(), 1);
const deal = new Deal({ title: 'My deal', locations: [{ lat: 1.2, long: 33 }] });
assert.equal(deal.locations[0].lat.valueOf(), 1);
assert.equal(deal.locations[0].long.valueOf(), 2);
deal.save(function(err) {
assert.ifError(err);
Deal.findById(deal._id, function(err, deal) {
assert.ifError(err);
assert.equal(deal.locations[0].lat.valueOf(), 1);
// GH-422
assert.equal(deal.locations[0].long.valueOf(), 2);
done();
});
});
});
});
it('changing a number non-atomically (gh-203)', function(done) {
const post = new BlogPost();
post.meta.visitors = 5;
post.save(function(err) {
assert.ifError(err);
BlogPost.findById(post._id, function(err, doc) {
assert.ifError(err);
doc.meta.visitors -= 2;
doc.save(function(err) {
assert.ifError(err);
BlogPost.findById(post._id, function(err, doc) {
assert.ifError(err);
assert.equal(+doc.meta.visitors, 3);
done();
});
});
});
});
});
describe('atomic subdocument', function() {
it('saving', function(done) {
let totalDocs = 4;
const saveQueue = [];
const post = new BlogPost;
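// Fetch the same post in four parallel queries, push comments on each copy, then save all copies; atomic $push operations preserve every comment, 5 in total.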
function complete() {
BlogPost.findOne({ _id: post.get('_id') }, function(err, doc) {
assert.ifError(err);
assert.equal(doc.get('comments').length, 5);
let v = doc.get('comments').some(function(comment) {
return comment.get('title') === '1';
});
assert.ok(v);
v = doc.get('comments').some(function(comment) {
return comment.get('title') === '2';
});
assert.ok(v);
v = doc.get('comments').some(function(comment) {
return comment.get('title') === '3';
});
assert.ok(v);
v = doc.get('comments').some(function(comment) {
return comment.get('title') === '4';
});
assert.ok(v);
v = doc.get('comments').some(function(comment) {
return comment.get('title') === '5';
});
assert.ok(v);
done();
});
}
function save(doc) {
saveQueue.push(doc);
if (saveQueue.length === 4) {
saveQueue.forEach(function(doc) {
doc.save(function(err) {
assert.ifError(err);
--totalDocs || complete();
});
});
}
}
post.save(function(err) {
assert.ifError(err);
BlogPost.findOne({ _id: post.get('_id') }, function(err, doc) {
assert.ifError(err);
doc.get('comments').push({ title: '1' });
save(doc);
});
BlogPost.findOne({ _id: post.get('_id') }, function(err, doc) {
assert.ifError(err);
doc.get('comments').push({ title: '2' });
save(doc);
});
BlogPost.findOne({ _id: post.get('_id') }, function(err, doc) {
assert.ifError(err);
doc.get('comments').push({ title: '3' });
save(doc);
});
BlogPost.findOne({ _id: post.get('_id') }, function(err, doc) {
assert.ifError(err);
doc.get('comments').push({ title: '4' }, { title: '5' });
save(doc);
});
});
});
it('setting (gh-310)', function(done) {
BlogPost.create({
comments: [{ title: 'first-title', body: 'first-body' }]
}, function(err, blog) {
assert.ifError(err);
BlogPost.findById(blog.id, function(err, agent1blog) {
assert.ifError(err);
BlogPost.findById(blog.id, function(err, agent2blog) {
assert.ifError(err);
agent1blog.get('comments')[0].title = 'second-title';
agent1blog.save(function(err) {
assert.ifError(err);
agent2blog.get('comments')[0].body = 'second-body';
agent2blog.save(function(err) {
assert.ifError(err);
BlogPost.findById(blog.id, function(err, foundBlog) {
assert.ifError(err);
const comment = foundBlog.get('comments')[0];
assert.equal(comment.title, 'second-title');
assert.equal(comment.body, 'second-body');
done();
});
});
});
});
});
});
});
});
it('doubly nested array saving and loading', function(done) {
const Inner = new Schema({
arr: [Number]
});
let Outer = new Schema({
inner: [Inner]
});
Outer = db.model('Test', Outer);
const outer = new Outer();
outer.inner.push({});
outer.save(function(err) {
assert.ifError(err);
assert.ok(outer.get('_id') instanceof DocumentObjectId);
Outer.findById(outer.get('_id'), function(err, found) {
assert.ifError(err);
assert.equal(found.inner.length, 1);
found.inner[0].arr.push(5);
found.save(function(err) {
assert.ifError(err);
assert.ok(found.get('_id') instanceof DocumentObjectId);
Outer.findById(found.get('_id'), function(err, found2) {
assert.ifError(err);
assert.equal(found2.inner.length, 1);
assert.equal(found2.inner[0].arr.length, 1);
assert.equal(found2.inner[0].arr[0], 5);
done();
});
});
});
});
});
it('multiple number push() calls', function(done) {
const schema = new Schema({
nested: {
nums: [Number]
}
});
const Temp = db.model('Test', schema);
Temp.create({}, function(err, t) {
assert.ifError(err);
t.nested.nums.push(1);
t.nested.nums.push(2);
assert.equal(t.nested.nums.length, 2);
t.save(function(err) {
assert.ifError(err);
assert.equal(t.nested.nums.length, 2);
Temp.findById(t._id, function(err) {
assert.ifError(err);
assert.equal(t.nested.nums.length, 2);
done();
});
});
});
});
it('multiple push() calls', function(done) {
const schema = new Schema({
nested: {
nums: [Number]
}
});
const Temp = db.model('Test', schema);
Temp.create({}, function(err, t) {
assert.ifError(err);
t.nested.nums.push(1);
t.nested.nums.push(2, 3);
assert.equal(t.nested.nums.length, 3);
t.save(function(err) {
assert.ifError(err);
assert.equal(t.nested.nums.length, 3);
Temp.findById(t._id, function(err, found) {
assert.ifError(err);
assert.equal(found.nested.nums.length, 3);
done();
});
});
});
});
it('activePaths should be updated for nested modified paths', function(done) {
const schema = new Schema({
nested: {
nums: [Number]
}
});
const Temp = db.model('Test', schema);
Temp.create({ nested: { nums: [1, 2, 3, 4, 5] } }, function(err, t) {
assert.ifError(err);
t.nested.nums.pull(1);
t.nested.nums.pull(2);
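// Pulling elements should register the whole nested array path as modified in activePaths.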
assert.equal(t.$__.activePaths.paths['nested.nums'], 'modify');
done();
});
});
it('activePaths should be updated for nested modified paths as a promise', function(done) {
const schema = new Schema({
nested: {
nums: [Number]
}
});
const Temp = db.model('Test', schema);
const p1 = Temp.create({ nested: { nums: [1, 2, 3, 4, 5] } });
p1.then(function(t) {
t.nested.nums.pull(1);
t.nested.nums.pull(2);
assert.equal(t.$__.activePaths.paths['nested.nums'], 'modify');
done();
}).catch(done);
});
it('$pull should affect what you see in an array before a save', function(done) {
const schema = new Schema({
nested: {
nums: [Number]
}
});
const Temp = db.model('Test', schema);
Temp.create({ nested: { nums: [1, 2, 3, 4, 5] } }, function(err, t) {
assert.ifError(err);
t.nested.nums.pull(1);
assert.equal(t.nested.nums.length, 4);
done();
});
});
it('$shift', function(done) {
const schema = new Schema({
nested: {
nums: [Number]
}
});
const Temp = db.model('Test', schema);
Temp.create({ nested: { nums: [1, 2, 3] } }, function(err, t) {
assert.ifError(err);
Temp.findById(t._id, function(err, found) {
assert.ifError(err);
assert.equal(found.nested.nums.length, 3);
found.nested.nums.$pop();
assert.equal(found.nested.nums.length, 2);
assert.equal(found.nested.nums[0], 1);
assert.equal(found.nested.nums[1], 2);
found.save(function(err) {
assert.ifError(err);
Temp.findById(t._id, function(err, found) {
assert.ifError(err);
assert.equal(found.nested.nums.length, 2);
assert.equal(found.nested.nums[0], 1);
assert.equal(found.nested.nums[1], 2);
found.nested.nums.$shift();
assert.equal(found.nested.nums.length, 1);
assert.equal(found.nested.nums[0], 2);
found.save(function(err) {
assert.ifError(err);
Temp.findById(t._id, function(err, found) {
assert.ifError(err);
assert.equal(found.nested.nums.length, 1);
assert.equal(found.nested.nums[0], 2);
done();
});
});
});
});
});
});
});
describe('saving embedded arrays', function() {
it('of Numbers atomically', function(done) {
const TempSchema = new Schema({
nums: [Number]
});
let totalDocs = 2;
const saveQueue = [];
const Temp = db.model('Test', TempSchema);
const t = new Temp();
function complete() {
Temp.findOne({ _id: t.get('_id') }, function(err, doc) {
assert.ifError(err);
assert.equal(doc.get('nums').length, 3);
let v = doc.get('nums').some(function(num) {
return num.valueOf() === 1;
});
assert.ok(v);
v = doc.get('nums').some(function(num) {
return num.valueOf() === 2;
});
assert.ok(v);
v = doc.get('nums').some(function(num) {
return num.valueOf() === 3;
});
assert.ok(v);
done();
});
}
function save(doc) {
saveQueue.push(doc);
if (saveQueue.length === totalDocs) {
saveQueue.forEach(function(doc) {
doc.save(function(err) {
assert.ifError(err);
--totalDocs || complete();
});
});
}
}
t.save(function(err) {
assert.ifError(err);
Temp.findOne({ _id: t.get('_id') }, function(err, doc) {
assert.ifError(err);
doc.get('nums').push(1);
save(doc);
});
Temp.findOne({ _id: t.get('_id') }, function(err, doc) {
assert.ifError(err);
doc.get('nums').push(2, 3);
save(doc);
});
});
});
it('of Strings atomically', function(done) {
const StrListSchema = new Schema({
strings: [String]
});
let totalDocs = 2;
const saveQueue = [];
const StrList = db.model('Test', StrListSchema);
const t = new StrList();
function complete() {
StrList.findOne({ _id: t.get('_id') }, function(err, doc) {
assert.ifError(err);
assert.equal(doc.get('strings').length, 3);
let v = doc.get('strings').some(function(str) {
return str === 'a';
});
assert.ok(v);
v = doc.get('strings').some(function(str) {
return str === 'b';
});
assert.ok(v);
v = doc.get('strings').some(function(str) {
return str === 'c';
});
assert.ok(v);
done();
});
}
function save(doc) {
saveQueue.push(doc);
if (saveQueue.length === totalDocs) {
saveQueue.forEach(function(doc) {
doc.save(function(err) {
assert.ifError(err);
--totalDocs || complete();
});
});
}
}
t.save(function(err) {
assert.ifError(err);
StrList.findOne({ _id: t.get('_id') }, function(err, doc) {
assert.ifError(err);
doc.get('strings').push('a');
save(doc);
});
StrList.findOne({ _id: t.get('_id') }, function(err, doc) {
assert.ifError(err);
doc.get('strings').push('b', 'c');
save(doc);
});
});
});
it('of Buffers atomically', function(done) {
const BufListSchema = new Schema({
buffers: [Buffer]
});
let totalDocs = 2;
const saveQueue = [];
const BufList = db.model('Test', BufListSchema);
const t = new BufList();
function complete() {
BufList.findOne({ _id: t.get('_id') }, function(err, doc) {
assert.ifError(err);
assert.equal(doc.get('buffers').length, 3);
let v = doc.get('buffers').some(function(buf) {
return buf[0] === 140;
});
assert.ok(v);
v = doc.get('buffers').some(function(buf) {
return buf[0] === 141;
});
assert.ok(v);
v = doc.get('buffers').some(function(buf) {
return buf[0] === 142;
});
assert.ok(v);
done();
});
}
function save(doc) {
saveQueue.push(doc);
if (saveQueue.length === totalDocs) {
saveQueue.forEach(function(doc) {
doc.save(function(err) {
assert.ifError(err);
--totalDocs || complete();
});
});
}
}
t.save(function(err) {
assert.ifError(err);
BufList.findOne({ _id: t.get('_id') }, function(err, doc) {
assert.ifError(err);
doc.get('buffers').push(Buffer.from([140]));
save(doc);
});
BufList.findOne({ _id: t.get('_id') }, function(err, doc) {
assert.ifError(err);
doc.get('buffers').push(Buffer.from([141]), Buffer.from([142]));
save(doc);
});
});
});
it('works with modified element properties + doc removal (gh-975)', function(done) {
const B = BlogPost;
const b = new B({ comments: [{ title: 'gh-975' }] });
b.save(function(err) {
assert.ifError(err);
b.comments[0].title = 'changed';
b.save(function(err) {
assert.ifError(err);
b.comments[0].remove();
b.save(function(err) {
assert.ifError(err);
B.findByIdAndUpdate({ _id: b._id }, { $set: { comments: [{ title: 'a' }] } }, { new: true }, function(err, doc) {
assert.ifError(err);
doc.comments[0].title = 'differ';
doc.comments[0].remove();
doc.save(function(err) {
assert.ifError(err);
B.findById(doc._id, function(err, doc) {
assert.ifError(err);
assert.equal(doc.comments.length, 0);
done();
});
});
});
});
});
});
});
it('updating an embedded document in an embedded array with set call', function(done) {
BlogPost.create({
comments: [{
title: 'before-change'
}]
}, function(err, post) {
assert.ifError(err);
BlogPost.findById(post._id, function(err, found) {
assert.ifError(err);
assert.equal(found.comments[0].title, 'before-change');
const subDoc = [{
_id: found.comments[0]._id,
title: 'after-change'
}];
found.set('comments', subDoc);
found.save(function(err) {
assert.ifError(err);
BlogPost.findById(found._id, function(err, updated) {
assert.ifError(err);
assert.equal(updated.comments[0].title, 'after-change');
done();
});
});
});
});
});
});
it('updating an embedded document in an embedded array (gh-255)', function(done) {
BlogPost.create({ comments: [{ title: 'woot' }] }, function(err, post) {
assert.ifError(err);
BlogPost.findById(post._id, function(err, found) {
assert.ifError(err);
assert.equal(found.comments[0].title, 'woot');
found.comments[0].title = 'notwoot';
found.save(function(err) {
assert.ifError(err);
BlogPost.findById(found._id, function(err, updated) {
assert.ifError(err);
assert.equal(updated.comments[0].title, 'notwoot');
done();
});
});
});
});
});
it('updating an embedded array document to an Object value (gh-334)', function(done) {
const SubSchema = new Schema({
name: String,
subObj: { subName: String }
});
const GH334Schema = new Schema({ name: String, arrData: [SubSchema] });
const AModel = db.model('Test', GH334Schema);
const instance = new AModel();
instance.set({ name: 'name-value', arrData: [{ name: 'arrName1', subObj: { subName: 'subName1' } }] });
instance.save(function(err) {
assert.ifError(err);
AModel.findById(instance.id, function(err, doc) {
assert.ifError(err);
doc.arrData[0].set('subObj', { subName: 'modified subName' });
doc.save(function(err) {
assert.ifError(err);
AModel.findById(instance.id, function(err, doc) {
assert.ifError(err);
assert.equal(doc.arrData[0].subObj.subName, 'modified subName');
done();
});
});
});
});
});
it('saving an embedded document twice should not push that doc onto the parent doc twice (gh-267)', function(done) {
const post = new BlogPost();
post.comments.push({ title: 'woot' });
post.save(function(err) {
assert.ifError(err);
assert.equal(post.comments.length, 1);
BlogPost.findById(post.id, function(err, found) {
assert.ifError(err);
assert.equal(found.comments.length, 1);
post.save(function(err) {
assert.ifError(err);
assert.equal(post.comments.length, 1);
BlogPost.findById(post.id, function(err, found) {
assert.ifError(err);
assert.equal(found.comments.length, 1);
done();
});
});
});
});
});
describe('embedded array filtering', function() {
it('by the id shortcut function', function(done) {
const post = new BlogPost();
post.comments.push({ title: 'woot' });
post.comments.push({ title: 'aaaa' });
const subdoc1 = post.comments[0];
const subdoc2 = post.comments[1];
post.save(function(err) {
assert.ifError(err);
BlogPost.findById(post.get('_id'), function(err, doc) {
assert.ifError(err);
// test with an objectid
assert.equal(doc.comments.id(subdoc1.get('_id')).title, 'woot');
// test with a string
const id = subdoc2._id.toString();
assert.equal(doc.comments.id(id).title, 'aaaa');
done();
});
});
});
it('by the id with cast error', function(done) {
const post = new BlogPost();
post.save(function(err) {
assert.ifError(err);
BlogPost.findById(post.get('_id'), function(err, doc) {
assert.ifError(err);
assert.strictEqual(doc.comments.id(null), null);
done();
});
});
});
it('by the id shortcut with no match', function(done) {
const post = new BlogPost();
post.save(function(err) {
assert.ifError(err);
BlogPost.findById(post.get('_id'), function(err, doc) {
assert.ifError(err);
assert.strictEqual(doc.comments.id(new DocumentObjectId), null);
done();
});
});
});
});
it('removing a subdocument atomically', function(done) {
const post = new BlogPost();
post.title = 'hahaha';
post.comments.push({ title: 'woot' });
post.comments.push({ title: 'aaaa' });
post.save(function(err) {
assert.ifError(err);
BlogPost.findById(post.get('_id'), function(err, doc) {
assert.ifError(err);
doc.comments[0].remove();
doc.save(function(err) {
assert.ifError(err);
BlogPost.findById(post.get('_id'), function(err, doc) {
assert.ifError(err);
assert.equal(doc.comments.length, 1);
assert.equal(doc.comments[0].title, 'aaaa');
done();
});
});
});
});
});
it('single pull embedded doc', function(done) {
const post = new BlogPost();
post.title = 'hahaha';
post.comments.push({ title: 'woot' });
post.comments.push({ title: 'aaaa' });
post.save(function(err) {
assert.ifError(err);
BlogPost.findById(post.get('_id'), function(err, doc) {
assert.ifError(err);
doc.comments.pull(doc.comments[0]);
doc.comments.pull(doc.comments[0]);
doc.save(function(err) {
assert.ifError(err);
BlogPost.findById(post.get('_id'), function(err, doc) {
assert.ifError(err);
assert.equal(doc.comments.length, 0);
done();
});
});
});
});
});
it('saving mixed data', function(done) {
let count = 3;
// string
const post = new BlogPost();
post.mixed = 'woot';
post.save(function(err) {
assert.ifError(err);
BlogPost.findById(post._id, function(err) {
assert.ifError(err);
if (--count) {
return;
}
done();
});
});
// object, later overwritten with an array
const post2 = new BlogPost();
post2.mixed = { name: 'mr bungle', arr: [] };
post2.save(function(err) {
assert.ifError(err);
BlogPost.findById(post2._id, function(err, doc) {
assert.ifError(err);
assert.equal(Array.isArray(doc.mixed.arr), true);
doc.mixed = [{ foo: 'bar' }];
doc.save(function(err) {
assert.ifError(err);
BlogPost.findById(doc._id, function(err, doc) {
assert.ifError(err);
assert.equal(Array.isArray(doc.mixed), true);
doc.mixed.push({ hello: 'world' });
doc.mixed.push(['foo', 'bar']);
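// Mixed values are not change-tracked, so in-place mutations must be flagged with markModified() before saving.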
doc.markModified('mixed');
doc.save(function(err) {
assert.ifError(err);
BlogPost.findById(post2._id, function(err, doc) {
assert.ifError(err);
assert.deepEqual(doc.mixed[0], { foo: 'bar' });
assert.deepEqual(doc.mixed[1], { hello: 'world' });
assert.deepEqual(doc.mixed[2], ['foo', 'bar']);
if (--count) {
return;
}
done();
});
});
});
// date
const post3 = new BlogPost();
post3.mixed = new Date;
post3.save(function(err) {
assert.ifError(err);
BlogPost.findById(post3._id, function(err, doc) {
assert.ifError(err);
assert.ok(doc.mixed instanceof Date);
if (--count) {
return;
}
done();
});
});
});
});
});
});
it('populating mixed data from the constructor (gh-200)', function(done) {
const post = new BlogPost({
mixed: {
type: 'test',
github: 'rules',
nested: {
number: 3
}
}
});
assert.equal(post.mixed.type, 'test');
assert.equal(post.mixed.github, 'rules');
assert.equal(post.mixed.nested.number, 3);
done();
});
it('"type" is allowed as a key', function(done) {
mongoose.model('TestTypeDefaults', new Schema({
type: { type: String, default: 'YES!' }
}));
const TestDefaults = db.model('Test', new Schema({
type: { type: String, default: 'YES!' }
}));
let post = new TestDefaults();
assert.equal(typeof post.get('type'), 'string');
assert.equal(post.get('type'), 'YES!');
// GH-402
db.deleteModel('Test');
const TestDefaults2 = db.model('Test', new Schema({
x: { y: { type: { type: String }, owner: String } }
}));
post = new TestDefaults2;
post.x.y.type = '#402';
post.x.y.owner = 'me';
post.save(function(err) {
assert.ifError(err);
done();
});
});
it('unaltered model does not clear the doc (gh-195)', function(done) {
const post = new BlogPost();
post.title = 'woot';
post.save(function(err) {
assert.ifError(err);
BlogPost.findById(post._id, function(err, doc) {
assert.ifError(err);
// we deliberately make no alterations
doc.save(function(err) {
assert.ifError(err);
BlogPost.findById(doc._id, function(err, doc) {
assert.ifError(err);
assert.equal(doc.title, 'woot');
done();
});
});
});
});
});
describe('hooks', function() {
describe('pre', function() {
it('with undefined and null', function(done) {
const schema = new Schema({ name: String });
let called = 0;
schema.pre('save', function(next) {
called++;
next(undefined);
});
schema.pre('save', function(next) {
called++;
next(null);
});
const S = db.model('Test', schema);
const s = new S({ name: 'zupa' });
s.save(function(err) {
assert.ifError(err);
assert.equal(called, 2);
done();
});
});
it('with an async waterfall', function(done) {
const schema = new Schema({ name: String });
let called = 0;
schema.pre('save', true, function(next, done) {
called++;
process.nextTick(function() {
next();
done();
});
});
schema.pre('save', function(next) {
called++;
return next();
});
const S = db.model('Test', schema);
const s = new S({ name: 'zupa' });
const p = s.save();
p.then(function() {
assert.equal(called, 2);
done();
}).catch(done);
});
it('called on all sub levels', function(done) {
const grandSchema = new Schema({ name: String });
grandSchema.pre('save', function(next) {
this.name = 'grand';
next();
});
const childSchema = new Schema({ name: String, grand: [grandSchema] });
childSchema.pre('save', function(next) {
this.name = 'child';
next();
});
const schema = new Schema({ name: String, child: [childSchema] });
schema.pre('save', function(next) {
this.name = 'parent';
next();
});
const S = db.model('Test', schema);
const s = new S({ name: 'a', child: [{ name: 'b', grand: [{ name: 'c' }] }] });
s.save(function(err, doc) {
assert.ifError(err);
assert.equal(doc.name, 'parent');
assert.equal(doc.child[0].name, 'child');
assert.equal(doc.child[0].grand[0].name, 'grand');
done();
});
});
it('error on any sub level', function(done) {
const grandSchema = new Schema({ name: String });
grandSchema.pre('save', function(next) {
next(new Error('Error 101'));
});
const childSchema = new Schema({ name: String, grand: [grandSchema] });
childSchema.pre('save', function(next) {
this.name = 'child';
next();
});
const schema = new Schema({ name: String, child: [childSchema] });
schema.pre('save', function(next) {
this.name = 'parent';
next();
});
const S = db.model('Test', schema);
const s = new S({ name: 'a', child: [{ name: 'b', grand: [{ name: 'c' }] }] });
s.save(function(err) {
assert.ok(err instanceof Error);
assert.equal(err.message, 'Error 101');
done();
});
});
describe('init', function() {
it('has access to the true ObjectId when used with querying (gh-289)', function(done) {
const PreInitSchema = new Schema({});
let preId = null;
PreInitSchema.pre('init', function() {
preId = this._id;
});
const PreInit = db.model('Test', PreInitSchema);
const doc = new PreInit();
doc.save(function(err) {
assert.ifError(err);
PreInit.findById(doc._id, function(err) {
assert.ifError(err);
assert.strictEqual(undefined, preId);
done();
});
});
});
});
});
describe('post', function() {
it('works', function(done) {
const schema = new Schema({
title: String
});
let save = false;
let remove = false;
let init = false;
let post;
schema.post('save', function(arg) {
assert.equal(arg.id, post.id);
save = true;
});
schema.post('init', function() {
init = true;
});
schema.post('remove', function(arg) {
assert.equal(arg.id, post.id);
remove = true;
});
const BlogPost = db.model('Test', schema);
post = new BlogPost();
post.save(function(err) {
process.nextTick(function() {
assert.ifError(err);
assert.ok(save);
BlogPost.findById(post._id, function(err, doc) {
process.nextTick(function() {
assert.ifError(err);
assert.ok(init);
doc.remove(function(err) {
process.nextTick(function() {
assert.ifError(err);
assert.ok(remove);
done();
});
});
});
});
});
});
});
it('on embedded docs', function(done) {
let save = false;
const EmbeddedSchema = new Schema({
title: String
});
EmbeddedSchema.post('save', function() {
save = true;
});
const ParentSchema = new Schema({
embeds: [EmbeddedSchema]
});
const Parent = db.model('Parent', ParentSchema);
const parent = new Parent();
parent.embeds.push({ title: 'Testing post hooks for embedded docs' });
parent.save(function(err) {
assert.ifError(err);
assert.ok(save);
done();
});
});
});
});
describe('#exec()', function() {
it.skip('count()', function(done) {
BlogPost.create({ title: 'interoperable count as promise' }, function(err) {
assert.ifError(err);
const query = BlogPost.count({ title: 'interoperable count as promise' });
query.exec(function(err, count) {
assert.ifError(err);
assert.equal(count, 1);
done();
});
});
});
it('countDocuments()', function() {
return BlogPost.create({ title: 'foo' }).
then(() => BlogPost.countDocuments({ title: 'foo' }).exec()).
then(count => {
assert.equal(count, 1);
});
});
it('estimatedDocumentCount()', function() {
return BlogPost.create({ title: 'foo' }).
then(() => BlogPost.estimatedDocumentCount().exec()).
then(count => {
assert.equal(count, 1);
});
});
it('update()', function(done) {
BlogPost.create({ title: 'interoperable update as promise' }, function(err) {
assert.ifError(err);
const query = BlogPost.update({ title: 'interoperable update as promise' }, { title: 'interoperable update as promise delta' });
query.exec(function(err, res) {
assert.ifError(err);
assert.equal(res.n, 1);
assert.equal(res.nModified, 1);
BlogPost.count({ title: 'interoperable update as promise delta' }, function(err, count) {
assert.ifError(err);
assert.equal(count, 1);
done();
});
});
});
});
it('findOne()', function(done) {
BlogPost.create({ title: 'interoperable findOne as promise' }, function(err, created) {
assert.ifError(err);
const query = BlogPost.findOne({ title: 'interoperable findOne as promise' });
query.exec(function(err, found) {
assert.ifError(err);
assert.equal(found.id, created.id);
done();
});
});
});
it('find()', function(done) {
BlogPost.create(
{ title: 'interoperable find as promise' },
{ title: 'interoperable find as promise' },
function(err, createdOne, createdTwo) {
assert.ifError(err);
const query = BlogPost.find({ title: 'interoperable find as promise' }).sort('_id');
query.exec(function(err, found) {
assert.ifError(err);
assert.equal(found.length, 2);
const ids = {};
ids[String(found[0]._id)] = 1;
ids[String(found[1]._id)] = 1;
assert.ok(String(createdOne._id) in ids);
assert.ok(String(createdTwo._id) in ids);
done();
});
});
});
it.skip('remove()', function(done) {
BlogPost.create(
{ title: 'interoperable remove as promise' },
function(err) {
assert.ifError(err);
const query = BlogPost.remove({ title: 'interoperable remove as promise' });
query.exec(function(err) {
assert.ifError(err);
BlogPost.count({ title: 'interoperable remove as promise' }, function(err, count) {
assert.equal(count, 0);
done();
});
});
});
});
it('op can be changed', function(done) {
const title = 'interop ad-hoc as promise';
BlogPost.create({ title: title }, function(err, created) {
assert.ifError(err);
const query = BlogPost.count({ title: title });
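// Passing an op name to exec() swaps the operation: the query was built as count() but executes as findOne().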
query.exec('findOne', function(err, found) {
assert.ifError(err);
assert.equal(found.id, created.id);
done();
});
});
});
describe('promises', function() {
it.skip('count()', function(done) {
BlogPost.create({ title: 'interoperable count as promise 2' }, function(err) {
assert.ifError(err);
const query = BlogPost.count({ title: 'interoperable count as promise 2' });
const promise = query.exec();
promise.then(function(count) {
assert.equal(count, 1);
done();
}).catch(done);
});
});
it.skip('update()', function(done) {
BlogPost.create({ title: 'interoperable update as promise 2' }, function(err) {
assert.ifError(err);
const query = BlogPost.update({ title: 'interoperable update as promise 2' }, { title: 'interoperable update as promise delta 2' });
const promise = query.exec();
promise.then(function() {
BlogPost.count({ title: 'interoperable update as promise delta 2' }, function(err, count) {
assert.ifError(err);
assert.equal(count, 1);
done();
});
});
});
});
it('findOne()', function() {
let created;
return BlogPost.create({ title: 'interoperable findOne as promise 2' }).
then(doc => {
created = doc;
return BlogPost.
findOne({ title: 'interoperable findOne as promise 2' }).
exec();
}).
then(found => {
assert.equal(found.id, created.id);
});
});
it('find()', function(done) {
BlogPost.create(
{ title: 'interoperable find as promise 2' },
{ title: 'interoperable find as promise 2' },
function(err, createdOne, createdTwo) {
assert.ifError(err);
const query = BlogPost.find({ title: 'interoperable find as promise 2' }).sort('_id');
const promise = query.exec();
promise.then(function(found) {
assert.ifError(err);
assert.equal(found.length, 2);
assert.equal(found[0].id, createdOne.id);
assert.equal(found[1].id, createdTwo.id);
done();
}).catch(done);
});
});
it.skip('remove()', function() {
return BlogPost.create({ title: 'interoperable remove as promise 2' }).
then(() => {
return BlogPost.remove({ title: 'interoperable remove as promise 2' });
}).
then(() => {
return BlogPost.count({ title: 'interoperable remove as promise 2' });
}).
then(count => {
assert.equal(count, 0);
});
});
it('are thenable', function(done) {
const peopleSchema = new Schema({ name: String, likes: ['ObjectId'] });
const P = db.model('Test', peopleSchema);
BlogPost.create(
{ title: 'then promise 1' },
{ title: 'then promise 2' },
{ title: 'then promise 3' },
function(err, d1, d2, d3) {
assert.ifError(err);
P.create(
{ name: 'brandon', likes: [d1] },
{ name: 'ben', likes: [d2] },
{ name: 'bernie', likes: [d3] },
function(err) {
assert.ifError(err);
const promise = BlogPost.find({ title: /^then promise/ }).select('_id').exec();
promise.then(function(blogs) {
const ids = blogs.map(function(m) {
return m._id;
});
return P.where('likes').in(ids).exec();
}).then(function(people) {
assert.equal(people.length, 3);
return people;
}).then(function() {
done();
}, function(err) {
done(new Error(err));
});
});
});
});
});
});
describe('console.log', function() {
it('hides private props', function(done) {
const date = new Date(1305730951086);
const id0 = new DocumentObjectId('4dd3e169dbfb13b4570000b9');
const id1 = new DocumentObjectId('4dd3e169dbfb13b4570000b6');
const id2 = new DocumentObjectId('4dd3e169dbfb13b4570000b7');
const id3 = new DocumentObjectId('4dd3e169dbfb13b4570000b8');
const post = new BlogPost({
title: 'Test',
_id: id0,
date: date,
numbers: [5, 6, 7],
owners: [id1],
meta: { visitors: 45 },
comments: [
{ _id: id2, title: 'my comment', date: date, body: 'this is a comment' },
{ _id: id3, title: 'the next thang', date: date, body: 'this is a comment too!' }]
});
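// inspect() should only expose schema paths; internal bookkeeping such as activePaths must stay hidden.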
const out = post.inspect();
assert.equal(out.meta.visitors, post.meta.visitors);
assert.deepEqual(out.numbers, Array.prototype.slice.call(post.numbers));
assert.equal(out.date.valueOf(), post.date.valueOf());
assert.equal(out.activePaths, undefined);
done();
});
});
describe('pathnames', function() {
it('named path can be used', function(done) {
const P = db.model('Test', new Schema({ path: String }));
let threw = false;
try {
new P({ path: 'i should not throw' });
} catch (err) {
threw = true;
}
assert.ok(!threw);
done();
});
});
it('subdocuments with changed values should persist the values', function(done) {
const Subdoc = new Schema({ name: String, mixed: Schema.Types.Mixed });
const T = db.model('Test', new Schema({ subs: [Subdoc] }));
const t = new T({ subs: [{ name: 'Hubot', mixed: { w: 1, x: 2 } }] });
assert.equal(t.subs[0].name, 'Hubot');
assert.equal(t.subs[0].mixed.w, 1);
assert.equal(t.subs[0].mixed.x, 2);
t.save(function(err) {
assert.ifError(err);
T.findById(t._id, function(err, t) {
assert.ifError(err);
assert.equal(t.subs[0].name, 'Hubot');
assert.equal(t.subs[0].mixed.w, 1);
assert.equal(t.subs[0].mixed.x, 2);
const sub = t.subs[0];
sub.name = 'Hubot1';
assert.equal(sub.name, 'Hubot1');
assert.ok(sub.isModified('name'));
assert.ok(t.isModified());
t.save(function(err) {
assert.ifError(err);
T.findById(t._id, function(err, t) {
assert.ifError(err);
assert.strictEqual(t.subs[0].name, 'Hubot1');
const sub = t.subs[0];
sub.mixed.w = 5;
assert.equal(sub.mixed.w, 5);
assert.ok(!sub.isModified('mixed'));
sub.markModified('mixed');
assert.ok(sub.isModified('mixed'));
assert.ok(sub.isModified());
assert.ok(t.isModified());
t.save(function(err) {
assert.ifError(err);
T.findById(t._id, function(err, t) {
assert.ifError(err);
assert.strictEqual(t.subs[0].mixed.w, 5);
done();
});
});
});
});
});
});
});
describe('RegExps', function() {
it('can be saved', function(done) {
const post = new BlogPost({ mixed: { rgx: /^asdf$/ } });
assert.ok(post.mixed.rgx instanceof RegExp);
assert.equal(post.mixed.rgx.source, '^asdf$');
post.save(function(err) {
assert.ifError(err);
BlogPost.findById(post._id, function(err, post) {
assert.ifError(err);
assert.ok(post.mixed.rgx instanceof RegExp);
assert.equal(post.mixed.rgx.source, '^asdf$');
done();
});
});
});
});
// Demonstration showing why GH-261 is a misunderstanding
it('a single instantiated document should be able to update its embedded documents more than once', function(done) {
const post = new BlogPost();
post.comments.push({ title: 'one' });
post.save(function(err) {
assert.ifError(err);
assert.equal(post.comments[0].title, 'one');
post.comments[0].title = 'two';
assert.equal(post.comments[0].title, 'two');
post.save(function(err) {
assert.ifError(err);
BlogPost.findById(post._id, function(err, found) {
assert.ifError(err);
assert.equal(found.comments[0].title, 'two');
done();
});
});
});
});
describe('save()', function() {
describe('when no callback is passed', function() {
it('should emit error on its Model when there are listeners', function(done) {
const DefaultErrSchema = new Schema({});
DefaultErrSchema.pre('save', function(next) {
next(new Error);
});
const DefaultErr = db.model('Test', DefaultErrSchema);
DefaultErr.on('error', function(err) {
assert.ok(err instanceof Error);
done();
});
new DefaultErr().save().catch(() => {});
});
});
it('saved changes made within callback of a previous no-op save gh-1139', function(done) {
const post = new BlogPost({ title: 'first' });
post.save(function(err) {
assert.ifError(err);
// no op
post.save(function(err) {
assert.ifError(err);
post.title = 'changed';
post.save(function(err) {
assert.ifError(err);
BlogPost.findById(post, function(err, doc) {
assert.ifError(err);
assert.equal(doc.title, 'changed');
done();
});
});
});
});
});
it('rejects new documents that have no _id set (gh-1595)', function(done) {
const s = new Schema({ _id: { type: String } });
const B = db.model('Test', s);
const b = new B;
b.save(function(err) {
assert.ok(err);
assert.ok(/must have an _id/.test(err));
done();
});
});
it('no TypeError when attempting to save more than once after using atomics', function(done) {
const M = db.model('Test', new Schema({
test: { type: 'string', unique: true },
elements: [{
el: { type: 'string', required: true }
}]
}));
const a = new M({
test: 'a',
elements: [{ el: 'b' }]
});
const b = new M({
test: 'b',
elements: [{ el: 'c' }]
});
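// After `a` is saved, saving `b` with test: 'a' violates the unique index, so both save attempts should fail with an error rather than a TypeError.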
M.init(function() {
a.save(function() {
b.save(function() {
b.elements.push({ el: 'd' });
b.test = 'a';
b.save(function(error, res) {
assert.ok(error);
assert.strictEqual(res, undefined);
b.save(function(error, res) {
assert.ok(error);
assert.strictEqual(res, undefined);
M.collection.drop(done);
});
});
});
});
});
});
it('should clear $versionError and saveOptions after save (gh-8040)', function(done) {
const schema = new Schema({ name: String });
const Model = db.model('Test', schema);
const doc = new Model({
name: 'Fonger'
});
const savePromise = doc.save();
assert.ok(doc.$__.$versionError);
assert.ok(doc.$__.saveOptions);
savePromise.then(function() {
assert.ok(!doc.$__.$versionError);
assert.ok(!doc.$__.saveOptions);
done();
}).catch(done);
});
});
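// $__delta() is the internal helper save() uses on existing documents to compute
// the update to send to MongoDB; index [1] of its result holds the update
// operators ($set, $unset, ...) that these tests assert on directly.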
describe('_delta()', function() {
it('should overwrite arrays when directly set (gh-1126)', function(done) {
BlogPost.create({ title: 'gh-1126', numbers: [1, 2] }, function(err, b) {
assert.ifError(err);
BlogPost.findById(b._id, function(err, b) {
assert.ifError(err);
assert.deepEqual([1, 2].join(), b.numbers.join());
b.numbers = [];
b.numbers.push(3);
const d = b.$__delta()[1];
assert.ok('$set' in d, 'invalid delta ' + JSON.stringify(d));
assert.ok(Array.isArray(d.$set.numbers));
assert.equal(d.$set.numbers.length, 1);
assert.equal(d.$set.numbers[0], 3);
b.save(function(err) {
assert.ifError(err);
BlogPost.findById(b._id, function(err, b) {
assert.ifError(err);
assert.ok(Array.isArray(b.numbers));
assert.equal(b.numbers.length, 1);
assert.equal(b.numbers[0], 3);
b.numbers = [3];
const d = b.$__delta();
assert.ok(!d);
b.numbers = [4];
b.numbers.push(5);
b.save(function(err) {
assert.ifError(err);
BlogPost.findById(b._id, function(err, b) {
assert.ifError(err);
assert.ok(Array.isArray(b.numbers));
assert.equal(b.numbers.length, 2);
assert.equal(b.numbers[0], 4);
assert.equal(b.numbers[1], 5);
done();
});
});
});
});
});
});
});
it('should use $set when subdoc changed before pulling (gh-1303)', function(done) {
const B = BlogPost;
B.create(
{ title: 'gh-1303', comments: [{ body: 'a' }, { body: 'b' }, { body: 'c' }] },
function(err, b) {
assert.ifError(err);
B.findById(b._id, function(err, b) {
assert.ifError(err);
b.comments[2].body = 'changed';
b.comments.pull(b.comments[1]);
assert.equal(b.comments.length, 2);
assert.equal(b.comments[0].body, 'a');
assert.equal(b.comments[1].body, 'changed');
const d = b.$__delta()[1];
assert.ok('$set' in d, 'invalid delta ' + JSON.stringify(d));
assert.ok(Array.isArray(d.$set.comments));
assert.equal(d.$set.comments.length, 2);
b.save(function(err) {
assert.ifError(err);
B.findById(b._id, function(err, b) {
assert.ifError(err);
assert.ok(Array.isArray(b.comments));
assert.equal(b.comments.length, 2);
assert.equal(b.comments[0].body, 'a');
assert.equal(b.comments[1].body, 'changed');
done();
});
});
});
});
});
});
describe('backward compatibility', function() {
it('with conflicted data in db', function(done) {
const M = db.model('Test', new Schema({ namey: { first: String, last: String } }));
const m = new M({ namey: '[object Object]' });
m.namey = { first: 'GI', last: 'Joe' }; // <-- should overwrite the string
m.save(function(err) {
assert.strictEqual(err, null);
assert.strictEqual('GI', m.namey.first);
assert.strictEqual('Joe', m.namey.last);
done();
});
});
it('with positional notation on path not existing in schema (gh-1048)', function(done) {
const db = start();
const M = db.model('Test', Schema({ name: 'string' }));
db.on('open', function() {
const o = {
name: 'gh-1048',
_id: new mongoose.Types.ObjectId,
databases: {
0: { keys: 100, expires: 0 },
15: { keys: 1, expires: 0 }
}
};
M.collection.insertOne(o, { safe: true }, function(err) {
assert.ifError(err);
M.findById(o._id, function(err, doc) {
db.close();
assert.ifError(err);
assert.ok(doc);
assert.ok(doc._doc.databases);
assert.ok(doc._doc.databases['0']);
assert.ok(doc._doc.databases['15']);
assert.equal(doc.databases, undefined);
done();
});
});
});
});
});
describe('non-schema adhoc property assignments', function() {
it('are not saved', function(done) {
const B = BlogPost;
const b = new B;
b.whateveriwant = 10;
b.save(function(err) {
assert.ifError(err);
B.collection.findOne({ _id: b._id }, function(err, doc) {
assert.ifError(err);
assert.ok(!('whateveriwant' in doc));
done();
});
});
});
});
it('should not throw range error when using Number _id and saving existing doc (gh-691)', function(done) {
const T = new Schema({ _id: Number, a: String });
const D = db.model('Test', T);
const d = new D({ _id: 1 });
d.save(function(err) {
assert.ifError(err);
D.findById(d._id, function(err, d) {
assert.ifError(err);
d.a = 'yo';
d.save(function(err) {
assert.ifError(err);
done();
});
});
});
});
describe('setting an unset value', function() {
it('is saved (gh-742)', function(done) {
const DefaultTestObject = db.model('Test',
new Schema({
score: { type: Number, default: 55 }
})
);
const myTest = new DefaultTestObject();
myTest.save(function(err, doc) {
assert.ifError(err);
assert.equal(doc.score, 55);
DefaultTestObject.findById(doc._id, function(err, doc) {
assert.ifError(err);
doc.score = undefined; // unset
doc.save(function(err) {
assert.ifError(err);
DefaultTestObject.findById(doc._id, function(err, doc) {
assert.ifError(err);
doc.score = 55;
doc.save(function(err, doc) {
assert.ifError(err);
assert.equal(doc.score, 55);
done();
});
});
});
});
});
});
});
it('path is cast to correct value when retrieved from db', function(done) {
const schema = new Schema({ title: { type: 'string', index: true } });
const T = db.model('Test', schema);
T.collection.insertOne({ title: 234 }, { safe: true }, function(err) {
assert.ifError(err);
T.findOne(function(err, doc) {
assert.ifError(err);
assert.equal(doc.title, '234');
done();
});
});
});
it('setting a path to undefined should retain the value as undefined', function(done) {
const B = BlogPost;
const doc = new B;
doc.title = 'css3';
assert.equal(doc.$__delta()[1].$set.title, 'css3');
doc.title = undefined;
assert.equal(doc.$__delta()[1].$unset.title, 1);
assert.strictEqual(undefined, doc.$__delta()[1].$set.title);
doc.title = 'css3';
doc.author = 'aaron';
doc.numbers = [3, 4, 5];
doc.meta.date = new Date;
doc.meta.visitors = 89;
doc.comments = [{ title: 'thanksgiving', body: 'yuuuumm' }];
doc.comments.push({ title: 'turkey', body: 'cranberries' });
doc.save(function(err) {
assert.ifError(err);
B.findById(doc._id, function(err, b) {
assert.ifError(err);
assert.equal(b.title, 'css3');
assert.equal(b.author, 'aaron');
assert.equal(b.meta.date.toString(), doc.meta.date.toString());
assert.equal(b.meta.visitors.valueOf(), doc.meta.visitors.valueOf());
assert.equal(b.comments.length, 2);
assert.equal(b.comments[0].title, 'thanksgiving');
assert.equal(b.comments[0].body, 'yuuuumm');
assert.equal(b.comments[1].title, 'turkey');
assert.equal(b.comments[1].body, 'cranberries');
b.title = undefined;
b.author = null;
b.meta.date = undefined;
b.meta.visitors = null;
b.comments[0].title = null;
b.comments[0].body = undefined;
b.save(function(err) {
assert.ifError(err);
B.findById(b._id, function(err, b) {
assert.ifError(err);
assert.strictEqual(undefined, b.title);
assert.strictEqual(null, b.author);
assert.strictEqual(undefined, b.meta.date);
assert.strictEqual(null, b.meta.visitors);
assert.strictEqual(null, b.comments[0].title);
assert.strictEqual(undefined, b.comments[0].body);
assert.equal(b.comments[1].title, 'turkey');
assert.equal(b.comments[1].body, 'cranberries');
b.meta = undefined;
b.comments = undefined;
b.save(function(err) {
assert.ifError(err);
B.collection.findOne({ _id: b._id }, function(err, b) {
assert.ifError(err);
assert.strictEqual(undefined, b.meta);
assert.strictEqual(undefined, b.comments);
done();
});
});
});
});
});
});
});
describe('unsetting a default value', function() {
it('should be ignored (gh-758)', function(done) {
const M = db.model('Test', new Schema({ s: String, n: Number, a: Array }));
M.collection.insertOne({}, { safe: true }, function(err) {
assert.ifError(err);
M.findOne(function(err, m) {
assert.ifError(err);
m.s = m.n = m.a = undefined;
assert.equal(m.$__delta(), undefined);
done();
});
});
});
});
it('allow for object passing to ref paths (gh-1606)', function(done) {
const schA = new Schema({ title: String });
const schma = new Schema({
thing: { type: Schema.Types.ObjectId, ref: 'Test' },
subdoc: {
some: String,
thing: [{ type: Schema.Types.ObjectId, ref: 'Test' }]
}
});
const M1 = db.model('Test', schA);
const M2 = db.model('Test1', schma);
const a = new M1({ title: 'hihihih' }).toObject();
const thing = new M2({
thing: a,
subdoc: {
title: 'blah',
thing: [a]
}
});
assert.equal(thing.thing, a._id);
assert.equal(thing.subdoc.thing[0], a._id);
done();
});
it('setters trigger on null values (gh-1445)', function(done) {
const calls = [];
const OrderSchema = new Schema({
total: {
type: Number,
default: 0,
set: function(value) {
calls.push(value);
return 10;
}
}
});
const Order = db.model('Test', OrderSchema);
const o = new Order({ total: null });
assert.deepEqual(calls, [0, null]);
assert.equal(o.total, 10);
done();
});
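// Saving an empty array into a 2dsphere-indexed path makes MongoDB reject the
// document, so mongoose skips applying the implicit [] default to
// geospatial-indexed paths that were never set (gh-1668).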
describe('Skip setting default value for Geospatial-indexed fields (gh-1668)', function() {
beforeEach(() => db.deleteModel(/Person/));
this.timeout(5000);
it('2dsphere indexed field with value is saved', function() {
const PersonSchema = new Schema({
name: String,
loc: {
type: [Number],
index: '2dsphere'
}
}, { autoIndex: false });
const Person = db.model('Person', PersonSchema);
const loc = [0.3, 51.4];
const p = new Person({
name: 'Jimmy Page',
loc: loc
});
return co(function*() {
yield Person.createIndexes();
yield p.save();
const personDoc = yield Person.findById(p._id);
assert.equal(personDoc.loc[0], loc[0]);
assert.equal(personDoc.loc[1], loc[1]);
});
});
it('2dsphere indexed field without value is saved (gh-1668)', function() {
const PersonSchema = new Schema({
name: String,
loc: {
type: [Number],
index: '2dsphere'
}
}, { autoIndex: false });
const Person = db.model('Person', PersonSchema);
const p = new Person({
name: 'Jimmy Page'
});
return co(function*() {
yield Person.createIndexes();
yield p.save();
const personDoc = yield Person.findById(p._id);
assert.equal(personDoc.name, 'Jimmy Page');
assert.equal(personDoc.loc, undefined);
});
});
it('2dsphere indexed field in subdoc without value is saved', function() {
const PersonSchema = new Schema({
name: { type: String, required: true },
nested: {
tag: String,
loc: {
type: [Number]
}
}
}, { autoIndex: false });
PersonSchema.index({ 'nested.loc': '2dsphere' });
const Person = db.model('Person', PersonSchema);
const p = new Person({
name: 'Jimmy Page'
});
p.nested.tag = 'guitarist';
return co(function*() {
yield Person.collection.drop();
yield Person.createIndexes();
yield p.save();
const personDoc = yield Person.findById(p._id);
assert.equal(personDoc.name, 'Jimmy Page');
assert.equal(personDoc.nested.tag, 'guitarist');
assert.equal(personDoc.nested.loc, undefined);
});
});
it('2dsphere indexed field with geojson without value is saved (gh-3233)', function() {
const LocationSchema = new Schema({
name: { type: String, required: true },
location: {
type: { type: String, enum: ['Point'] },
coordinates: [Number]
}
}, { autoIndex: false });
LocationSchema.index({ location: '2dsphere' });
const Location = db.model('Test', LocationSchema);
return co(function*() {
yield Location.collection.drop();
yield Location.createIndexes();
yield Location.create({
name: 'Undefined location'
});
});
});
it('Doc with 2dsphere indexed field without initial value can be updated', function() {
const PersonSchema = new Schema({
name: String,
loc: {
type: [Number],
index: '2dsphere'
}
}, { autoIndex: false });
const Person = db.model('Person', PersonSchema);
const p = new Person({
name: 'Jimmy Page'
});
return co(function*() {
yield Person.collection.drop();
yield Person.createIndexes();
yield p.save();
const updates = {
$set: {
loc: [0.3, 51.4]
}
};
const personDoc = yield Person.findByIdAndUpdate(p._id, updates, { new: true });
assert.equal(personDoc.loc[0], updates.$set.loc[0]);
assert.equal(personDoc.loc[1], updates.$set.loc[1]);
});
});
it('2dsphere indexed required field without value is rejected', function() {
const PersonSchema = new Schema({
name: String,
loc: {
type: [Number],
required: true,
index: '2dsphere'
}
}, { autoIndex: false });
const Person = db.model('Person', PersonSchema);
const p = new Person({
name: 'Jimmy Page'
});
return co(function*() {
yield Person.collection.drop();
yield Person.createIndexes();
let err;
yield p.save().catch(_err => { err = _err; });
assert.ok(err instanceof MongooseError);
assert.ok(err instanceof ValidationError);
});
});
it('2dsphere field without value but with schema default is saved', function() {
const loc = [0, 1];
const PersonSchema = new Schema({
name: String,
loc: {
type: [Number],
default: loc,
index: '2dsphere'
}
}, { autoIndex: false });
const Person = db.model('Person', PersonSchema);
const p = new Person({
name: 'Jimmy Page'
});
return co(function*() {
yield Person.collection.drop();
yield Person.createIndexes();
yield p.save();
const personDoc = yield Person.findById(p._id);
assert.equal(loc[0], personDoc.loc[0]);
assert.equal(loc[1], personDoc.loc[1]);
});
});
it('2d indexed field without value is saved', function() {
const PersonSchema = new Schema({
name: String,
loc: {
type: [Number],
index: '2d'
}
}, { autoIndex: false });
const Person = db.model('Person', PersonSchema);
const p = new Person({
name: 'Jimmy Page'
});
return co(function*() {
yield Person.collection.drop();
yield Person.createIndexes();
yield p.save();
const personDoc = yield Person.findById(p._id);
assert.equal(personDoc.loc, undefined);
});
});
it.skip('Compound index with 2dsphere field without value is saved', function() {
const PersonSchema = new Schema({
name: String,
type: String,
slug: { type: String, index: { unique: true } },
loc: { type: [Number] },
tags: { type: [String], index: true }
}, { autoIndex: false });
PersonSchema.index({ name: 1, loc: '2dsphere' });
const Person = db.model('Person', PersonSchema);
const p = new Person({
name: 'Jimmy Page',
type: 'musician',
slug: 'ledzep-1',
tags: ['guitarist']
});
return co(function*() {
yield Person.collection.drop();
yield Person.createIndexes();
yield p.save();
const personDoc = yield Person.findById(p._id);
assert.equal(personDoc.name, 'Jimmy Page');
assert.equal(personDoc.loc, undefined);
yield Person.collection.drop();
});
});
it.skip('Compound index on field earlier declared with 2dsphere index is saved', function() {
const PersonSchema = new Schema({
name: String,
type: String,
slug: { type: String, index: { unique: true } },
loc: { type: [Number] },
tags: { type: [String], index: true }
}, { autoIndex: false });
PersonSchema.index({ loc: '2dsphere' });
PersonSchema.index({ name: 1, loc: -1 });
const Person = db.model('Person', PersonSchema);
const p = new Person({
name: 'Jimmy Page',
type: 'musician',
slug: 'ledzep-1',
tags: ['guitarist']
});
return co(function*() {
yield Person.collection.drop();
yield Person.createIndexes();
yield p.save();
const personDoc = yield Person.findById(p._id);
assert.equal(personDoc.name, 'Jimmy Page');
assert.equal(personDoc.loc, undefined);
yield Person.collection.drop();
});
});
});
it('save max bson size error with buffering (gh-3906)', function(done) {
this.timeout(10000);
const db = start({ noErrorListener: true });
const Test = db.model('Test', { name: Object });
const test = new Test({
name: {
data: (new Array(16 * 1024 * 1024)).join('x')
}
});
test.save(function(error) {
assert.ok(error);
assert.equal(error.name, 'MongoError');
db.close(done);
});
});
it('reports max bson size error in save (gh-3906)', function(done) {
this.timeout(10000);
const db = start({ noErrorListener: true });
const Test = db.model('Test', { name: Object });
const test = new Test({
name: {
data: (new Array(16 * 1024 * 1024)).join('x')
}
});
db.on('connected', function() {
test.save(function(error) {
assert.ok(error);
assert.equal(error.name, 'MongoError');
db.close(done);
});
});
});
describe('bug fixes', function() {
it('doesnt crash (gh-1920)', function(done) {
const parentSchema = new Schema({
children: [new Schema({
name: String
})]
});
const Parent = db.model('Parent', parentSchema);
const parent = new Parent();
parent.children.push({ name: 'child name' });
parent.save(function(err, it) {
assert.ifError(err);
parent.children.push({ name: 'another child' });
Parent.findByIdAndUpdate(it._id, { $set: { children: parent.children } }, function(err) {
assert.ifError(err);
done();
});
});
});
it('doesnt reset "modified" status for fields', function(done) {
const UniqueSchema = new Schema({
changer: String,
unique: {
type: Number,
unique: true
}
});
const Unique = db.model('Test', UniqueSchema);
const u1 = new Unique({
changer: 'a',
unique: 5
});
const u2 = new Unique({
changer: 'a',
unique: 6
});
Unique.on('index', function() {
u1.save(function(err) {
assert.ifError(err);
assert.ok(!u1.isModified('changer'));
u2.save(function(err) {
assert.ifError(err);
assert.ok(!u2.isModified('changer'));
u2.changer = 'b';
u2.unique = 5;
assert.ok(u2.isModified('changer'));
u2.save(function(err) {
assert.ok(err);
assert.ok(u2.isModified('changer'));
Unique.collection.drop(done);
});
});
});
});
});
it('insertMany() (gh-723)', function(done) {
const schema = new Schema({
name: String
}, { timestamps: true });
const Movie = db.model('Movie', schema);
const arr = [{ name: 'Star Wars' }, { name: 'The Empire Strikes Back' }];
Movie.insertMany(arr, function(error, docs) {
assert.ifError(error);
assert.equal(docs.length, 2);
assert.ok(!docs[0].isNew);
assert.ok(!docs[1].isNew);
assert.ok(docs[0].createdAt);
assert.ok(docs[1].createdAt);
assert.strictEqual(docs[0].__v, 0);
assert.strictEqual(docs[1].__v, 0);
Movie.find({}, function(error, docs) {
assert.ifError(error);
assert.equal(docs.length, 2);
assert.ok(docs[0].createdAt);
assert.ok(docs[1].createdAt);
done();
});
});
});
it('insertMany() ordered option for constraint errors (gh-3893)', function(done) {
start.mongodVersion(function(err, version) {
if (err) {
done(err);
return;
}
const mongo34 = version[0] > 3 || (version[0] === 3 && version[1] >= 4);
if (!mongo34) {
done();
return;
}
test();
});
function test() {
const schema = new Schema({
name: { type: String, unique: true }
});
const Movie = db.model('Movie', schema);
const arr = [
{ name: 'Star Wars' },
{ name: 'Star Wars' },
{ name: 'The Empire Strikes Back' }
];
Movie.on('index', function(error) {
assert.ifError(error);
Movie.insertMany(arr, { ordered: false }, function(error) {
assert.equal(error.message.indexOf('E11000'), 0);
Movie.find({}).sort({ name: 1 }).exec(function(error, docs) {
assert.ifError(error);
assert.equal(docs.length, 2);
assert.equal(docs[0].name, 'Star Wars');
assert.equal(docs[1].name, 'The Empire Strikes Back');
Movie.collection.drop(done);
});
});
});
}
});
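// With `lean: true`, insertMany() skips hydrating and validating the documents
// and passes them to the driver as-is (gh-8234).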
describe('insertMany() lean option to bypass validation (gh-8234)', () => {
const gh8234Schema = new Schema({
name: String,
age: { type: Number, required: true }
});
const arrGh8234 = [{ name: 'Rigas' }, { name: 'Tonis', age: 9 }];
let Gh8234;
before('init model', () => {
Gh8234 = db.model('Test', gh8234Schema);
return Gh8234.deleteMany({});
});
afterEach('delete inserted data', function() {
return Gh8234.deleteMany({});
});
it('insertMany() should bypass validation if lean option set to `true`', (done) => {
Gh8234.insertMany(arrGh8234, { lean: true }, (error, docs) => {
assert.ifError(error);
assert.equal(docs.length, 2);
Gh8234.find({}, (error, docs) => {
assert.ifError(error);
assert.equal(docs.length, 2);
assert.equal(arrGh8234[0].age, undefined);
assert.equal(arrGh8234[1].age, 9);
done();
});
});
});
it('insertMany() should validate if lean option not set', (done) => {
Gh8234.insertMany(arrGh8234, (error) => {
assert.ok(error);
assert.equal(error.name, 'ValidationError');
assert.equal(error.errors.age.kind, 'required');
done();
});
});
it('insertMany() should validate if lean option set to `false`', (done) => {
Gh8234.insertMany(arrGh8234, { lean: false }, (error) => {
assert.ok(error);
assert.equal(error.name, 'ValidationError');
assert.equal(error.errors.age.kind, 'required');
done();
});
});
});
it('insertMany() ordered option for validation errors (gh-5068)', function(done) {
start.mongodVersion(function(err, version) {
if (err) {
done(err);
return;
}
const mongo34 = version[0] > 3 || (version[0] === 3 && version[1] >= 4);
if (!mongo34) {
done();
return;
}
test();
});
function test() {
const schema = new Schema({
name: { type: String, required: true }
});
const Movie = db.model('Movie', schema);
const arr = [
{ name: 'Star Wars' },
{ foo: 'Star Wars' },
{ name: 'The Empire Strikes Back' }
];
Movie.insertMany(arr, { ordered: false }, function(error) {
assert.ifError(error);
Movie.find({}).sort({ name: 1 }).exec(function(error, docs) {
assert.ifError(error);
assert.equal(docs.length, 2);
assert.equal(docs[0].name, 'Star Wars');
assert.equal(docs[1].name, 'The Empire Strikes Back');
done();
});
});
}
});
it('insertMany() ordered option for single validation error', function(done) {
start.mongodVersion(function(err, version) {
if (err) {
done(err);
return;
}
const mongo34 = version[0] > 3 || (version[0] === 3 && version[1] >= 4);
if (!mongo34) {
done();
return;
}
test();
});
function test() {
const schema = new Schema({
name: { type: String, required: true }
});
const Movie = db.model('Movie', schema);
const arr = [
{ foo: 'Star Wars' },
{ foo: 'The Fast and the Furious' }
];
Movie.insertMany(arr, { ordered: false }, function(error) {
assert.ifError(error);
Movie.find({}).sort({ name: 1 }).exec(function(error, docs) {
assert.equal(docs.length, 0);
done();
});
});
}
});
it('insertMany() hooks (gh-3846)', function(done) {
const schema = new Schema({
name: String
});
let calledPre = 0;
let calledPost = 0;
schema.pre('insertMany', function(next, docs) {
assert.equal(docs.length, 2);
assert.equal(docs[0].name, 'Star Wars');
++calledPre;
next();
});
schema.pre('insertMany', function(next, docs) {
assert.equal(docs.length, 2);
assert.equal(docs[0].name, 'Star Wars');
docs[0].name = 'A New Hope';
++calledPre;
next();
});
schema.post('insertMany', function() {
++calledPost;
});
const Movie = db.model('Movie', schema);
const arr = [{ name: 'Star Wars' }, { name: 'The Empire Strikes Back' }];
Movie.insertMany(arr, function(error, docs) {
assert.ifError(error);
assert.equal(docs.length, 2);
assert.equal(calledPre, 2);
assert.equal(calledPost, 1);
Movie.find({}).sort({ name: 1 }).exec(function(error, docs) {
assert.ifError(error);
assert.equal(docs[0].name, 'A New Hope');
assert.equal(docs[1].name, 'The Empire Strikes Back');
done();
});
});
});
it('insertMany() with timestamps (gh-723)', function() {
const schema = new Schema({ name: String }, { timestamps: true });
const Movie = db.model('Movie', schema);
const start = Date.now();
const arr = [{ name: 'Star Wars' }, { name: 'The Empire Strikes Back' }];
return Movie.insertMany(arr).
then(docs => {
assert.equal(docs.length, 2);
assert.ok(!docs[0].isNew);
assert.ok(!docs[1].isNew);
assert.ok(docs[0].createdAt.valueOf() >= start);
assert.ok(docs[1].createdAt.valueOf() >= start);
}).
then(() => Movie.find()).
then(docs => {
assert.equal(docs.length, 2);
assert.ok(docs[0].createdAt.valueOf() >= start);
assert.ok(docs[1].createdAt.valueOf() >= start);
});
});
it('returns empty array if no documents (gh-8130)', function() {
const Movie = db.model('Movie', Schema({ name: String }));
return Movie.insertMany([]).then(docs => assert.deepEqual(docs, []));
});
it('insertMany() multi validation error with ordered false (gh-5337)', function(done) {
const schema = new Schema({
name: { type: String, required: true }
});
const Movie = db.model('Movie', schema);
const arr = [
{ foo: 'The Phantom Menace' },
{ name: 'Star Wars' },
{ name: 'The Empire Strikes Back' },
{ foobar: 'The Force Awakens' }
];
const opts = { ordered: false, rawResult: true };
Movie.insertMany(arr, opts, function(error, res) {
assert.ifError(error);
assert.equal(res.mongoose.validationErrors.length, 2);
assert.equal(res.mongoose.validationErrors[0].name, 'ValidationError');
assert.equal(res.mongoose.validationErrors[1].name, 'ValidationError');
done();
});
});
it('insertMany() depopulate (gh-4590)', function(done) {
const personSchema = new Schema({
name: String
});
const movieSchema = new Schema({
name: String,
leadActor: {
type: Schema.Types.ObjectId,
ref: 'Person'
}
});
const Person = db.model('Person', personSchema);
const Movie = db.model('Movie', movieSchema);
const arnold = new Person({ name: 'Arnold Schwarzenegger' });
const movies = [{ name: 'Predator', leadActor: arnold }];
Movie.insertMany(movies, function(error, docs) {
assert.ifError(error);
assert.equal(docs.length, 1);
Movie.findOne({ name: 'Predator' }, function(error, doc) {
assert.ifError(error);
assert.equal(doc.leadActor.toHexString(), arnold._id.toHexString());
done();
});
});
});
it('insertMany() with promises (gh-4237)', function(done) {
const schema = new Schema({
name: String
});
const Movie = db.model('Movie', schema);
const arr = [{ name: 'Star Wars' }, { name: 'The Empire Strikes Back' }];
Movie.insertMany(arr).then(function(docs) {
assert.equal(docs.length, 2);
assert.ok(!docs[0].isNew);
assert.ok(!docs[1].isNew);
Movie.find({}, function(error, docs) {
assert.ifError(error);
assert.equal(docs.length, 2);
done();
});
});
});
it('insertMany() with error handlers (gh-6228)', function() {
const schema = new Schema({
name: { type: String, unique: true }
}, { autoIndex: false });
let postCalled = 0;
let postErrorCalled = 0;
schema.post('insertMany', (doc, next) => {
++postCalled;
next();
});
schema.post('insertMany', (err, doc, next) => {
++postErrorCalled;
next(err);
});
const Movie = db.model('Movie', schema);
return co(function*() {
yield Movie.createIndexes();
let threw = false;
try {
yield Movie.insertMany([
{ name: 'Star Wars' },
{ name: 'Star Wars' }
]);
} catch (error) {
assert.ok(error);
threw = true;
}
assert.ok(threw);
assert.equal(postCalled, 0);
assert.equal(postErrorCalled, 1);
yield Movie.collection.drop();
});
});
it('insertMany() with non-object array error can be caught (gh-8363)', function(done) {
const schema = mongoose.Schema({
_id: mongoose.Schema.Types.ObjectId,
url: { type: String }
});
const Image = db.model('Test', schema);
Image.insertMany(['a', 'b', 'c']).catch((error) => {
assert.equal(error.name, 'ObjectParameterError');
done();
});
});
it('insertMany() return docs with empty modifiedPaths (gh-7852)', function() {
const schema = new Schema({
name: { type: String }
});
const Food = db.model('Test', schema);
return co(function*() {
const foods = yield Food.insertMany([
{ name: 'Rice dumplings' },
{ name: 'Beef noodle' }
]);
assert.equal(foods[0].modifiedPaths().length, 0);
assert.equal(foods[1].modifiedPaths().length, 0);
});
});
it('deleteOne() with options (gh-7857)', function(done) {
const schema = new Schema({
name: String
});
const Character = db.model('Test', schema);
const arr = [
{ name: 'Tyrion Lannister' },
{ name: 'Cersei Lannister' },
{ name: 'Jon Snow' },
{ name: 'Daenerys Targaryen' }
];
Character.insertMany(arr, function(err, docs) {
assert.ifError(err);
assert.equal(docs.length, 4);
Character.deleteOne({ name: 'Jon Snow' }, { w: 1 }, function(err) {
assert.ifError(err);
Character.find({}, function(err, docs) {
assert.ifError(err);
assert.equal(docs.length, 3);
done();
});
});
});
});
it('deleteMany() with options (gh-6805)', function(done) {
const schema = new Schema({
name: String
});
const Character = db.model('Test', schema);
const arr = [
{ name: 'Tyrion Lannister' },
{ name: 'Cersei Lannister' },
{ name: 'Jon Snow' },
{ name: 'Daenerys Targaryen' }
];
Character.insertMany(arr, function(err, docs) {
assert.ifError(err);
assert.equal(docs.length, 4);
Character.deleteMany({ name: /Lannister/ }, { w: 1 }, function(err) {
assert.ifError(err);
Character.find({}, function(err, docs) {
assert.ifError(err);
assert.equal(docs.length, 2);
done();
});
});
});
});
it('runs default function with correct this scope in DocumentArray (gh-6840)', function() {
const schema = new Schema({
title: String,
actors: {
type: [{ name: String, character: String }],
default: function() {
// `this` should be the root document and have the initial data
if (this.title === 'Passengers') {
return [
{ name: 'Jennifer Lawrence', character: 'Aurora Lane' },
{ name: 'Chris Pratt', character: 'Jim Preston' }
];
}
return [];
}
}
});
const Movie = db.model('Movie', schema);
const movie = new Movie({ title: 'Passengers' });
assert.equal(movie.actors.length, 2);
});
describe('3.6 features', function() {
before(function(done) {
start.mongodVersion((err, version) => {
if (err) {
done(err);
return;
}
const mongo36 = version[0] > 3 || (version[0] === 3 && version[1] >= 6);
if (!mongo36) {
this.skip();
}
done();
});
});
it('arrayFilter (gh-5965)', function() {
return co(function*() {
const MyModel = db.model('Test', new Schema({
_id: Number,
grades: [Number]
}));
yield MyModel.create([
{ _id: 1, grades: [95, 92, 90] },
{ _id: 2, grades: [98, 100, 102] },
{ _id: 3, grades: [95, 110, 100] }
]);
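// `$[element]` is MongoDB's filtered positional operator: only array entries
// matching the `element` condition in `arrayFilters` are updated.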
yield MyModel.updateMany({}, { $set: { 'grades.$[element]': 100 } }, {
arrayFilters: [{ element: { $gte: 100 } }]
});
const docs = yield MyModel.find().sort({ _id: 1 });
assert.deepEqual(docs[0].toObject().grades, [95, 92, 90]);
assert.deepEqual(docs[1].toObject().grades, [98, 100, 100]);
assert.deepEqual(docs[2].toObject().grades, [95, 100, 100]);
});
});
it('arrayFilter casting (gh-5965) (gh-7079)', function() {
return co(function*() {
const MyModel = db.model('Test', new Schema({
_id: Number,
grades: [Number]
}));
yield MyModel.create([
{ _id: 1, grades: [95, 92, 90] },
{ _id: 2, grades: [98, 100, 102] },
{ _id: 3, grades: [95, 110, 100] }
]);
yield MyModel.updateMany({}, { $set: { 'grades.$[element]': 100 } }, {
arrayFilters: [{
element: { $gte: '100', $lte: { valueOf: () => 109 } }
}]
});
const docs = yield MyModel.find().sort({ _id: 1 });
assert.deepEqual(docs[0].toObject().grades, [95, 92, 90]);
assert.deepEqual(docs[1].toObject().grades, [98, 100, 100]);
assert.deepEqual(docs[2].toObject().grades, [95, 110, 100]);
});
});
describe('watch()', function() {
before(function() {
if (!process.env.REPLICA_SET) {
this.skip();
}
});
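// Change streams require a replica set; Model.watch() returns a change stream
// that emits a 'change' event for each write on the model's collection.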
it('watch() (gh-5964)', function() {
return co(function*() {
const MyModel = db.model('Test', new Schema({ name: String }));
const doc = yield MyModel.create({ name: 'Ned Stark' });
const changed = new global.Promise(resolve => {
MyModel.watch().once('change', data => resolve(data));
});
yield doc.remove();
const changeData = yield changed;
assert.equal(changeData.operationType, 'delete');
assert.equal(changeData.documentKey._id.toHexString(),
doc._id.toHexString());
});
});
it('watch() before connecting (gh-5964)', function() {
return co(function*() {
const db = start();
const MyModel = db.model('Test', new Schema({ name: String }));
// Synchronous, before connection happens
const changeStream = MyModel.watch();
const changed = new global.Promise(resolve => {
changeStream.once('change', data => resolve(data));
});
yield db;
yield MyModel.create({ name: 'Ned Stark' });
const changeData = yield changed;
assert.equal(changeData.operationType, 'insert');
assert.equal(changeData.fullDocument.name, 'Ned Stark');
});
});
it('watch() close() prevents buffered watch op from running (gh-7022)', function() {
return co(function*() {
const db = start();
const MyModel = db.model('Test', new Schema({}));
const changeStream = MyModel.watch();
const ready = new global.Promise(resolve => {
changeStream.once('ready', () => {
resolve(true);
});
setTimeout(resolve, 500, false);
});
changeStream.close();
yield db;
const readyCalled = yield ready;
assert.strictEqual(readyCalled, false);
});
});
it('watch() close() closes the stream (gh-7022)', function() {
return co(function*() {
const db = yield start();
const MyModel = db.model('Test', new Schema({ name: String }));
yield MyModel.createCollection();
const changeStream = MyModel.watch();
const closed = new global.Promise(resolve => {
changeStream.once('close', () => resolve(true));
});
yield MyModel.create({ name: 'Hodor' });
changeStream.close();
const closedData = yield closed;
assert.strictEqual(closedData, true);
});
});
});
describe('sessions (gh-6362)', function() {
let MyModel;
const delay = ms => done => setTimeout(done, ms);
beforeEach(function(done) {
const nestedSchema = new Schema({ foo: String });
db.deleteModel(/Test/);
MyModel = db.model('Test', new Schema({
name: String,
nested: nestedSchema,
arr: [nestedSchema]
}));
start.mongodVersion((err, version) => {
if (err) {
done(err);
return;
}
const mongo36 = version[0] > 3 || (version[0] === 3 && version[1] >= 6);
if (!mongo36) {
this.skip();
}
done();
});
});
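// MyModel.startSession() returns a driver ClientSession; passing it via the
// `session` option ties queries and saves to that session, and documents
// expose it through $session().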
it('startSession()', function() {
return co(function*() {
const session = yield MyModel.startSession({ causalConsistency: true });
assert.equal(session.supports.causalConsistency, true);
session.endSession();
});
});
it('startSession() before connecting', function() {
return co(function*() {
const db = start();
const MyModel = db.model('Test', new Schema({ name: String }));
// Don't wait for promise
const sessionPromise = MyModel.startSession({ causalConsistency: true });
yield db;
const session = yield sessionPromise;
assert.equal(session.supports.causalConsistency, true);
session.endSession();
});
});
it('sets session when pulling a document from db', function() {
return co(function*() {
let doc = yield MyModel.create({ name: 'test', nested: { foo: 'bar' } });
const session = yield MyModel.startSession();
let lastUse = session.serverSession.lastUse;
yield delay(1);
doc = yield MyModel.findOne({ _id: doc._id }, null, { session });
assert.strictEqual(doc.$__.session, session);
assert.strictEqual(doc.$session(), session);
assert.strictEqual(doc.nested.$session(), session);
assert.ok(session.serverSession.lastUse > lastUse);
lastUse = session.serverSession.lastUse;
yield delay(1);
doc = yield MyModel.findOneAndUpdate({}, { name: 'test2' },
{ session: session });
assert.strictEqual(doc.$__.session, session);
assert.strictEqual(doc.$session(), session);
assert.strictEqual(doc.nested.$session(), session);
assert.ok(session.serverSession.lastUse > lastUse);
lastUse = session.serverSession.lastUse;
yield delay(1);
doc.name = 'test3';
yield doc.save();
assert.ok(session.serverSession.lastUse > lastUse);
session.endSession();
});
});
it('sets session on child doc when creating new doc (gh-7104)', function() {
return co(function*() {
let doc = yield MyModel.create({ name: 'test', arr: [{ foo: 'bar' }] });
const session = yield MyModel.startSession();
const lastUse = session.serverSession.lastUse;
yield delay(1);
doc = yield MyModel.findOne({ _id: doc._id }, null, { session });
assert.strictEqual(doc.$__.session, session);
assert.strictEqual(doc.$session(), session);
assert.strictEqual(doc.arr[0].$session(), session);
assert.ok(session.serverSession.lastUse > lastUse);
doc.arr.push({ foo: 'baz' });
assert.strictEqual(doc.arr[0].$session(), session);
assert.strictEqual(doc.arr[1].$session(), session);
doc.nested = { foo: 'foo' };
assert.strictEqual(doc.nested.$session(), session);
yield doc.save();
assert.strictEqual(doc.arr[0].$session(), session);
assert.strictEqual(doc.arr[1].$session(), session);
doc.$session(null);
assert.equal(doc.arr[0].$session(), null);
assert.equal(doc.arr[1].$session(), null);
});
});
it('sets session when pulling multiple docs from db', function() {
return co(function*() {
const doc = yield MyModel.create({ name: 'test' });
const session = yield MyModel.startSession();
let lastUse = session.serverSession.lastUse;
yield delay(1);
const docs = yield MyModel.find({ _id: doc._id }, null,
{ session: session });
assert.equal(docs.length, 1);
assert.strictEqual(docs[0].$__.session, session);
assert.strictEqual(docs[0].$session(), session);
assert.ok(session.serverSession.lastUse > lastUse);
lastUse = session.serverSession.lastUse;
yield delay(1);
docs[0].name = 'test3';
yield docs[0].save();
assert.ok(session.serverSession.lastUse > lastUse);
session.endSession();
});
});
it('supports overwriting `session` in save()', function() {
return co(function*() {
let doc = yield MyModel.create({ name: 'test' });
const session = yield MyModel.startSession();
let lastUse = session.serverSession.lastUse;
yield delay(1);
doc = yield MyModel.findOne({ _id: doc._id }, null, { session });
assert.ok(session.serverSession.lastUse > lastUse);
lastUse = session.serverSession.lastUse;
yield delay(1);
doc.name = 'test3';
yield doc.save({ session: null });
assert.ok(session.serverSession.lastUse <= lastUse);
session.endSession();
});
});
});
});
it('method with same name as prop should throw (gh-4475)', function(done) {
const testSchema = new mongoose.Schema({
isPaid: Boolean
});
testSchema.methods.isPaid = function() {
return false;
};
let threw = false;
try {
db.model('Test', testSchema);
} catch (error) {
threw = true;
assert.equal(error.message, 'You have a method and a property in ' +
'your schema both named "isPaid"');
}
assert.ok(threw);
done();
});
it('emits errors in create cb (gh-3222) (gh-3478)', function(done) {
const schema = new Schema({ name: 'String' });
const Movie = db.model('Movie', schema);
Movie.on('error', function(error) {
assert.equal(error.message, 'fail!');
done();
});
Movie.create({ name: 'Conan the Barbarian' }, function(error) {
assert.ifError(error);
throw new Error('fail!');
});
});
it('create() reuses existing doc if one passed in (gh-4449)', function(done) {
const testSchema = new mongoose.Schema({
name: String
});
const Test = db.model('Test', testSchema);
const t = new Test();
Test.create(t, function(error, t2) {
assert.ifError(error);
assert.ok(t === t2);
done();
});
});
it('emits errors correctly from exec (gh-4500)', function(done) {
const someModel = db.model('Test', new Schema({}));
someModel.on('error', function(error) {
assert.equal(error.message, 'This error will not disappear');
done();
});
someModel.findOne().exec(function() {
throw new Error('This error will not disappear');
});
});
it('disabling id getter with .set() (gh-5548)', function(done) {
const ChildSchema = new mongoose.Schema({
name: String,
_id: false
});
ChildSchema.set('id', false);
const ParentSchema = new mongoose.Schema({
child: {
type: ChildSchema,
default: {}
}
}, { id: false });
const Parent = db.model('Parent', ParentSchema);
const doc = new Parent({ child: { name: 'test' } });
assert.ok(!doc.id);
assert.ok(!doc.child.id);
const obj = doc.toObject({ virtuals: true });
assert.ok(!('id' in obj));
assert.ok(!('id' in obj.child));
done();
});
it('creates new array when initializing from existing doc (gh-4449)', function(done) {
const TodoSchema = new mongoose.Schema({
title: String
}, { _id: false });
const UserSchema = new mongoose.Schema({
name: String,
todos: [TodoSchema]
});
const User = db.model('User', UserSchema);
const val = new User({ name: 'Val' });
User.create(val, function(error, val) {
assert.ifError(error);
val.todos.push({ title: 'Groceries' });
val.save(function(error) {
assert.ifError(error);
User.findById(val, function(error, val) {
assert.ifError(error);
assert.deepEqual(val.toObject().todos, [{ title: 'Groceries' }]);
const u2 = new User();
val.todos = u2.todos;
val.todos.push({ title: 'Cook' });
val.save(function(error) {
assert.ifError(error);
User.findById(val, function(error, val) {
assert.ifError(error);
assert.equal(val.todos.length, 1);
assert.equal(val.todos[0].title, 'Cook');
done();
});
});
});
});
});
});
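// Model.bulkWrite() accepts the driver's bulk op format (insertOne, updateOne,
// updateMany, replaceOne, deleteOne, deleteMany) and casts each op's
// filter/update/document against the schema before sending it.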
describe('bulkWrite casting', function() {
it('basic casting (gh-3998)', function(done) {
const schema = new Schema({
str: String,
num: Number
});
const M = db.model('Test', schema);
const ops = [
{
insertOne: {
document: { str: 1, num: '1' }
}
},
{
updateOne: {
filter: { str: 1 },
update: {
$set: { num: '2' }
}
}
}
];
M.bulkWrite(ops, function(error) {
assert.ifError(error);
M.findOne({}, function(error, doc) {
assert.ifError(error);
assert.strictEqual(doc.str, '1');
assert.strictEqual(doc.num, 2);
done();
});
});
});
it('setDefaultsOnInsert (gh-5708)', function(done) {
const schema = new Schema({
str: { type: String, default: 'test' },
num: Number
});
const M = db.model('Test', schema);
const ops = [
{
updateOne: {
filter: { num: 0 },
update: {
$inc: { num: 1 }
},
upsert: true,
setDefaultsOnInsert: true
}
}
];
M.bulkWrite(ops, function(error) {
assert.ifError(error);
M.findOne({}).lean().exec(function(error, doc) {
assert.ifError(error);
assert.strictEqual(doc.str, 'test');
assert.strictEqual(doc.num, 1);
done();
});
});
});
it('timestamps (gh-5708)', function() {
const schema = new Schema({
str: { type: String, default: 'test' },
num: Number
}, { timestamps: true });
const M = db.model('Test', schema);
const ops = [
{
insertOne: {
document: {
num: 42
}
}
},
{
updateOne: {
filter: { num: 0 },
update: {
$inc: { num: 1 }
},
upsert: true
}
}
];
const now = Date.now();
return co(function*() {
yield M.bulkWrite(ops);
let doc = yield M.findOne({ num: 42 });
assert.ok(doc.createdAt);
assert.ok(doc.createdAt.valueOf() >= now.valueOf());
assert.ok(doc.updatedAt);
assert.ok(doc.updatedAt.valueOf() >= now.valueOf());
doc = yield M.findOne({ num: 1 });
assert.ok(doc.createdAt);
assert.ok(doc.createdAt.valueOf() >= now.valueOf());
assert.ok(doc.updatedAt);
assert.ok(doc.updatedAt.valueOf() >= now.valueOf());
});
});
it('with child timestamps and array filters (gh-7032)', function() {
const childSchema = new Schema({ name: String }, { timestamps: true });
const parentSchema = new Schema({ children: [childSchema] }, {
timestamps: true
});
const Parent = db.model('Parent', parentSchema);
return co(function*() {
yield Parent.create({ children: [{ name: 'foo' }] });
const end = Date.now();
yield new Promise(resolve => setTimeout(resolve, 100));
yield Parent.bulkWrite([
{
updateOne: {
filter: {},
update: { $set: { 'children.$[].name': 'bar' } }
}
}
]);
const doc = yield Parent.findOne();
assert.ok(doc.children[0].updatedAt.valueOf() > end);
});
});
it('with timestamps and replaceOne (gh-5708)', function() {
const schema = new Schema({ num: Number }, { timestamps: true });
const M = db.model('Test', schema);
return co(function*() {
yield M.create({ num: 42 });
yield cb => setTimeout(cb, 10);
const now = Date.now();
yield M.bulkWrite([{
replaceOne: {
filter: { num: 42 },
replacement: { num: 100 }
}
}]);
const doc = yield M.findOne({ num: 100 });
assert.ok(doc.createdAt);
assert.ok(doc.createdAt.valueOf() >= now.valueOf());
assert.ok(doc.updatedAt);
assert.ok(doc.updatedAt.valueOf() >= now.valueOf());
});
});
it('with child timestamps (gh-7032)', function() {
const nested = new Schema({ name: String }, { timestamps: true });
const schema = new Schema({ nested: [nested] }, { timestamps: true });
const M = db.model('Test', schema);
return co(function*() {
yield M.create({ nested: [] });
yield cb => setTimeout(cb, 10);
const now = Date.now();
yield M.bulkWrite([{
updateOne: {
filter: {},
update: { $push: { nested: { name: 'test' } } }
}
}]);
const doc = yield M.findOne({});
assert.ok(doc.nested[0].createdAt);
assert.ok(doc.nested[0].createdAt.valueOf() >= now.valueOf());
assert.ok(doc.nested[0].updatedAt);
assert.ok(doc.nested[0].updatedAt.valueOf() >= now.valueOf());
});
});
it('with single nested and setOnInsert (gh-7534)', function() {
const nested = new Schema({ name: String });
const schema = new Schema({ nested: nested });
const Model = db.model('Test', schema);
return Model.
bulkWrite([{
updateOne: {
filter: {},
update: {
$setOnInsert: {
nested: {
name: 'foo'
}
}
},
upsert: true
}
}]).
then(() => Model.findOne()).
then(doc => assert.equal(doc.nested.name, 'foo'));
});
it('throws an error if no update object is provided (gh-8331)', function() {
const userSchema = new Schema({ name: { type: String, required: true } });
const User = db.model('User', userSchema);
return co(function*() {
const createdUser = yield User.create({ name: 'Hafez' });
let threw = false;
try {
yield User.bulkWrite([{
updateOne: {
filter: { _id: createdUser._id }
}
}]);
}
catch (err) {
threw = true;
assert.equal(err.message, 'Must provide an update object.');
}
finally {
assert.equal(threw, true);
const userAfterUpdate = yield User.findOne({ _id: createdUser._id });
assert.equal(userAfterUpdate.name, 'Hafez', 'Document data is not wiped if no update object is provided.');
}
});
});
});
it('insertMany with Decimal (gh-5190)', function(done) {
start.mongodVersion(function(err, version) {
if (err) {
done(err);
return;
}
const mongo34 = version[0] > 3 || (version[0] === 3 && version[1] >= 4);
if (!mongo34) {
done();
return;
}
test();
});
function test() {
const schema = new mongoose.Schema({
amount: mongoose.Schema.Types.Decimal
});
const Money = db.model('Test', schema);
Money.insertMany([{ amount: '123.45' }], function(error) {
assert.ifError(error);
done();
});
}
});
it('remove with cast error (gh-5323)', function(done) {
const schema = new mongoose.Schema({
name: String
});
const Model = db.model('Test', schema);
const arr = [
{ name: 'test-1' },
{ name: 'test-2' }
];
Model.create(arr, function(error) {
assert.ifError(error);
Model.remove([], function(error) {
assert.ok(error);
assert.ok(error.message.indexOf('must be an object') !== -1,
error.message);
Model.find({}, function(error, docs) {
assert.ifError(error);
assert.equal(docs.length, 2);
done();
});
});
});
});
it('.create() with non-object (gh-2037)', function(done) {
const schema = new mongoose.Schema({ name: String });
const Model = db.model('Test', schema);
Model.create(1, function(error) {
assert.ok(error);
assert.equal(error.name, 'ObjectParameterError');
done();
});
});
it('save() with unacknowledged writes (gh-6012)', function() {
const schema = new mongoose.Schema({ name: String }, { safe: false });
const Model = db.model('Test', schema);
return Model.create({});
});
it('save() with unacknowledged writes in options (gh-6012)', function() {
const schema = new mongoose.Schema({ name: String });
const Model = db.model('Test', schema);
const doc = new Model();
return doc.save({ safe: { w: 0 } });
});
it.skip('save() with wtimeout defined in schema (gh-6862)', function(done) {
// If you want to test this, set up a replica set with 1 primary up and 1 secondary down
this.timeout(process.env.TRAVIS ? 9000 : 5500);
const schema = new Schema({
name: String
}, {
writeConcern: {
w: 2,
wtimeout: 1000
}
});
const User = db.model('User', schema);
const user = new User();
user.name = 'Jon Snow';
user.save(function(error) {
assert.ok(error);
assert.equal(error.name, 'MongoWriteConcernError');
// although the write timed out, the doc has been successfully saved on the primary.
User.findOne({}, function(err, user) {
if (err) return done(err);
assert.equal(user.name, 'Jon Snow');
done();
});
});
});
it.skip('save with wtimeout in options (gh-6862)', function(done) {
// If you want to test this, set up a replica set with 1 primary up and 1 secondary down
this.timeout(process.env.TRAVIS ? 9000 : 5500);
const schema = new Schema({
name: String
});
const User = db.model('User', schema);
const user = new User();
user.name = 'Jon Snow';
user.save({ w: 2, wtimeout: 1000 }, function(error) {
assert.ok(error);
assert.equal(error.name, 'MongoWriteConcernError');
User.findOne({}, function(err, user) {
if (err) return done(err);
assert.equal(user.name, 'Jon Snow');
done();
});
});
});
it('bulkWrite casting updateMany, deleteOne, deleteMany (gh-3998)', function(done) {
const schema = new Schema({
str: String,
num: Number
});
const M = db.model('Test', schema);
const ops = [
{
insertOne: {
document: { str: 1, num: '1' }
}
},
{
insertOne: {
document: { str: '1', num: '1' }
}
},
{
updateMany: {
filter: { str: 1 },
update: {
$set: { num: '2' }
}
}
},
{
deleteMany: {
filter: { str: 1 }
}
}
];
M.bulkWrite(ops, function(error) {
assert.ifError(error);
M.countDocuments({}, function(error, count) {
assert.ifError(error);
assert.equal(count, 0);
done();
});
});
});
it('bulkWrite casting replaceOne (gh-3998)', function(done) {
const schema = new Schema({
str: String,
num: Number
});
const M = db.model('Test', schema);
const ops = [
{
insertOne: {
document: { str: 1, num: '1' }
}
},
{
replaceOne: {
filter: { str: 1 },
replacement: { str: 2, num: '2' }
}
}
];
M.bulkWrite(ops, function(error) {
assert.ifError(error);
M.findOne({}, function(error, doc) {
assert.ifError(error);
assert.strictEqual(doc.str, '2');
assert.strictEqual(doc.num, 2);
done();
});
});
});
it('alias with lean virtual (gh-6069)', function() {
const schema = new mongoose.Schema({
name: {
type: String,
alias: 'nameAlias'
}
});
const Model = db.model('Test', schema);
return co(function*() {
const doc = yield Model.create({ name: 'Val' });
const res = yield Model.findById(doc._id).lean();
assert.equal(schema.virtual('nameAlias').getters[0].call(res), 'Val');
});
});
it('marks array as modified when initializing non-array from db (gh-2442)', function(done) {
const s1 = new Schema({
array: mongoose.Schema.Types.Mixed
}, { minimize: false });
const s2 = new Schema({
array: {
type: [{
_id: false,
value: {
type: Number,
default: 0
}
}],
default: [{}]
}
});
const M1 = db.model('Test', s1);
const M2 = db.model('Test1', s2, M1.collection.name);
M1.create({ array: {} }, function(err, doc) {
assert.ifError(err);
assert.ok(doc.array);
M2.findOne({ _id: doc._id }, function(err, doc) {
assert.ifError(err);
assert.equal(doc.array[0].value, 0);
doc.array[0].value = 1;
doc.save(function(err) {
assert.ifError(err);
M2.findOne({ _id: doc._id }, function(err, doc) {
assert.ifError(err);
assert.ok(!doc.isModified('array'));
assert.deepEqual(doc.array[0].value, 1);
assert.equal(JSON.stringify(doc.array), '[{"value":1}]');
done();
});
});
});
});
});
it('Throws when saving same doc in parallel w/ callback (gh-6456)', function(done) {
let called = 0;
function counter() {
if (++called === 2) {
Test.countDocuments(function(err, cnt) {
assert.ifError(err);
assert.strictEqual(cnt, 1);
done();
});
}
}
const schema = new Schema({
name: String
});
const Test = db.model('Test', schema);
const test = new Test({
name: 'Billy'
});
test.save(function cb(err, doc) {
assert.ifError(err);
assert.strictEqual(doc.name, 'Billy');
counter();
});
test.save(function cb(err) {
assert.strictEqual(err.name, 'ParallelSaveError');
const regex = new RegExp(test.id);
assert.ok(regex.test(err.message));
counter();
});
});
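// syncIndexes() diffs the schema's declared indexes against the collection's
// actual indexes: it drops the ones no longer declared (returning their names)
// and builds any that are missing.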
it('syncIndexes() (gh-6281)', function() {
this.timeout(10000);
return co(function*() {
const coll = 'tests' + random();
let M = db.model('Test', new Schema({
name: { type: String, index: true }
}, { autoIndex: false }), coll);
let dropped = yield M.syncIndexes();
assert.deepEqual(dropped, []);
let indexes = yield M.listIndexes();
assert.deepEqual(indexes.map(i => i.key), [
{ _id: 1 },
{ name: 1 }
]);
// New model, same collection, index on different property
db.deleteModel(/Test/);
M = db.model('Test', new Schema({
otherName: { type: String, index: true }
}, { autoIndex: false }), coll);
dropped = yield M.syncIndexes();
assert.deepEqual(dropped, ['name_1']);
indexes = yield M.listIndexes();
assert.deepEqual(indexes.map(i => i.key), [
{ _id: 1 },
{ otherName: 1 }
]);
// New model, same collection, different options
db.deleteModel(/Test/);
M = db.model('Test', new Schema({
otherName: { type: String, unique: true }
}, { autoIndex: false }), coll);
dropped = yield M.syncIndexes();
assert.deepEqual(dropped, ['otherName_1']);
indexes = yield M.listIndexes();
assert.deepEqual(indexes.map(i => i.key), [
{ _id: 1 },
{ otherName: 1 }
]);
// Re-run syncIndexes(), shouldn't change anything
dropped = yield M.syncIndexes();
assert.deepEqual(dropped, []);
yield M.collection.drop();
});
});
it('syncIndexes() with different key order (gh-8135)', function() {
this.timeout(10000);
return co(function*() {
const opts = { autoIndex: false };
let schema = new Schema({ name: String, age: Number }, opts);
schema.index({ name: 1, age: -1 });
const coll = 'tests' + random();
let M = db.model('Test', schema, coll);
let dropped = yield M.syncIndexes();
assert.deepEqual(dropped, []);
const indexes = yield M.listIndexes();
assert.deepEqual(indexes.map(i => i.key), [
{ _id: 1 },
{ name: 1, age: -1 }
]);
// New model, same collection, different key order
schema = new Schema({ name: String, age: Number }, opts);
schema.index({ age: -1, name: 1 });
db.deleteModel(/Test/);
M = db.model('Test', schema, coll);
dropped = yield M.syncIndexes();
assert.deepEqual(dropped, ['name_1_age_-1']);
});
});
it('syncIndexes() with different key order (gh-8559)', function() {
this.timeout(5000);
return co(function*() {
yield db.dropDatabase();
const opts = { autoIndex: false };
let schema = new Schema({ name: String, age: Number }, opts);
schema.index({ name: 1, _id: 1 });
let M = db.model('Test', schema);
let dropped = yield M.syncIndexes();
assert.deepEqual(dropped, []);
// New model, same collection, different key order
schema = new Schema({ name: String, age: Number }, opts);
schema.index({ name: 1 });
db.deleteModel(/Test/);
M = db.model('Test', schema);
dropped = yield M.syncIndexes();
assert.deepEqual(dropped, ['name_1__id_1']);
});
});
it('syncIndexes() allows overwriting `background` option (gh-8645)', function() {
return co(function*() {
yield db.dropDatabase();
const opts = { autoIndex: false };
const schema = new Schema({ name: String }, opts);
schema.index({ name: 1 }, { background: true });
const M = db.model('Test', schema);
yield M.syncIndexes({ background: false });
const indexes = yield M.listIndexes();
assert.deepEqual(indexes[1].key, { name: 1 });
assert.strictEqual(indexes[1].background, false);
});
});
it('using `new db.model()()` (gh-6698)', function(done) {
db.model('Test', new Schema({
name: String
}));
assert.throws(function() {
new db.model('Test')({ name: 'test' });
}, /should not be run with `new`/);
done();
});
it('throws if non-function passed as callback (gh-6640)', function(done) {
const Model = db.model('Test', new Schema({
name: String
}));
const doc = new Model({});
assert.throws(function() {
doc.save({}, {});
}, /callback must be a function/i);
done();
});
it('Throws when saving same doc in parallel w/ promises (gh-6456)', function(done) {
let called = 0;
function counter() {
if (++called === 2) {
Test.countDocuments(function(err, cnt) {
assert.ifError(err);
assert.strictEqual(cnt, 1);
done();
});
}
}
const schema = new Schema({
name: String
});
const Test = db.model('Test', schema);
const test = new Test({
name: 'Sarah'
});
function handler(doc) {
assert.strictEqual(doc.id, test.id);
counter();
}
function error(err) {
assert.strictEqual(err.name, 'ParallelSaveError');
const regex = new RegExp(test.id);
assert.ok(regex.test(err.message));
counter();
}
test.save().then(handler);
test.save().catch(error);
});
it('allows calling save in a post save hook (gh-6611)', function() {
let called = 0;
const noteSchema = new Schema({
body: String
});
noteSchema.post('save', function(note) {
if (!called) {
called++;
note.body = 'a note, part deux.';
return note.save();
}
});
const Note = db.model('Test', noteSchema);
return co(function*() {
yield Note.create({ body: 'a note.' });
const doc = yield Note.findOne({});
assert.strictEqual(doc.body, 'a note, part deux.');
});
});
it('createCollection() respects schema collation (gh-6489)', function() {
const userSchema = new Schema({
name: String
}, { collation: { locale: 'en_US', strength: 1 } });
const Model = db.model('User', userSchema);
return co(function*() {
yield Model.collection.drop().catch(() => {});
yield Model.createCollection();
const collectionName = Model.collection.name;
// If the collection is not created, the following will throw
// MongoError: Collection [mongoose_test.User] not found.
yield db.collection(collectionName).stats();
yield Model.create([{ name: 'alpha' }, { name: 'Zeta' }]);
// Ensure that the default collation is set. Mongoose will set the
// collation on the query itself (see gh-4839).
const res = yield db.collection(collectionName).
find({}).sort({ name: 1 }).toArray();
assert.deepEqual(res.map(v => v.name), ['alpha', 'Zeta']);
});
});
});
it('dropDatabase() after init allows re-init (gh-6967)', function() {
this.timeout(10000);
const Model = db.model('Test', new Schema({
name: { type: String, index: true }
}));
return co(function*() {
yield Model.init();
yield db.dropDatabase();
assert.ok(!Model.$init);
let threw = false;
try {
yield Model.listIndexes();
} catch (err) {
assert.ok(err.message.indexOf('test') !== -1,
err.message);
threw = true;
}
assert.ok(threw);
yield Model.init();
const indexes = yield Model.listIndexes();
assert.equal(indexes.length, 2);
assert.deepEqual(indexes[1].key, { name: 1 });
});
});
it('replaceOne always sets version key in top-level (gh-7138)', function() {
const key = 'A';
const schema = new mongoose.Schema({
key: String,
items: { type: [String], default: [] }
});
const Record = db.model('Test', schema);
const record = { key: key, items: ['A', 'B', 'C'] };
return co(function*() {
yield Record.replaceOne({ key: key }, record, { upsert: true });
const fetchedRecord = yield Record.findOne({ key: key });
assert.deepEqual(fetchedRecord.toObject().items, ['A', 'B', 'C']);
});
});
it('can JSON.stringify(Model.schema) with nested (gh-7220)', function() {
const nested = Schema({ name: String });
const Model = db.model('Test', Schema({ nested }));
const _schema = JSON.parse(JSON.stringify(Model.schema));
assert.ok(_schema.obj.nested);
});
it('Model.events() (gh-7125)', function() {
const Model = db.model('Test', Schema({
name: { type: String, validate: () => false }
}));
let called = [];
Model.events.on('error', err => { called.push(err); });
return co(function*() {
yield Model.findOne({ _id: 'notanid' }).catch(() => {});
assert.equal(called.length, 1);
assert.equal(called[0].name, 'CastError');
called = [];
const doc = new Model({ name: 'fail' });
yield doc.save().catch(() => {});
assert.equal(called.length, 1);
assert.equal(called[0].name, 'ValidationError');
called = [];
yield Model.aggregate([{ $group: { fail: true } }]).exec().catch(() => {});
assert.equal(called.length, 1);
assert.equal(called[0].name, 'MongoError');
});
});
it('sets $session() before pre save hooks run (gh-7742)', function() {
const schema = new Schema({ name: String });
let sessions = [];
schema.pre('save', function() {
sessions.push(this.$session());
});
const SampleModel = db.model('Test', schema);
return co(function*() {
yield SampleModel.create({ name: 'foo' });
// start session
const session = yield db.startSession();
// get doc
const doc = yield SampleModel.findOne();
doc.foo = 'bar';
sessions = [];
yield doc.save({ session });
assert.equal(sessions.length, 1);
assert.strictEqual(sessions[0], session);
sessions = [];
yield doc.save({ session: null });
assert.equal(sessions.length, 1);
assert.strictEqual(sessions[0], null);
});
});
it('sets $session() before pre remove hooks run (gh-7742)', function() {
const schema = new Schema({ name: String });
let sessions = [];
schema.pre('remove', function() {
sessions.push(this.$session());
});
const SampleModel = db.model('Test', schema);
return co(function*() {
yield SampleModel.create({ name: 'foo' });
// start session
const session = yield db.startSession();
// get doc
const doc = yield SampleModel.findOne();
doc.foo = 'bar';
sessions = [];
yield doc.remove({ session });
assert.equal(sessions.length, 1);
assert.strictEqual(sessions[0], session);
});
});
it('set $session() before pre validate hooks run on bulkWrite and insertMany (gh-7769)', function() {
const schema = new Schema({ name: String });
const sessions = [];
schema.pre('validate', function() {
sessions.push(this.$session());
});
const SampleModel = db.model('Test', schema);
return co(function*() {
// start session
const session = yield db.startSession();
yield SampleModel.insertMany([{ name: 'foo' }, { name: 'bar' }], { session });
assert.strictEqual(sessions[0], session);
assert.strictEqual(sessions[1], session);
yield SampleModel.bulkWrite([{
insertOne: {
doc: { name: 'Samwell Tarly' }
}
}, {
replaceOne: {
filter: { name: 'bar' },
replacement: { name: 'Gilly' }
}
}], { session });
assert.strictEqual(sessions[2], session);
assert.strictEqual(sessions[3], session);
});
});
it('custom statics that overwrite query functions dont get hooks by default (gh-7790)', function() {
return co(function*() {
const schema = new Schema({ name: String, loadedAt: Date });
schema.statics.findOne = function() {
return this.findOneAndUpdate({}, { loadedAt: new Date() }, { new: true });
};
let called = 0;
schema.pre('findOne', function() {
++called;
});
const Model = db.model('Test', schema);
yield Model.create({ name: 'foo' });
const res = yield Model.findOne();
assert.ok(res.loadedAt);
assert.equal(called, 0);
});
});
it('error handling middleware passes saved doc (gh-7832)', function() {
const schema = new Schema({ _id: Number });
const errs = [];
const docs = [];
schema.post('save', (err, doc, next) => {
errs.push(err);
docs.push(doc);
next();
});
const Model = db.model('Test', schema);
return co(function*() {
yield Model.create({ _id: 1 });
const doc = new Model({ _id: 1 });
const err = yield doc.save().then(() => null, err => err);
assert.ok(err);
assert.equal(err.code, 11000);
assert.equal(errs.length, 1);
assert.equal(errs[0].code, 11000);
assert.equal(docs.length, 1);
assert.strictEqual(docs[0], doc);
});
});
it('throws readable error if calling Model function with bad context (gh-7957)', function() {
const Model = db.model('Test', Schema({ name: String }));
assert.throws(() => {
new Model.discriminator('gh5957_fail', Schema({ doesntMatter: String }));
}, /Model\.discriminator.*new Model/);
const discriminator = Model.discriminator;
assert.throws(() => {
discriminator('gh5957_fail', Schema({ doesntMatter: String }));
}, /Model\.discriminator.*MyModel/);
});
describe('exists() (gh-6872)', function() {
it('returns true if document exists', function() {
const Model = db.model('Test', new Schema({ name: String }));
return Model.create({ name: 'foo' }).
then(() => Model.exists({ name: 'foo' })).
then(res => assert.strictEqual(res, true)).
then(() => Model.exists({})).
then(res => assert.strictEqual(res, true)).
then(() => Model.exists()).
then(res => assert.strictEqual(res, true));
});
it('returns false if no doc exists', function() {
const Model = db.model('Test', new Schema({ name: String }));
return Model.create({ name: 'foo' }).
then(() => Model.exists({ name: 'bar' })).
then(res => assert.strictEqual(res, false)).
then(() => Model.exists({ otherProp: 'foo' })).
then(res => assert.strictEqual(res, false));
});
it('options (gh-8075)', function() {
const Model = db.model('Test', new Schema({ name: String }));
return Model.exists({}).
then(res => assert.ok(!res)).
then(() => Model.exists({}, { explain: true })).
then(res => assert.ok(res));
});
});
it('Model.validate() (gh-7587)', function() {
const Model = db.model('Test', new Schema({
name: {
first: {
type: String,
required: true
},
last: {
type: String,
required: true
}
},
age: {
type: Number,
required: true
},
comments: [{ name: { type: String, required: true } }]
}));
return co(function*() {
let err = null;
let obj = null;
err = yield Model.validate({ age: null }, ['age']).
then(() => null, err => err);
assert.ok(err);
assert.deepEqual(Object.keys(err.errors), ['age']);
err = yield Model.validate({ name: {} }, ['name']).
then(() => null, err => err);
assert.ok(err);
assert.deepEqual(Object.keys(err.errors), ['name.first', 'name.last']);
obj = { name: { first: 'foo' } };
err = yield Model.validate(obj, ['name']).
then(() => null, err => err);
assert.ok(err);
assert.deepEqual(Object.keys(err.errors), ['name.last']);
obj = { comments: [{ name: 'test' }, {}] };
err = yield Model.validate(obj, ['comments']).
then(() => null, err => err);
assert.ok(err);
assert.deepEqual(Object.keys(err.errors), ['comments.name']);
obj = { age: '42' };
yield Model.validate(obj, ['age']);
assert.strictEqual(obj.age, 42);
});
});
it('sets correct `Document#op` with `save()` (gh-8439)', function() {
const schema = Schema({ name: String });
const ops = [];
schema.pre('validate', function() {
ops.push(this.$op);
});
schema.pre('save', function() {
ops.push(this.$op);
});
schema.post('validate', function() {
ops.push(this.$op);
});
schema.post('save', function() {
ops.push(this.$op);
});
const Model = db.model('Test', schema);
const doc = new Model({ name: 'test' });
return doc.save().then(() => {
assert.deepEqual(ops, ['validate', 'validate', 'save', 'save']);
});
});
it('bulkWrite sets discriminator filters (gh-8590)', function() {
const Animal = db.model('Test', Schema({ name: String }));
const Dog = Animal.discriminator('Dog', Schema({ breed: String }));
return co(function*() {
yield Dog.bulkWrite([{
updateOne: {
filter: { name: 'Pooka' },
update: { $set: { breed: 'Chorkie' } },
upsert: true
}
}]);
const res = yield Animal.findOne();
assert.ok(res instanceof Dog);
assert.strictEqual(res.breed, 'Chorkie');
});
});
});
| 1 | 14,155 | Better to just do `const err = yield User.bulkWrite(ops).then(() => null, err => err);` so you don't need to write out the whole `try/catch` block. | Automattic-mongoose | js |
@@ -453,7 +453,7 @@ func evmErrToErrStatusCode(evmErr error, g genesis.Blockchain, height uint64) (e
// intrinsicGas returns the intrinsic gas of an execution
func intrinsicGas(data []byte) (uint64, error) {
dataSize := uint64(len(data))
- if action.ExecutionDataGas == 0 || (math.MaxInt64-action.ExecutionBaseIntrinsicGas)/action.ExecutionDataGas < dataSize {
+ if (math.MaxInt64-action.ExecutionBaseIntrinsicGas)/action.ExecutionDataGas < dataSize {
return 0, action.ErrInsufficientFunds
}
| 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package evm
import (
"bytes"
"context"
"math"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/protobuf/proto"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/blockchain/genesis"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/tracer"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
)
var (
// TODO: whenever ActionGasLimit is removed from genesis, we need to hard code it to 5M to make it compatible with
// the mainnet.
preAleutianActionGasLimit = genesis.Default.ActionGasLimit
inContractTransfer = hash.BytesToHash256([]byte{byte(iotextypes.TransactionLogType_IN_CONTRACT_TRANSFER)})
// revertSelector is a special function selector for revert reason unpacking.
revertSelector = crypto.Keccak256([]byte("Error(string)"))[:4]
// ErrInconsistentNonce is the error that the nonce is different from executor's nonce
ErrInconsistentNonce = errors.New("Nonce is not identical to executor nonce")
)
// CanTransfer checks whether the from account has enough balance
func CanTransfer(db vm.StateDB, fromHash common.Address, balance *big.Int) bool {
return db.GetBalance(fromHash).Cmp(balance) >= 0
}
// MakeTransfer transfers account
func MakeTransfer(db vm.StateDB, fromHash, toHash common.Address, amount *big.Int) {
db.SubBalance(fromHash, amount)
db.AddBalance(toHash, amount)
db.AddLog(&types.Log{
Topics: []common.Hash{
common.BytesToHash(inContractTransfer[:]),
common.BytesToHash(fromHash[:]),
common.BytesToHash(toHash[:]),
},
Data: amount.Bytes(),
})
}
type (
// Params is the context and parameters
Params struct {
context vm.BlockContext
txCtx vm.TxContext
nonce uint64
executorRawAddress string
amount *big.Int
contract *common.Address
gas uint64
data []byte
}
)
// newParams creates a new context for use in the EVM.
func newParams(
ctx context.Context,
execution *action.Execution,
stateDB *StateDBAdapter,
getBlockHash GetBlockHash,
) (*Params, error) {
actionCtx := protocol.MustGetActionCtx(ctx)
blkCtx := protocol.MustGetBlockCtx(ctx)
featureCtx := protocol.MustGetFeatureCtx(ctx)
executorAddr := common.BytesToAddress(actionCtx.Caller.Bytes())
var contractAddrPointer *common.Address
if execution.Contract() != action.EmptyAddress {
contract, err := address.FromString(execution.Contract())
if err != nil {
return nil, errors.Wrap(err, "failed to convert encoded contract address to address")
}
contractAddr := common.BytesToAddress(contract.Bytes())
contractAddrPointer = &contractAddr
}
gasLimit := execution.GasLimit()
	// Reset the gas limit to the system-wide action gas limit cap if it exceeds that cap
if blkCtx.BlockHeight > 0 && featureCtx.SystemWideActionGasLimit && gasLimit > preAleutianActionGasLimit {
gasLimit = preAleutianActionGasLimit
}
var getHashFn vm.GetHashFunc
if !featureCtx.FixGetHashFnHeight {
getHashFn = func(n uint64) common.Hash {
hash, err := getBlockHash(stateDB.blockHeight - n)
if err != nil {
return common.BytesToHash(hash[:])
}
return common.Hash{}
}
} else {
getHashFn = func(n uint64) common.Hash {
hash, err := getBlockHash(stateDB.blockHeight - (n + 1))
if err == nil {
return common.BytesToHash(hash[:])
}
return common.Hash{}
}
}
context := vm.BlockContext{
CanTransfer: CanTransfer,
Transfer: MakeTransfer,
GetHash: getHashFn,
Coinbase: common.BytesToAddress(blkCtx.Producer.Bytes()),
BlockNumber: new(big.Int).SetUint64(blkCtx.BlockHeight),
Time: new(big.Int).SetInt64(blkCtx.BlockTimeStamp.Unix()),
Difficulty: new(big.Int).SetUint64(uint64(50)),
GasLimit: gasLimit,
}
return &Params{
context,
vm.TxContext{
Origin: executorAddr,
GasPrice: execution.GasPrice(),
},
execution.Nonce(),
actionCtx.Caller.String(),
execution.Amount(),
contractAddrPointer,
gasLimit,
execution.Data(),
}, nil
}
func securityDeposit(ps *Params, stateDB vm.StateDB, gasLimit uint64) error {
executorNonce := stateDB.GetNonce(ps.txCtx.Origin)
if executorNonce > ps.nonce {
log.S().Errorf("Nonce on %v: %d vs %d", ps.txCtx.Origin, executorNonce, ps.nonce)
// TODO ignore inconsistent nonce problem until the actions are executed sequentially
// return ErrInconsistentNonce
}
if gasLimit < ps.gas {
return action.ErrGasLimit
}
gasConsumed := new(big.Int).Mul(new(big.Int).SetUint64(ps.gas), ps.txCtx.GasPrice)
if stateDB.GetBalance(ps.txCtx.Origin).Cmp(gasConsumed) < 0 {
return action.ErrInsufficientFunds
}
stateDB.SubBalance(ps.txCtx.Origin, gasConsumed)
return nil
}
// ExecuteContract processes a transfer which contains a contract
func ExecuteContract(
ctx context.Context,
sm protocol.StateManager,
execution *action.Execution,
getBlockHash GetBlockHash,
depositGasFunc DepositGas,
) ([]byte, *action.Receipt, error) {
ctx, span := tracer.NewSpan(ctx, "evm.ExecuteContract")
defer span.End()
actionCtx := protocol.MustGetActionCtx(ctx)
blkCtx := protocol.MustGetBlockCtx(ctx)
g := genesis.MustExtractGenesisContext(ctx)
featureCtx := protocol.MustGetFeatureCtx(ctx)
stateDB := prepareStateDB(ctx, sm)
ps, err := newParams(ctx, execution, stateDB, getBlockHash)
if err != nil {
return nil, nil, err
}
retval, depositGas, remainingGas, contractAddress, statusCode, err := executeInEVM(ps, stateDB, g.Blockchain, blkCtx.GasLimit, blkCtx.BlockHeight)
if err != nil {
return nil, nil, err
}
receipt := &action.Receipt{
GasConsumed: ps.gas - remainingGas,
BlockHeight: blkCtx.BlockHeight,
ActionHash: actionCtx.ActionHash,
ContractAddress: contractAddress,
}
receipt.Status = statusCode
var burnLog *action.TransactionLog
if featureCtx.FixDoubleChargeGas {
		// Refund the full deposit; the actual gas fee will be subtracted when depositing the gas fee to the rewarding protocol
stateDB.AddBalance(ps.txCtx.Origin, big.NewInt(0).Mul(big.NewInt(0).SetUint64(depositGas), ps.txCtx.GasPrice))
} else {
if remainingGas > 0 {
remainingValue := new(big.Int).Mul(new(big.Int).SetUint64(remainingGas), ps.txCtx.GasPrice)
stateDB.AddBalance(ps.txCtx.Origin, remainingValue)
}
if depositGas-remainingGas > 0 {
burnLog = &action.TransactionLog{
Type: iotextypes.TransactionLogType_GAS_FEE,
Sender: actionCtx.Caller.String(),
Recipient: "", // burned
Amount: new(big.Int).Mul(new(big.Int).SetUint64(depositGas-remainingGas), ps.txCtx.GasPrice),
}
}
}
var depositLog *action.TransactionLog
if depositGas-remainingGas > 0 {
gasValue := new(big.Int).Mul(new(big.Int).SetUint64(depositGas-remainingGas), ps.txCtx.GasPrice)
depositLog, err = depositGasFunc(ctx, sm, gasValue)
if err != nil {
return nil, nil, err
}
}
if err := stateDB.CommitContracts(); err != nil {
return nil, nil, errors.Wrap(err, "failed to commit contracts to underlying db")
}
stateDB.clear()
receipt.AddLogs(stateDB.Logs()...).AddTransactionLogs(depositLog, burnLog)
if receipt.Status == uint64(iotextypes.ReceiptStatus_Success) ||
featureCtx.AddOutOfGasToTransactionLog && receipt.Status == uint64(iotextypes.ReceiptStatus_ErrCodeStoreOutOfGas) {
receipt.AddTransactionLogs(stateDB.TransactionLogs()...)
}
if featureCtx.SetRevertMessageToReceipt && receipt.Status == uint64(iotextypes.ReceiptStatus_ErrExecutionReverted) && retval != nil && bytes.Equal(retval[:4], revertSelector) {
// in case of the execution revert error, parse the retVal and add to receipt
data := retval[4:]
msgLength := byteutil.BytesToUint64BigEndian(data[56:64])
revertMsg := string(data[64 : 64+msgLength])
receipt.SetExecutionRevertMsg(revertMsg)
}
log.S().Debugf("Receipt: %+v, %v", receipt, err)
return retval, receipt, nil
}
// ReadContractStorage reads contract's storage
func ReadContractStorage(
ctx context.Context,
sm protocol.StateManager,
contract address.Address,
key []byte,
) ([]byte, error) {
bcCtx := protocol.MustGetBlockchainCtx(ctx)
ctx = protocol.WithFeatureCtx(protocol.WithBlockCtx(protocol.WithActionCtx(ctx,
protocol.ActionCtx{
ActionHash: hash.ZeroHash256,
}),
protocol.BlockCtx{
BlockHeight: bcCtx.Tip.Height + 1,
},
))
stateDB := prepareStateDB(ctx, sm)
res := stateDB.GetState(common.BytesToAddress(contract.Bytes()), common.BytesToHash(key))
return res[:], nil
}
func prepareStateDB(ctx context.Context, sm protocol.StateManager) *StateDBAdapter {
actionCtx := protocol.MustGetActionCtx(ctx)
blkCtx := protocol.MustGetBlockCtx(ctx)
featureCtx := protocol.MustGetFeatureCtx(ctx)
opts := []StateDBAdapterOption{}
if featureCtx.UsePendingNonceOption {
opts = append(opts, SortCachedContractsOption(), UsePendingNonceOption())
}
if featureCtx.NotFixTopicCopyBug {
opts = append(opts, NotFixTopicCopyBugOption())
}
if featureCtx.AsyncContractTrie {
opts = append(opts, AsyncContractTrieOption())
}
if featureCtx.FixSnapshotOrder {
opts = append(opts, FixSnapshotOrderOption())
}
if featureCtx.ClearSnapshots {
opts = append(opts, ClearSnapshotsOption())
}
return NewStateDBAdapter(
sm,
blkCtx.BlockHeight,
actionCtx.ActionHash,
opts...,
)
}
func getChainConfig(g genesis.Blockchain, height uint64) *params.ChainConfig {
var chainConfig params.ChainConfig
chainConfig.ConstantinopleBlock = new(big.Int).SetUint64(0) // Constantinople switch block (nil = no fork, 0 = already activated)
chainConfig.BeringBlock = new(big.Int).SetUint64(g.BeringBlockHeight)
// enable earlier Ethereum forks at Greenland
chainConfig.GreenlandBlock = new(big.Int).SetUint64(g.GreenlandBlockHeight)
// support chainid and enable Istanbul + MuirGlacier at Iceland
chainConfig.IstanbulBlock = new(big.Int).SetUint64(g.IcelandBlockHeight)
chainConfig.MuirGlacierBlock = new(big.Int).SetUint64(g.IcelandBlockHeight)
if g.IsIceland(height) {
chainConfig.ChainID = new(big.Int).SetUint64(uint64(config.EVMNetworkID()))
}
return &chainConfig
}
// An error returned by executeInEVM is a consensus issue
func executeInEVM(evmParams *Params, stateDB *StateDBAdapter, g genesis.Blockchain, gasLimit uint64, blockHeight uint64) ([]byte, uint64, uint64, string, uint64, error) {
remainingGas := evmParams.gas
if err := securityDeposit(evmParams, stateDB, gasLimit); err != nil {
log.L().Warn("unexpected error: not enough security deposit", zap.Error(err))
return nil, 0, 0, action.EmptyAddress, uint64(iotextypes.ReceiptStatus_Failure), err
}
var config vm.Config
chainConfig := getChainConfig(g, blockHeight)
evm := vm.NewEVM(evmParams.context, evmParams.txCtx, stateDB, chainConfig, config)
intriGas, err := intrinsicGas(evmParams.data)
if err != nil {
return nil, evmParams.gas, remainingGas, action.EmptyAddress, uint64(iotextypes.ReceiptStatus_Failure), err
}
if remainingGas < intriGas {
return nil, evmParams.gas, remainingGas, action.EmptyAddress, uint64(iotextypes.ReceiptStatus_Failure), action.ErrInsufficientFunds
}
remainingGas -= intriGas
contractRawAddress := action.EmptyAddress
executor := vm.AccountRef(evmParams.txCtx.Origin)
var ret []byte
var evmErr error
if evmParams.contract == nil {
// create contract
var evmContractAddress common.Address
_, evmContractAddress, remainingGas, evmErr = evm.Create(executor, evmParams.data, remainingGas, evmParams.amount)
log.L().Debug("evm Create.", log.Hex("addrHash", evmContractAddress[:]))
if evmErr == nil {
if contractAddress, err := address.FromBytes(evmContractAddress.Bytes()); err == nil {
contractRawAddress = contractAddress.String()
}
}
} else {
stateDB.SetNonce(evmParams.txCtx.Origin, stateDB.GetNonce(evmParams.txCtx.Origin)+1)
// process contract
ret, remainingGas, evmErr = evm.Call(executor, *evmParams.contract, evmParams.data, remainingGas, evmParams.amount)
}
if evmErr != nil {
log.L().Debug("evm error", zap.Error(evmErr))
// The only possible consensus-error would be if there wasn't
// sufficient balance to make the transfer happen.
// Should be a hard fork (Bering)
if evmErr == vm.ErrInsufficientBalance && g.IsBering(blockHeight) {
return nil, evmParams.gas, remainingGas, action.EmptyAddress, uint64(iotextypes.ReceiptStatus_Failure), evmErr
}
}
if stateDB.Error() != nil {
log.L().Debug("statedb error", zap.Error(stateDB.Error()))
}
refund := (evmParams.gas - remainingGas) / 2
if refund > stateDB.GetRefund() {
refund = stateDB.GetRefund()
}
remainingGas += refund
errCode := uint64(iotextypes.ReceiptStatus_Success)
if evmErr != nil {
errCode = evmErrToErrStatusCode(evmErr, g, blockHeight)
}
return ret, evmParams.gas, remainingGas, contractRawAddress, errCode, nil
}
// evmErrToErrStatusCode returns the ReceiptStatus code which describes the error type
func evmErrToErrStatusCode(evmErr error, g genesis.Blockchain, height uint64) (errStatusCode uint64) {
if g.IsJutland(height) {
switch evmErr {
case vm.ErrOutOfGas:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrOutOfGas)
case vm.ErrCodeStoreOutOfGas:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrCodeStoreOutOfGas)
case vm.ErrDepth:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrDepth)
case vm.ErrContractAddressCollision:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrContractAddressCollision)
case vm.ErrExecutionReverted:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrExecutionReverted)
case vm.ErrMaxCodeSizeExceeded:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrMaxCodeSizeExceeded)
case vm.ErrWriteProtection:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrWriteProtection)
case vm.ErrInsufficientBalance:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrInsufficientBalance)
case vm.ErrInvalidJump:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrInvalidJump)
case vm.ErrReturnDataOutOfBounds:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrReturnDataOutOfBounds)
case vm.ErrGasUintOverflow:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrGasUintOverflow)
default:
			// These errors from go-ethereum are not accessible as exported variables.
switch evmErr.Error() {
case "no compatible interpreter":
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrNoCompatibleInterpreter)
default:
log.L().Error("evm internal error", zap.Error(evmErr))
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrUnknown)
}
}
return
}
if g.IsBering(height) {
switch evmErr {
case vm.ErrOutOfGas:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrOutOfGas)
case vm.ErrCodeStoreOutOfGas:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrCodeStoreOutOfGas)
case vm.ErrDepth:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrDepth)
case vm.ErrContractAddressCollision:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrContractAddressCollision)
case vm.ErrExecutionReverted:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrExecutionReverted)
case vm.ErrMaxCodeSizeExceeded:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrMaxCodeSizeExceeded)
case vm.ErrWriteProtection:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrWriteProtection)
default:
			// These errors from go-ethereum are not accessible as exported variables.
switch evmErr.Error() {
case "no compatible interpreter":
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrNoCompatibleInterpreter)
default:
log.L().Error("evm internal error", zap.Error(evmErr))
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrUnknown)
}
}
return
}
// before Bering height, return one common failure
errStatusCode = uint64(iotextypes.ReceiptStatus_Failure)
return
}
// intrinsicGas returns the intrinsic gas of an execution
func intrinsicGas(data []byte) (uint64, error) {
dataSize := uint64(len(data))
if action.ExecutionDataGas == 0 || (math.MaxInt64-action.ExecutionBaseIntrinsicGas)/action.ExecutionDataGas < dataSize {
return 0, action.ErrInsufficientFunds
}
return dataSize*action.ExecutionDataGas + action.ExecutionBaseIntrinsicGas, nil
}
// SimulateExecution simulates the execution in evm
func SimulateExecution(
ctx context.Context,
sm protocol.StateManager,
caller address.Address,
ex *action.Execution,
getBlockHash GetBlockHash,
) ([]byte, *action.Receipt, error) {
ctx, span := tracer.NewSpan(ctx, "evm.SimulateExecution")
defer span.End()
bcCtx := protocol.MustGetBlockchainCtx(ctx)
g := genesis.MustExtractGenesisContext(ctx)
ctx = protocol.WithActionCtx(
ctx,
protocol.ActionCtx{
Caller: caller,
ActionHash: hash.Hash256b(byteutil.Must(proto.Marshal(ex.Proto()))),
},
)
zeroAddr, err := address.FromString(address.ZeroAddress)
if err != nil {
return nil, nil, err
}
ctx = protocol.WithBlockCtx(
ctx,
protocol.BlockCtx{
BlockHeight: bcCtx.Tip.Height + 1,
BlockTimeStamp: bcCtx.Tip.Timestamp.Add(g.BlockInterval),
GasLimit: g.BlockGasLimit,
Producer: zeroAddr,
},
)
ctx = protocol.WithFeatureCtx(ctx)
return ExecuteContract(
ctx,
sm,
ex,
getBlockHash,
func(context.Context, protocol.StateManager, *big.Int) (*action.TransactionLog, error) {
return nil, nil
},
)
}
| 1 | 24,112 | `action.ExecutionDataGas = 100` | iotexproject-iotex-core | go |
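A note on the patch and review above: the reviewer points out that `action.ExecutionDataGas` is a fixed nonzero constant (100), so the removed `== 0` guard was dead code and only the overflow check matters before the multiplication. Below is a minimal, self-contained Go sketch of that overflow-safe intrinsic-gas computation; the constant values are assumptions for illustration, not taken from the actual action package.

package main

import (
	"errors"
	"fmt"
	"math"
)

// Assumed stand-ins for the real constants in the action package.
const (
	executionBaseIntrinsicGas uint64 = 10000
	executionDataGas          uint64 = 100 // per the review comment: action.ExecutionDataGas = 100
)

var errInsufficientFunds = errors.New("insufficient funds")

// intrinsicGas mirrors the patched function: because executionDataGas is a nonzero
// constant, only the overflow guard is needed before computing dataSize*executionDataGas.
func intrinsicGas(data []byte) (uint64, error) {
	dataSize := uint64(len(data))
	if (math.MaxInt64-executionBaseIntrinsicGas)/executionDataGas < dataSize {
		return 0, errInsufficientFunds
	}
	return dataSize*executionDataGas + executionBaseIntrinsicGas, nil
}

func main() {
	gas, err := intrinsicGas(make([]byte, 32))
	fmt.Println(gas, err) // prints "13200 <nil>" with the assumed constants
}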
@@ -124,6 +124,9 @@ class GalleryControllerTest extends TestCase
$this->assertSame([$media], $gController->getGalleryMediasAction(1));
}
+ /**
+ * @group legacy
+ */
public function testPostGalleryMediaGalleryhasmediaAction()
{
$media = $this->createMock(MediaInterface::class); | 1 | <?php
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Tests\Controller\Api;
use Doctrine\Common\Collections\ArrayCollection;
use FOS\RestBundle\Request\ParamFetcher;
use FOS\RestBundle\View\View;
use PHPUnit\Framework\TestCase;
use Sonata\MediaBundle\Controller\Api\GalleryController;
use Sonata\MediaBundle\Model\GalleryHasMedia;
use Sonata\MediaBundle\Model\GalleryHasMediaInterface;
use Sonata\MediaBundle\Model\GalleryInterface;
use Sonata\MediaBundle\Model\GalleryManagerInterface;
use Sonata\MediaBundle\Model\MediaInterface;
use Sonata\MediaBundle\Model\MediaManagerInterface;
use Symfony\Component\Form\Form;
use Symfony\Component\Form\FormFactoryInterface;
use Symfony\Component\Form\FormInterface;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpKernel\Exception\NotFoundHttpException;
class GalleryTest extends GalleryHasMedia
{
}
/**
* @author Hugo Briand <[email protected]>
*/
class GalleryControllerTest extends TestCase
{
public function testGetGalleriesAction()
{
$gManager = $this->createMock(GalleryManagerInterface::class);
$mediaManager = $this->createMock(MediaManagerInterface::class);
$formFactory = $this->createMock(FormFactoryInterface::class);
$gManager->expects($this->once())->method('getPager')->will($this->returnValue([]));
$gController = new GalleryController($gManager, $mediaManager, $formFactory, 'test');
$paramFetcher = $this->createMock(ParamFetcher::class);
$paramFetcher->expects($this->exactly(3))->method('get');
$paramFetcher->expects($this->once())->method('all')->will($this->returnValue([]));
$this->assertSame([], $gController->getGalleriesAction($paramFetcher));
}
public function testGetGalleryAction()
{
$gManager = $this->createMock(GalleryManagerInterface::class);
$mediaManager = $this->createMock(MediaManagerInterface::class);
$gallery = $this->createMock(GalleryInterface::class);
$formFactory = $this->createMock(FormFactoryInterface::class);
$gManager->expects($this->once())->method('findOneBy')->will($this->returnValue($gallery));
$gController = new GalleryController($gManager, $mediaManager, $formFactory, 'test');
$this->assertSame($gallery, $gController->getGalleryAction(1));
}
public function testGetGalleryNotFoundAction()
{
$this->expectException(NotFoundHttpException::class);
$this->expectExceptionMessage('Gallery (42) not found');
$gManager = $this->createMock(GalleryManagerInterface::class);
$mediaManager = $this->createMock(MediaManagerInterface::class);
$formFactory = $this->createMock(FormFactoryInterface::class);
$gManager->expects($this->once())->method('findOneBy');
$gController = new GalleryController($gManager, $mediaManager, $formFactory, 'test');
$gController->getGalleryAction(42);
}
public function testGetGalleryGalleryhasmediasAction()
{
$gManager = $this->createMock(GalleryManagerInterface::class);
$galleryHasMedia = $this->createMock(GalleryHasMediaInterface::class);
$gallery = $this->createMock(GalleryInterface::class);
$formFactory = $this->createMock(FormFactoryInterface::class);
$gallery->expects($this->once())->method('getGalleryHasMedias')->will($this->returnValue([$galleryHasMedia]));
$gManager->expects($this->once())->method('findOneBy')->will($this->returnValue($gallery));
$mediaManager = $this->createMock(MediaManagerInterface::class);
$gController = new GalleryController($gManager, $mediaManager, $formFactory, 'test');
$this->assertSame([$galleryHasMedia], $gController->getGalleryGalleryhasmediasAction(1));
}
public function testGetGalleryMediaAction()
{
$media = $this->createMock(MediaInterface::class);
$formFactory = $this->createMock(FormFactoryInterface::class);
$galleryHasMedia = $this->createMock(GalleryHasMediaInterface::class);
$galleryHasMedia->expects($this->once())->method('getMedia')->will($this->returnValue($media));
$gallery = $this->createMock(GalleryInterface::class);
$gallery->expects($this->once())->method('getGalleryHasMedias')->will($this->returnValue([$galleryHasMedia]));
$gManager = $this->createMock(GalleryManagerInterface::class);
$gManager->expects($this->once())->method('findOneBy')->will($this->returnValue($gallery));
$mediaManager = $this->createMock(MediaManagerInterface::class);
$gController = new GalleryController($gManager, $mediaManager, $formFactory, 'test');
$this->assertSame([$media], $gController->getGalleryMediasAction(1));
}
public function testPostGalleryMediaGalleryhasmediaAction()
{
$media = $this->createMock(MediaInterface::class);
$media2 = $this->createMock(MediaInterface::class);
$media2->expects($this->any())->method('getId')->will($this->returnValue(1));
$galleryHasMedia = $this->createMock(GalleryHasMediaInterface::class);
$galleryHasMedia->expects($this->once())->method('getMedia')->will($this->returnValue($media2));
$gallery = $this->createMock(GalleryInterface::class);
$gallery->expects($this->once())->method('getGalleryHasMedias')->will($this->returnValue([$galleryHasMedia]));
$galleryManager = $this->createMock(GalleryManagerInterface::class);
$galleryManager->expects($this->once())->method('findOneBy')->will($this->returnValue($gallery));
$mediaManager = $this->createMock(MediaManagerInterface::class);
$mediaManager->expects($this->once())->method('findOneBy')->will($this->returnValue($media));
$form = $this->createMock(Form::class);
$form->expects($this->once())->method('handleRequest');
$form->expects($this->once())->method('isValid')->will($this->returnValue(true));
$form->expects($this->once())->method('getData')->will($this->returnValue($galleryHasMedia));
$formFactory = $this->createMock(FormFactoryInterface::class);
$formFactory->expects($this->once())->method('createNamed')->will($this->returnValue($form));
$galleryController = new GalleryController($galleryManager, $mediaManager, $formFactory, GalleryTest::class);
$view = $galleryController->postGalleryMediaGalleryhasmediaAction(1, 2, new Request());
$this->assertInstanceOf(View::class, $view);
$this->assertSame(200, $view->getResponse()->getStatusCode(), 'Should return 200');
}
public function testPostGalleryMediaGalleryhasmediaInvalidAction()
{
$media = $this->createMock(MediaInterface::class);
$media->expects($this->any())->method('getId')->will($this->returnValue(1));
$galleryHasMedia = $this->createMock(GalleryHasMediaInterface::class);
$galleryHasMedia->expects($this->once())->method('getMedia')->will($this->returnValue($media));
$gallery = $this->createMock(GalleryInterface::class);
$gallery->expects($this->once())->method('getGalleryHasMedias')->will($this->returnValue([$galleryHasMedia]));
$galleryManager = $this->createMock(GalleryManagerInterface::class);
$galleryManager->expects($this->once())->method('findOneBy')->will($this->returnValue($gallery));
$mediaManager = $this->createMock(MediaManagerInterface::class);
$mediaManager->expects($this->once())->method('findOneBy')->will($this->returnValue($media));
$formFactory = $this->createMock(FormFactoryInterface::class);
$galleryController = new GalleryController($galleryManager, $mediaManager, $formFactory, GalleryTest::class);
$view = $galleryController->postGalleryMediaGalleryhasmediaAction(1, 1, new Request());
$this->assertInstanceOf(View::class, $view);
$this->assertSame(400, $view->getResponse()->getStatusCode(), 'Should return 400');
}
public function testPutGalleryMediaGalleryhasmediaAction()
{
$media = $this->createMock(MediaInterface::class);
$media->expects($this->any())->method('getId')->will($this->returnValue(1));
$galleryHasMedia = $this->createMock(GalleryHasMediaInterface::class);
$galleryHasMedia->expects($this->once())->method('getMedia')->will($this->returnValue($media));
$gallery = $this->createMock(GalleryInterface::class);
$gallery->expects($this->once())->method('getGalleryHasMedias')->will($this->returnValue([$galleryHasMedia]));
$galleryManager = $this->createMock(GalleryManagerInterface::class);
$galleryManager->expects($this->once())->method('findOneBy')->will($this->returnValue($gallery));
$mediaManager = $this->createMock(MediaManagerInterface::class);
$mediaManager->expects($this->once())->method('findOneBy')->will($this->returnValue($media));
$form = $this->createMock(Form::class);
$form->expects($this->once())->method('handleRequest');
$form->expects($this->once())->method('isValid')->will($this->returnValue(true));
$form->expects($this->once())->method('getData')->will($this->returnValue($galleryHasMedia));
$formFactory = $this->createMock(FormFactoryInterface::class);
$formFactory->expects($this->once())->method('createNamed')->will($this->returnValue($form));
$galleryController = new GalleryController($galleryManager, $mediaManager, $formFactory, GalleryTest::class);
$view = $galleryController->putGalleryMediaGalleryhasmediaAction(1, 1, new Request());
$this->assertInstanceOf(View::class, $view);
$this->assertSame(200, $view->getResponse()->getStatusCode(), 'Should return 200');
}
public function testPutGalleryMediaGalleryhasmediaInvalidAction()
{
$media = $this->createMock(MediaInterface::class);
$media->expects($this->any())->method('getId')->will($this->returnValue(1));
$galleryHasMedia = $this->createMock(GalleryHasMediaInterface::class);
$galleryHasMedia->expects($this->once())->method('getMedia')->will($this->returnValue($media));
$gallery = $this->createMock(GalleryInterface::class);
$gallery->expects($this->once())->method('getGalleryHasMedias')->will($this->returnValue([$galleryHasMedia]));
$galleryManager = $this->createMock(GalleryManagerInterface::class);
$galleryManager->expects($this->once())->method('findOneBy')->will($this->returnValue($gallery));
$mediaManager = $this->createMock(MediaManagerInterface::class);
$mediaManager->expects($this->once())->method('findOneBy')->will($this->returnValue($media));
$form = $this->createMock(Form::class);
$form->expects($this->once())->method('handleRequest');
$form->expects($this->once())->method('isValid')->will($this->returnValue(false));
$formFactory = $this->createMock(FormFactoryInterface::class);
$formFactory->expects($this->once())->method('createNamed')->will($this->returnValue($form));
$galleryController = new GalleryController($galleryManager, $mediaManager, $formFactory, GalleryTest::class);
$view = $galleryController->putGalleryMediaGalleryhasmediaAction(1, 1, new Request());
$this->assertInstanceOf(FormInterface::class, $view);
}
public function testDeleteGalleryMediaGalleryhasmediaAction()
{
$media = $this->createMock(MediaInterface::class);
$media->expects($this->any())->method('getId')->will($this->returnValue(1));
$galleryHasMedia = $this->createMock(GalleryHasMediaInterface::class);
$galleryHasMedia->expects($this->once())->method('getMedia')->will($this->returnValue($media));
$gallery = $this->createMock(GalleryInterface::class);
$gallery->expects($this->any())->method('getGalleryHasMedias')->will($this->returnValue(new ArrayCollection([$galleryHasMedia])));
$galleryManager = $this->createMock(GalleryManagerInterface::class);
$galleryManager->expects($this->once())->method('findOneBy')->will($this->returnValue($gallery));
$mediaManager = $this->createMock(MediaManagerInterface::class);
$mediaManager->expects($this->once())->method('findOneBy')->will($this->returnValue($media));
$formFactory = $this->createMock(FormFactoryInterface::class);
$galleryController = new GalleryController($galleryManager, $mediaManager, $formFactory, GalleryTest::class);
$view = $galleryController->deleteGalleryMediaGalleryhasmediaAction(1, 1);
$this->assertSame(['deleted' => true], $view);
}
public function testDeleteGalleryMediaGalleryhasmediaInvalidAction()
{
$media = $this->createMock(MediaInterface::class);
$media2 = $this->createMock(MediaInterface::class);
$media2->expects($this->any())->method('getId')->will($this->returnValue(2));
$galleryHasMedia = $this->createMock(GalleryHasMediaInterface::class);
$galleryHasMedia->expects($this->once())->method('getMedia')->will($this->returnValue($media2));
$gallery = $this->createMock(GalleryInterface::class);
$gallery->expects($this->any())->method('getGalleryHasMedias')->will($this->returnValue(new ArrayCollection([$galleryHasMedia])));
$galleryManager = $this->createMock(GalleryManagerInterface::class);
$galleryManager->expects($this->once())->method('findOneBy')->will($this->returnValue($gallery));
$mediaManager = $this->createMock(MediaManagerInterface::class);
$mediaManager->expects($this->once())->method('findOneBy')->will($this->returnValue($media));
$formFactory = $this->createMock(FormFactoryInterface::class);
$galleryController = new GalleryController($galleryManager, $mediaManager, $formFactory, GalleryTest::class);
$view = $galleryController->deleteGalleryMediaGalleryhasmediaAction(1, 1);
$this->assertInstanceOf(View::class, $view);
$this->assertSame(400, $view->getResponse()->getStatusCode(), 'Should return 400');
}
}
| 1 | 10,230 | @supersmile2009 please review. Is it right to mark this as legacy? Are we testing the legacy path here? Are there tests for the non-legacy path? | sonata-project-SonataMediaBundle | php |
@@ -112,11 +112,14 @@ func (c *Controller) getSpcResource(key string) (*apis.StoragePoolClaim, error)
// The SPC resource may no longer exist, in which case we stop
// processing.
if errors.IsNotFound(err) {
+
runtime.HandleError(fmt.Errorf("spcGot '%s' in work queue no longer exists", key))
- return nil, err
}
-
- return nil, err
+ // No need to return error to caller as we still want to fire the delete handler
+ // using the spc key(name)
+ // If error is returned the caller function will return without calling the spcEventHandler
+ // function that invokes business logic for pool deletion
+ return nil,nil
}
return spcGot, nil
} | 1 | /*
Copyright 2017 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spc
import (
"fmt"
"github.com/golang/glog"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/cache"
"github.com/openebs/maya/cmd/maya-apiserver/spc-actions"
)
// syncHandler compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the SPC resource
// with the current status of the resource.
func (c *Controller) syncHandler(key, operation string) error {
	// getSpcResource will take a key as argument which contains the namespace/name or simply the name
// of the object and will fetch the object.
spcGot, err := c.getSpcResource(key)
if err != nil {
return err
}
	// Call the spcEventHandler which will take the spc object, key (namespace/name of object) and the type of operation we need to do for the storage pool.
// Type of operation for storage pool e.g. create, delete etc.
events, err := c.spcEventHandler(operation, spcGot, key)
if events == ignoreEvent {
glog.Warning("None of the SPC handler was executed")
return nil
}
	// If this function returns an error then the object will be requeued.
	// No need to error out even if it occurs.
return nil
}
// spcEventHandler handles SPC related events.
func (c *Controller) spcEventHandler(operation string, spcGot *apis.StoragePoolClaim, key string) (string, error) {
switch operation {
case addEvent:
// CreateStoragePool function will create the storage pool
err := storagepoolactions.CreateStoragePool(spcGot)
		if err != nil {
			glog.Error("Storagepool could not be created:", err)
			// To-Do
			// If some error occurs, patch the spc object with the appropriate reason
		}
		return addEvent, err
break
case updateEvent:
// TO-DO : Handle Business Logic
// Hook Update Business Logic Here
		return updateEvent, nil
break
case deleteEvent:
err := storagepoolactions.DeleteStoragePool(key)
		if err != nil {
			glog.Error("Storagepool could not be deleted:", err)
}
return deleteEvent, err
break
default:
		// operations with a tag other than add, update and delete are ignored.
break
}
return ignoreEvent, nil
}
// enqueueSpc takes a SPC resource and converts it into a namespace/name
// string which is then put onto the work queue. This method should *not* be
// passed resources of any type other than SPC.
func (c *Controller) enqueueSpc(obj interface{}, q QueueLoad) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
runtime.HandleError(err)
return
}
q.Key = key
c.workqueue.AddRateLimited(q)
}
// getSpcResource returns object corresponding to the resource key
func (c *Controller) getSpcResource(key string) (*apis.StoragePoolClaim, error) {
// Convert the key(namespace/name) string into a distinct name
_, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
runtime.HandleError(fmt.Errorf("Invalid resource key: %s", key))
return nil, err
}
	spcGot, err := c.clientset.OpenebsV1alpha1().StoragePoolClaims().Get(name, metav1.GetOptions{})
if err != nil {
// The SPC resource may no longer exist, in which case we stop
// processing.
if errors.IsNotFound(err) {
runtime.HandleError(fmt.Errorf("spcGot '%s' in work queue no longer exists", key))
return nil, err
}
return nil, err
}
return spcGot, nil
} | 1 | 8,797 | getSpcResource should not be bothered about deletion. Can we have `separation of concerns` for this logic? | openebs-maya | go |
@@ -4596,11 +4596,11 @@ defaultdict(<class 'list'>, {'col..., 'col...})]
>>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y'])
lkey value_x rkey value_y
0 bar 2 bar 6
- 1 baz 3 baz 7
- 2 foo 1 foo 5
- 3 foo 1 foo 8
- 4 foo 5 foo 5
- 5 foo 5 foo 8
+ 5 baz 3 baz 7
+ 1 foo 1 foo 5
+ 2 foo 1 foo 8
+ 3 foo 5 foo 5
+ 4 foo 5 foo 8
>>> left_kdf = ks.DataFrame({'A': [1, 2]})
>>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2]) | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark DataFrame to behave similar to pandas DataFrame.
"""
from distutils.version import LooseVersion
import re
import warnings
import inspect
from functools import partial, reduce
import sys
from typing import Any, Optional, List, Tuple, Union, Generic, TypeVar
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like, is_dict_like
if LooseVersion(pd.__version__) >= LooseVersion('0.24'):
from pandas.core.dtypes.common import infer_dtype_from_object
else:
from pandas.core.dtypes.common import _get_dtype_from_object as infer_dtype_from_object
from pandas.core.dtypes.inference import is_sequence
from pyspark import sql as spark
from pyspark.sql import functions as F, Column
from pyspark.sql.types import (BooleanType, ByteType, DecimalType, DoubleType, FloatType,
IntegerType, LongType, NumericType, ShortType, StructType)
from pyspark.sql.utils import AnalysisException
from pyspark.sql.window import Window
from pyspark.sql.functions import pandas_udf
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.utils import validate_arguments_and_invoke_function
from databricks.koalas.generic import _Frame, max_display_count
from databricks.koalas.internal import _InternalFrame, IndexMap
from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame
from databricks.koalas.ml import corr
from databricks.koalas.utils import scol_for
from databricks.koalas.typedef import as_spark_type
# These regular expression patterns are compiled and defined here to avoid compiling the same
# pattern every time it is used in _repr_ and _repr_html_ in DataFrame.
# Both patterns basically seek the footer string in Pandas' string representation.
REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$")
REPR_HTML_PATTERN = re.compile(
r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$")
_flex_doc_FRAME = """
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``. With reverse version, `{reverse}`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`.
Parameters
----------
other : scalar
Any single data
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> df = ks.DataFrame({{'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]}},
... index=['circle', 'triangle', 'rectangle'],
... columns=['angles', 'degrees'])
>>> df
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Add a scalar with operator version which return the same
results.
>>> df + 1
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
Divide by constant with reverse version.
>>> df.div(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rdiv(10)
angles degrees
circle NaN 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
Subtract by constant.
>>> df - 1
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.sub(1)
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
Multiply by constant.
>>> df * 1
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.mul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Divide by constant.
>>> df / 1
angles degrees
circle 0.0 360.0
triangle 3.0 180.0
rectangle 4.0 360.0
>>> df.div(1)
angles degrees
circle 0.0 360.0
triangle 3.0 180.0
rectangle 4.0 360.0
>>> df // 2
angles degrees
circle 0 180
triangle 1 90
rectangle 2 180
>>> df % 2
angles degrees
circle 0 0
triangle 1 0
rectangle 0 0
>>> df.pow(2)
angles degrees
circle 0.0 129600.0
triangle 9.0 32400.0
rectangle 16.0 129600.0
"""
T = TypeVar('T')
if (3, 5) <= sys.version_info < (3, 7):
from typing import GenericMeta
# This is a workaround to support variadic generic in DataFrame in Python 3.5+.
# See https://github.com/python/typing/issues/193
# We wrap the input params by a tuple to mimic variadic generic.
old_getitem = GenericMeta.__getitem__ # type: ignore
def new_getitem(self, params):
if hasattr(self, "is_dataframe"):
return old_getitem(self, Tuple[params])
else:
return old_getitem(self, params)
GenericMeta.__getitem__ = new_getitem # type: ignore
class DataFrame(_Frame, Generic[T]):
"""
    Koalas DataFrame that corresponds to Pandas DataFrame logically. This holds Spark DataFrame
internally.
:ivar _internal: an internal immutable Frame to manage metadata.
:type _internal: _InternalFrame
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, Pandas DataFrame, Spark DataFrame \
or Koalas Series
Dict can contain Series, arrays, constants, or list-like objects
If data is a dict, argument order is maintained for Python 3.6
and later.
Note that if `data` is a Pandas DataFrame, a Spark DataFrame, and a Koalas Series,
other arguments should not be used.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = ks.DataFrame(data=d, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
Constructing DataFrame from Pandas DataFrame
>>> df = ks.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2']))
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = ks.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2 # doctest: +SKIP
a b c d e
0 3 1 4 9 8
1 4 8 4 8 4
2 7 6 5 6 7
3 8 7 9 1 0
4 2 5 4 3 9
"""
def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
if isinstance(data, _InternalFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
super(DataFrame, self).__init__(data)
elif isinstance(data, spark.DataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
super(DataFrame, self).__init__(_InternalFrame(data))
elif isinstance(data, ks.Series):
assert index is None
assert columns is None
assert dtype is None
assert not copy
data = data.to_dataframe()
super(DataFrame, self).__init__(data._internal)
else:
if isinstance(data, pd.DataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
pdf = data
else:
pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)
super(DataFrame, self).__init__(_InternalFrame.from_pandas(pdf))
@property
def _sdf(self) -> spark.DataFrame:
return self._internal.sdf
def _reduce_for_stat_function(self, sfun, name, axis=None, numeric_only=False):
"""
Applies sfun to each column and returns a pd.Series where the number of rows equal the
number of columns.
Parameters
----------
        sfun : either a 1-arg function that takes a Column and returns a Column, or
            a 2-arg function that takes a Column and its DataType and returns a Column.
        name : original pandas API name.
        axis : axis to apply. 0 or 1, or 'index' or 'columns'. Used only for a sanity
            check because Series only supports the index axis.
numeric_only : boolean, default False
If True, sfun is applied on numeric columns (including booleans) only.
"""
from inspect import signature
from databricks.koalas import Series
if axis in ('index', 0, None):
exprs = []
num_args = len(signature(sfun).parameters)
for col in self._internal.data_columns:
col_sdf = self._internal.scol_for(col)
col_type = self._internal.spark_type_for(col)
is_numeric_or_boolean = isinstance(col_type, (NumericType, BooleanType))
min_or_max = sfun.__name__ in ('min', 'max')
keep_column = not numeric_only or is_numeric_or_boolean or min_or_max
if keep_column:
if isinstance(col_type, BooleanType) and not min_or_max:
# Stat functions cannot be used with boolean values by default
# Thus, cast to integer (true to 1 and false to 0)
# Exclude the min and max methods though since those work with booleans
col_sdf = col_sdf.cast('integer')
if num_args == 1:
# Only pass in the column if sfun accepts only one arg
col_sdf = sfun(col_sdf)
else: # must be 2
assert num_args == 2
# Pass in both the column and its data type if sfun accepts two args
col_sdf = sfun(col_sdf, col_type)
exprs.append(col_sdf.alias(col))
sdf = self._sdf.select(*exprs)
pdf = sdf.toPandas()
if self._internal.column_index is not None:
pdf.columns = pd.MultiIndex.from_tuples(self._internal.column_index)
assert len(pdf) == 1, (sdf, pdf)
row = pdf.iloc[0]
row.name = None
# TODO: return Koalas series.
return row # Return first row as a Series
elif axis in ('columns', 1):
            # Here we execute with the first 1000 records to infer the return type.
            # If there are fewer than 1000 records, the pandas API is used directly as a shortcut.
limit = 1000
pdf = self.head(limit + 1).to_pandas()
pser = getattr(pdf, name)(axis=axis, numeric_only=numeric_only)
if len(pdf) <= limit:
return Series(pser)
@pandas_udf(returnType=as_spark_type(pser.dtype.type))
def calculate_columns_axis(*cols):
return getattr(pd.concat(cols, axis=1), name)(axis=axis, numeric_only=numeric_only)
df = self._sdf.select(calculate_columns_axis(*self._internal.data_scols).alias("0"))
return DataFrame(df)["0"]
else:
raise ValueError("No axis named %s for object type %s." % (axis, type(axis)))
# Arithmetic Operators
def _map_series_op(self, op, other):
if isinstance(other, DataFrame) or is_sequence(other):
raise ValueError(
"%s with another DataFrame or a sequence is currently not supported; "
"however, got %s." % (op, type(other)))
applied = []
for column in self._internal.data_columns:
applied.append(getattr(self[column], op)(other))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf, data_columns=[c.name for c in applied])
return DataFrame(internal)
def __add__(self, other):
return self._map_series_op("add", other)
def __radd__(self, other):
return self._map_series_op("radd", other)
def __div__(self, other):
return self._map_series_op("div", other)
def __rdiv__(self, other):
return self._map_series_op("rdiv", other)
def __truediv__(self, other):
return self._map_series_op("truediv", other)
def __rtruediv__(self, other):
return self._map_series_op("rtruediv", other)
def __mul__(self, other):
return self._map_series_op("mul", other)
def __rmul__(self, other):
return self._map_series_op("rmul", other)
def __sub__(self, other):
return self._map_series_op("sub", other)
def __rsub__(self, other):
return self._map_series_op("rsub", other)
def __pow__(self, other):
return self._map_series_op("pow", other)
def __rpow__(self, other):
return self._map_series_op("rpow", other)
def __mod__(self, other):
return self._map_series_op("mod", other)
def __rmod__(self, other):
return self._map_series_op("rmod", other)
def __floordiv__(self, other):
return self._map_series_op("floordiv", other)
def __rfloordiv__(self, other):
return self._map_series_op("rfloordiv", other)
def add(self, other):
return self + other
add.__doc__ = _flex_doc_FRAME.format(
desc='Addition',
op_name='+',
equiv='dataframe + other',
reverse='radd')
def radd(self, other):
return other + self
radd.__doc__ = _flex_doc_FRAME.format(
desc='Addition',
op_name="+",
equiv="other + dataframe",
reverse='add')
def div(self, other):
return self / other
div.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="dataframe / other",
reverse='rdiv')
divide = div
def rdiv(self, other):
return other / self
rdiv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="other / dataframe",
reverse='div')
def truediv(self, other):
return self / other
truediv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="dataframe / other",
reverse='rtruediv')
def rtruediv(self, other):
return other / self
rtruediv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="other / dataframe",
reverse='truediv')
def mul(self, other):
return self * other
mul.__doc__ = _flex_doc_FRAME.format(
desc='Multiplication',
op_name="*",
equiv="dataframe * other",
reverse='rmul')
multiply = mul
def rmul(self, other):
return other * self
rmul.__doc__ = _flex_doc_FRAME.format(
desc='Multiplication',
op_name="*",
equiv="other * dataframe",
reverse='mul')
def sub(self, other):
return self - other
sub.__doc__ = _flex_doc_FRAME.format(
desc='Subtraction',
op_name="-",
equiv="dataframe - other",
reverse='rsub')
subtract = sub
def rsub(self, other):
return other - self
rsub.__doc__ = _flex_doc_FRAME.format(
desc='Subtraction',
op_name="-",
equiv="other - dataframe",
reverse='sub')
def mod(self, other):
return self % other
mod.__doc__ = _flex_doc_FRAME.format(
desc='Modulo',
op_name='%',
equiv='dataframe % other',
reverse='rmod')
def rmod(self, other):
return other % self
rmod.__doc__ = _flex_doc_FRAME.format(
desc='Modulo',
op_name='%',
equiv='other % dataframe',
reverse='mod')
def pow(self, other):
return self ** other
pow.__doc__ = _flex_doc_FRAME.format(
desc='Exponential power of series',
op_name='**',
equiv='dataframe ** other',
reverse='rpow')
def rpow(self, other):
return other ** self
rpow.__doc__ = _flex_doc_FRAME.format(
desc='Exponential power',
op_name='**',
equiv='other ** dataframe',
reverse='pow')
def floordiv(self, other):
return self // other
floordiv.__doc__ = _flex_doc_FRAME.format(
desc='Integer division',
op_name='//',
equiv='dataframe // other',
reverse='rfloordiv')
def rfloordiv(self, other):
return other // self
rfloordiv.__doc__ = _flex_doc_FRAME.format(
desc='Integer division',
op_name='//',
equiv='other // dataframe',
reverse='floordiv')
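# Illustrative only: each flex method above delegates to the corresponding operator,
# so for a DataFrame `kdf` the following are equivalent:
#
#     kdf.rsub(10)   # docstring generated from _flex_doc_FRAME
#     10 - kdf       # __rsub__ -> _map_series_op("rsub", 10)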
# Comparison Operators
def __eq__(self, other):
return self._map_series_op("eq", other)
def __ne__(self, other):
return self._map_series_op("ne", other)
def __lt__(self, other):
return self._map_series_op("lt", other)
def __le__(self, other):
return self._map_series_op("le", other)
def __ge__(self, other):
return self._map_series_op("ge", other)
def __gt__(self, other):
return self._map_series_op("gt", other)
def eq(self, other):
"""
Compare if the current value is equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.eq(1)
a b
a True True
b False None
c False True
d False None
"""
return self == other
equals = eq
def gt(self, other):
"""
Compare if the current value is greater than the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.gt(2)
a b
a False False
b False None
c True False
d True None
"""
return self > other
def ge(self, other):
"""
Compare if the current value is greater than or equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.ge(1)
a b
a True True
b True None
c True True
d True None
"""
return self >= other
def lt(self, other):
"""
Compare if the current value is less than the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.lt(1)
a b
a False False
b False None
c False False
d False None
"""
return self < other
def le(self, other):
"""
Compare if the current value is less than or equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.le(2)
a b
a True True
b True None
c False True
d False None
"""
return self <= other
def ne(self, other):
"""
Compare if the current value is not equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.ne(1)
a b
a False False
b True None
c True False
d True None
"""
return self != other
def applymap(self, func):
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
.. note:: unlike pandas, it is required for `func` to specify the return type hint.
See https://docs.python.org/3/library/typing.html. For instance, as below:
>>> def function() -> int:
... return 1
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
Examples
--------
>>> df = ks.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> def str_len(x) -> int:
... return len(str(x))
>>> df.applymap(str_len)
0 1
0 3 4
1 5 5
>>> def power(x) -> float:
... return x ** 2
>>> df.applymap(power)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
applied = []
for column in self._internal.data_columns:
applied.append(self[column].apply(func))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf, data_columns=[c.name for c in applied])
return DataFrame(internal)
def corr(self, method='pearson'):
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
y : pandas.DataFrame
See Also
--------
Series.corr
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr('pearson')
dogs cats
dogs 1.000000 -0.851064
cats -0.851064 1.000000
>>> df.corr('spearman')
dogs cats
dogs 1.000000 -0.948683
cats -0.948683 1.000000
Notes
-----
There are behavior differences between Koalas and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. Koalas will return an error.
* Koalas doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
return corr(self, method)
def iteritems(self):
"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Returns
-------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
Examples
--------
>>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'],
... columns=['species', 'population'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content.to_string())
...
label: species
content: panda bear
polar bear
koala marsupial
label: population
content: panda 1864
polar 22000
koala 80000
"""
cols = list(self.columns)
return list((col_name, self[col_name]) for col_name in cols)
def to_clipboard(self, excel=True, sep=None, **kwargs):
"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
See Also
--------
read_clipboard : Read text from clipboard.
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = ks.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to False.
>>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
This function also works for Series:
>>> df = ks.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # 0, 1
... # 1, 2
... # 2, 3
... # 3, 4
... # 4, 5
... # 5, 6
... # 6, 7
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args)
def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.',
bold_rows=True, classes=None, escape=True, notebook=False, border=None,
table_id=None, render_links=False):
"""
Render a DataFrame as an HTML table.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links (only works with Pandas 0.24+).
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_string : Convert DataFrame to a string.
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_html, pd.DataFrame.to_html, args)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
max_rows=None, max_cols=None, show_dimensions=False,
decimal='.', line_width=None):
"""
Render a DataFrame to a console-friendly tabular output.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
line_width : int, optional
Width to wrap a line in characters.
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
>>> print(df.to_string(max_rows=2))
col1 col2
0 1 4
1 2 5
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_string, pd.DataFrame.to_string, args)
def to_dict(self, orient='dict', into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'],
... columns=['col1', 'col2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df_dict = df.to_dict()
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]
You can specify the return orientation.
>>> df_dict = df.to_dict('series')
>>> sorted(df_dict.items())
[('col1', row1 1
row2 2
Name: col1, dtype: int64), ('col2', row1 0.50
row2 0.75
Name: col2, dtype: float64)]
>>> df_dict = df.to_dict('split')
>>> sorted(df_dict.items()) # doctest: +ELLIPSIS
[('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]
>>> df_dict = df.to_dict('records')
>>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS
[[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]
>>> df_dict = df.to_dict('index')
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS
[defaultdict(<class 'list'>, {'col..., 'col...}), \
defaultdict(<class 'list'>, {'col..., 'col...})]
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_dict, pd.DataFrame.to_dict, args)
def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None,
decimal='.', multicolumn=None, multicolumn_format=None, multirow=None):
r"""
Render an object to a LaTeX tabular environment table.
Render an object to a tabular environment table. You can splice this into a LaTeX
document. Requires usepackage{booktabs}.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, consider alternative formats.
Parameters
----------
buf : file descriptor or None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given, it is assumed to be aliases
for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns' elements by position or name. The result of
each function must be a unicode string. List must be of length equal to the number of
columns.
float_format : str, optional
Format string for floating point numbers.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every multiindex key at
each row. By default, the value will be read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in LaTeX table format e.g. 'rcl' for 3 columns. By
default, 'l' will be used for all columns except columns of numbers, which default
to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config module. Use a longtable
environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX
preamble.
escape : bool, optional
By default, the value will be read from the pandas config module. When set to False
prevents from escaping latex special characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file, defaults to 'ascii' on
Python 2 and 'utf-8' on Python 3.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
multicolumn : bool, default True
Use multicolumn to enhance MultiIndex columns. The default will be read from the config
module.
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to column_format The default will be read from
the config module.
multirow : bool, default False
Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your
LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read from the pandas config
module.
Returns
-------
str or None
If buf is None, returns the resulting LaTeX format as a string. Otherwise returns None.
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = ks.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']},
... columns=['name', 'mask', 'weapon'])
>>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE
'\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon
\\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello &
purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n'
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_latex, pd.DataFrame.to_latex, args)
# TODO: enable doctests once we drop Spark 2.3.x (due to type coercion logic
# when creating arrays)
def transpose(self, limit: Optional[int] = 1000):
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
.. note:: This method is based on an expensive operation due to the nature
of big data. Internally it needs to generate each row for each value, and
then group twice - it is a huge operation. To prevent misuse, this method
has a default input-length limit of 1000 and raises a ValueError when it is exceeded.
>>> ks.DataFrame({'a': range(1001)}).transpose() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: Current DataFrame has more than the given limit 1000 rows.
Please use df.transpose(limit=<maximum number of rows>) to retrieve more than
1000 rows. Note that, before changing the given 'limit', this operation is
considerably expensive.
Parameters
----------
limit : int, optional
This parameter sets the limit on the length of the current DataFrame. Set it to
`None` to remove the limit. When the limit is set, the operation takes a shortcut:
the data is collected to the driver side and then handled by the pandas API. If the
limit is unset, the operation is executed by PySpark. Default is 1000.
Returns
-------
DataFrame
The transposed DataFrame.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the coerced dtype. For instance, if int and float have
to be placed in same column, it becomes float. If type coercion is not
possible, it fails.
Also, note that the values in index should be unique because they become
unique column names.
In addition, if Spark 2.3 is used, the types should always be exactly the same.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = ks.DataFrame(data=d1, columns=['col1', 'col2'])
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T.sort_index() # doctest: +SKIP
>>> df1_transposed # doctest: +SKIP
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes # doctest: +SKIP
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'score': [9.5, 8],
... 'kids': [0, 0],
... 'age': [12, 22]}
>>> df2 = ks.DataFrame(data=d2, columns=['score', 'kids', 'age'])
>>> df2
score kids age
0 9.5 0 12
1 8.0 0 22
>>> df2_transposed = df2.T.sort_index() # doctest: +SKIP
>>> df2_transposed # doctest: +SKIP
0 1
age 12.0 22.0
kids 0.0 0.0
score 9.5 8.0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the coerced dtype:
>>> df2.dtypes
score float64
kids int64
age int64
dtype: object
>>> df2_transposed.dtypes # doctest: +SKIP
0 float64
1 float64
dtype: object
"""
if len(self._internal.index_columns) != 1:
raise ValueError("Single index must be set to transpose the current DataFrame.")
if limit is not None:
pdf = self.head(limit + 1).to_pandas()
if len(pdf) > limit:
raise ValueError(
"Current DataFrame has more then the given limit %s rows. Please use "
"df.transpose(limit=<maximum number of rows>) to retrieve more than %s rows. "
"Note that, before changing the given 'limit', this operation is considerably "
"expensive." % (limit, limit))
return DataFrame(pdf.transpose())
index_columns = self._internal.index_columns
index_column = index_columns[0]
data_columns = self._internal.data_columns
sdf = self._sdf
# Explode the data to be pairs.
#
# For instance, if the current input DataFrame is as below:
#
# +-----+---+---+---+
# |index| x1| x2| x3|
# +-----+---+---+---+
# | y1| 1| 0| 0|
# | y2| 0| 50| 0|
# | y3| 3| 2| 1|
# +-----+---+---+---+
#
# Output of `exploded_df` becomes as below:
#
# +-----+---+-----+
# |index|key|value|
# +-----+---+-----+
# | y1| x1| 1|
# | y1| x2| 0|
# | y1| x3| 0|
# | y2| x1| 0|
# | y2| x2| 50|
# | y2| x3| 0|
# | y3| x1| 3|
# | y3| x2| 2|
# | y3| x3| 1|
# +-----+---+-----+
pairs = F.explode(F.array(*[
F.struct(
F.lit(column).alias("key"),
scol_for(sdf, column).alias("value")
) for column in data_columns]))
exploded_df = sdf.withColumn("pairs", pairs).select(
[scol_for(sdf, index_column), F.col("pairs.key"), F.col("pairs.value")])
# After that, executes pivot with key and its index column.
# Note that index column should contain unique values since column names
# should be unique.
pivoted_df = exploded_df.groupBy(F.col("key")).pivot('`{}`'.format(index_column))
# New index column is always single index.
internal_index_column = "__index_level_0__"
transposed_df = pivoted_df.agg(
F.first(F.col("value"))).withColumnRenamed("key", internal_index_column)
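# For the example above, pivoting `exploded_df` on the index column and taking the
# first value per (key, index) pair yields roughly the following layout
# (rows may appear in any order):
#
# +-----------------+---+---+---+
# |__index_level_0__| y1| y2| y3|
# +-----------------+---+---+---+
# |               x1|  1|  0|  3|
# |               x2|  0| 50|  2|
# |               x3|  0|  0|  1|
# +-----------------+---+---+---+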
new_data_columns = filter(lambda x: x != internal_index_column, transposed_df.columns)
internal = self._internal.copy(
sdf=transposed_df,
data_columns=list(new_data_columns),
index_map=[(internal_index_column, None)])
return DataFrame(internal)
T = property(transpose)
def transform(self, func):
"""
Call ``func`` on self producing a Series with transformed values
and that has the same length as its input.
.. note:: unlike pandas, it is required for ``func`` to specify the return type hint.
.. note:: the series within ``func`` is actually a pandas series, and
the length of each series is not guaranteed.
Parameters
----------
func : function
Function to use for transforming the data. It must work when pandas Series
is passed.
Returns
-------
DataFrame
A DataFrame that must have the same length as self.
Raises
------
Exception : If the returned DataFrame has a different length than self.
Examples
--------
>>> df = ks.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B'])
>>> df
A B
0 0 1
1 1 2
2 2 3
>>> def square(x) -> ks.Series[np.int32]:
... return x ** 2
>>> df.transform(square)
A B
0 0 1
1 1 4
2 4 9
"""
assert callable(func), "the first argument should be a callable function."
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
if return_sig is None:
raise ValueError("Given function must have return type hint; however, not found.")
wrapped = ks.pandas_wraps(func)
applied = []
for column in self._internal.data_columns:
applied.append(wrapped(self[column]).rename(column))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf)
return DataFrame(internal)
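# Illustrative only: the wrapping above is roughly
#
#     wrapped = ks.pandas_wraps(func)   # Spark return type is inferred from the
#                                       # ks.Series[np.int32]-style type hint
#     wrapped(self['A']).rename('A')    # one transformed column per input column
#
# so each column is transformed independently and the results are reassembled with
# the original index columns.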
@property
def index(self):
"""The index (row labels) Column of the DataFrame.
Currently not supported when the DataFrame has no index.
See Also
--------
Index
"""
from databricks.koalas.indexes import Index, MultiIndex
if len(self._internal.index_map) == 0:
return None
elif len(self._internal.index_map) == 1:
return Index(self)
else:
return MultiIndex(self)
@property
def empty(self):
"""
Returns true if the current DataFrame is empty. Otherwise, returns false.
Examples
--------
>>> ks.range(10).empty
False
>>> ks.range(0).empty
True
>>> ks.DataFrame({}, index=list('abc')).empty
True
"""
return len(self._internal.data_columns) == 0 or self._sdf.rdd.isEmpty()
def set_index(self, keys, drop=True, append=False, inplace=False):
"""Set the DataFrame index (row labels) using one or more existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
Returns
-------
DataFrame
Changed row labels.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
Examples
--------
>>> df = ks.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]},
... columns=['month', 'year', 'sale'])
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
"""
if isinstance(keys, str):
keys = [keys]
else:
keys = list(keys)
for key in keys:
if key not in self.columns:
raise KeyError(key)
if drop:
data_columns = [column for column in self._internal.data_columns if column not in keys]
else:
data_columns = self._internal.data_columns
if append:
index_map = self._internal.index_map + [(column, column) for column in keys]
else:
index_map = [(column, column) for column in keys]
index_columns = set(column for column, _ in index_map)
columns = [column for column, _ in index_map] + \
[column for column in data_columns if column not in index_columns]
# Sync Spark's columns as well.
sdf = self._sdf.select([self._internal.scol_for(name) for name in columns])
internal = _InternalFrame(sdf=sdf, index_map=index_map, data_columns=data_columns)
if inplace:
self._internal = internal
else:
return DataFrame(internal)
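# Sketch of the bookkeeping above for the docstring example df.set_index('month')
# (illustrative values only):
#
#     index_map    -> [('month', 'month')]        # 'month' becomes the named index
#     data_columns -> ['year', 'sale']            # dropped from data since drop=True
#     columns      -> ['month', 'year', 'sale']   # order used to re-select Spark columns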
def reset_index(self, level=None, drop=False, inplace=False, col_level=0, col_fill=''):
"""Reset the index, or a level of it.
For DataFrame with multi-level index, return new DataFrame with labeling information in
the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.
For a standard index, the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame
DataFrame with the new index.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
Examples
--------
>>> df = ks.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column. Unlike pandas, Koalas
does not automatically add a sequential index. The following 0, 1, 2, 3 are only
there when we display the DataFrame.
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = ks.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df # doctest: +NORMALIZE_WHITESPACE
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class') # doctest: +NORMALIZE_WHITESPACE
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1) # doctest: +NORMALIZE_WHITESPACE
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1,
... col_fill='species') # doctest: +NORMALIZE_WHITESPACE
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1,
... col_fill='genus') # doctest: +NORMALIZE_WHITESPACE
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
if len(self._internal.index_map) == 0:
raise NotImplementedError('Can\'t reset index because there is no index.')
multi_index = len(self._internal.index_map) > 1
def rename(index):
if multi_index:
return 'level_{}'.format(index)
else:
if 'index' not in self._internal.data_columns:
return 'index'
else:
return 'level_{}'.format(index)
if level is None:
new_index_map = [(column, name if name is not None else rename(i))
for i, (column, name) in enumerate(self._internal.index_map)]
index_map = []
else:
if isinstance(level, (int, str)):
level = [level]
level = list(level)
if all(isinstance(l, int) for l in level):
for lev in level:
if lev >= len(self._internal.index_map):
raise IndexError('Too many levels: Index has only {} level, not {}'
.format(len(self._internal.index_map), lev + 1))
idx = level
elif all(isinstance(lev, str) for lev in level):
idx = []
for l in level:
try:
i = self._internal.index_columns.index(l)
idx.append(i)
except ValueError:
if multi_index:
raise KeyError('Level unknown not found')
else:
raise KeyError('Level unknown must be same as name ({})'
.format(self._internal.index_columns[0]))
else:
raise ValueError('Level should be all int or all string.')
idx.sort()
new_index_map = []
index_map = self._internal.index_map.copy()
for i in idx:
info = self._internal.index_map[i]
index_column, index_name = info
new_index_map.append(
(index_column,
index_name if index_name is not None else rename(i)))
index_map.remove(info)
if drop:
new_index_map = []
internal = self._internal.copy(
data_columns=[column for column, _ in new_index_map] + self._internal.data_columns,
index_map=index_map,
column_index=None)
if self._internal.column_index is not None:
column_depth = len(self._internal.column_index[0])
if col_level >= column_depth:
raise IndexError('Too many levels: Index has only {} levels, not {}'
.format(column_depth, col_level + 1))
columns = pd.MultiIndex.from_tuples(
[tuple(name if i == col_level else col_fill
for i in range(column_depth))
for _, name in new_index_map] + self._internal.column_index)
else:
columns = [name for _, name in new_index_map] + self._internal.data_columns
if inplace:
self._internal = internal
self.columns = columns
else:
kdf = DataFrame(internal)
kdf.columns = columns
return kdf
def isnull(self):
"""
Detects missing values for items in the current Dataframe.
Return a boolean same-sized Dataframe indicating if the values are NA.
NA values, such as None or numpy.NaN, get mapped to True values.
Everything else gets mapped to False values.
See Also
--------
Dataframe.notnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.isnull()
0 1
0 False False
1 False True
2 False True
3 False False
>>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])
>>> df.isnull()
0 1 2
0 True False True
1 False True False
"""
kdf = self.copy()
for name, kser in kdf.iteritems():
kdf[name] = kser.isnull()
return kdf
isna = isnull
def notnull(self):
"""
Detects non-missing values for items in the current Dataframe.
This function takes a dataframe and indicates whether its
values are valid (not missing, which is ``NaN`` in numeric
datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).
See Also
--------
Dataframe.isnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.notnull()
0 1
0 True True
1 True False
2 True False
3 True True
>>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df.notnull()
0 1 2
0 True True True
1 True False True
"""
kdf = self.copy()
for name, kser in kdf.iteritems():
kdf[name] = kser.notnull()
return kdf
notna = notnull
# TODO: add frep and axis parameter
def shift(self, periods=1, fill_value=None):
"""
Shift DataFrame by desired number of periods.
.. note:: the current implementation of shift uses Spark's Window without
specifying a partition specification. This moves all data into a single
partition on a single machine and could cause serious performance
degradation. Avoid this method with very large datasets.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of self. For numeric data, np.nan is used.
Returns
-------
Copy of input DataFrame, shifted.
Examples
--------
>>> df = ks.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]},
... columns=['Col1', 'Col2', 'Col3'])
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
"""
applied = []
for column in self._internal.data_columns:
applied.append(self[column].shift(periods, fill_value))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf, data_columns=[c.name for c in applied])
return DataFrame(internal)
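# Illustrative only: df.shift(periods=3, fill_value=0) above is equivalent to
# rebuilding the frame from [self[c].shift(3, 0) for c in self._internal.data_columns]
# while keeping the original index columns; diff below follows the same per-column
# pattern.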
# TODO: add axis parameter
def diff(self, periods=1):
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another element in the
DataFrame (default is the element in the same column of the previous row).
.. note:: the current implementation of diff uses Spark's Window without
specifying a partition specification. This moves all data into a single
partition on a single machine and could cause serious performance
degradation. Avoid this method with very large datasets.
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative values.
Returns
-------
diffed : DataFrame
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with the 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
"""
applied = []
for column in self._internal.data_columns:
applied.append(self[column].diff(periods))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf, data_columns=[c.name for c in applied])
return DataFrame(internal)
def nunique(self, axis: int = 0, dropna: bool = True, approx: bool = False,
rsd: float = 0.05) -> pd.Series:
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
axis : int, default 0
Can only be set to 0 at the moment.
dropna : bool, default True
Don't include NaN in the count.
approx: bool, default False
If False, will use the exact algorithm and return the exact number of unique.
If True, it uses the HyperLogLog approximate algorithm, which is significantly faster
for large amount of data.
Note: This parameter is specific to Koalas and is not found in pandas.
rsd: float, default 0.05
Maximum estimation error allowed in the HyperLogLog algorithm.
Note: Just like ``approx`` this parameter is specific to Koalas.
Returns
-------
The number of unique values per column as a pandas Series.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]})
>>> df.nunique()
A 3
B 1
Name: 0, dtype: int64
>>> df.nunique(dropna=False)
A 3
B 2
Name: 0, dtype: int64
On big data, we recommend using the approximate algorithm to speed up this function.
The result will be very close to the exact unique count.
>>> df.nunique(approx=True)
A 3
B 1
Name: 0, dtype: int64
"""
if axis != 0:
raise ValueError("The 'nunique' method only works with axis=0 at the moment")
count_fn = partial(F.approx_count_distinct, rsd=rsd) if approx else F.countDistinct
if dropna:
res = self._sdf.select([count_fn(self._internal.scol_for(c))
.alias(c)
for c in self.columns])
else:
res = self._sdf.select([(count_fn(self._internal.scol_for(c))
# If the count of null values in a column is at least 1,
# increase the total count by 1 else 0. This is like adding
# self.isnull().sum().clip(upper=1) but can be computed in a
# single Spark job when pulling it into the select statement.
+ F.when(F.count(F.when(self._internal.scol_for(c).isNull(), 1)
.otherwise(None))
>= 1, 1).otherwise(0))
.alias(c)
for c in self.columns])
return res.toPandas().T.iloc[:, 0]
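# Illustrative only: for the docstring example column B = [NaN, 3, NaN] with
# dropna=False, the generated expression per column is roughly
#
#     F.countDistinct(B)
#     + F.when(F.count(F.when(B.isNull(), 1).otherwise(None)) >= 1, 1).otherwise(0)
#
# i.e. 1 distinct non-null value plus 1 for the presence of nulls, giving the 2
# shown for `df.nunique(dropna=False)` above.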
def round(self, decimals=0):
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
Returns
-------
DataFrame
See Also
--------
Series.round
Examples
--------
>>> df = ks.DataFrame({'A':[0.028208, 0.038683, 0.877076],
... 'B':[0.992815, 0.645646, 0.149370],
... 'C':[0.173891, 0.577595, 0.491027]},
... columns=['A', 'B', 'C'],
... index=['first', 'second', 'third'])
>>> df
A B C
first 0.028208 0.992815 0.173891
second 0.038683 0.645646 0.577595
third 0.877076 0.149370 0.491027
>>> df.round(2)
A B C
first 0.03 0.99 0.17
second 0.04 0.65 0.58
third 0.88 0.15 0.49
>>> df.round({'A': 1, 'C': 2})
A B C
first 0.0 0.992815 0.17
second 0.0 0.645646 0.58
third 0.9 0.149370 0.49
>>> decimals = ks.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
A B C
first 0.0 1.0 0.17
second 0.0 1.0 0.58
third 0.9 0.0 0.49
"""
if isinstance(decimals, ks.Series):
decimals_list = [kv for kv in decimals.to_pandas().items()]
elif isinstance(decimals, dict):
decimals_list = [(k, v) for k, v in decimals.items()]
elif isinstance(decimals, int):
decimals_list = [(v, decimals) for v in self._internal.data_columns]
else:
raise ValueError("decimals must be an integer, a dict-like or a Series")
sdf = self._sdf
for decimal in decimals_list:
sdf = sdf.withColumn(decimal[0], F.round(scol_for(sdf, decimal[0]), decimal[1]))
return DataFrame(self._internal.copy(sdf=sdf))
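# Sketch of how `decimals` is normalised above (illustrative): df.round({'A': 1, 'C': 2})
# produces decimals_list = [('A', 1), ('C', 2)], and each pair is applied as
#
#     sdf = sdf.withColumn('A', F.round(scol_for(sdf, 'A'), 1))
#     sdf = sdf.withColumn('C', F.round(scol_for(sdf, 'C'), 2))
#
# Columns not mentioned ('B' in the example) are left untouched.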
def to_koalas(self):
"""
Converts the existing DataFrame into a Koalas DataFrame.
This method is monkey-patched into Spark's DataFrame and can be used
to convert a Spark DataFrame into a Koalas DataFrame. If running on
an existing Koalas DataFrame, the method returns itself.
If a Koalas DataFrame is converted to a Spark DataFrame and then back
to Koalas, it will lose the index information and the original index
will be turned into a normal column.
See Also
--------
DataFrame.to_spark
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
>>> spark_df = df.to_spark()
>>> spark_df
DataFrame[__index_level_0__: bigint, col1: bigint, col2: bigint]
>>> kdf = spark_df.to_koalas()
>>> kdf
__index_level_0__ col1 col2
0 0 1 3
1 1 2 4
Calling to_koalas on a Koalas DataFrame simply returns itself.
>>> df.to_koalas()
col1 col2
0 1 3
1 2 4
"""
if isinstance(self, DataFrame):
return self
else:
return DataFrame(self)
def cache(self):
"""
Yields and caches the current DataFrame.
The Koalas DataFrame is yielded as a protected resource and its corresponding
data is cached, and the cache is released once execution goes out of the context.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
>>> with df.cache() as cached_df:
... print(cached_df.count())
...
dogs 4
cats 4
dtype: int64
>>> df = df.cache()
>>> df.to_pandas().mean(axis=1)
0 0.25
1 0.30
2 0.30
3 0.15
dtype: float64
To uncache the dataframe, use `unpersist` function
>>> df.unpersist()
"""
return _CachedDataFrame(self._internal)
def to_table(self, name: str, format: Optional[str] = None, mode: str = 'error',
partition_cols: Union[str, List[str], None] = None,
**options):
"""
Write the DataFrame into a Spark table.
Parameters
----------
name : str, required
Table name in Spark.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when the table exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
options
Additional options passed directly to Spark.
See Also
--------
read_table
DataFrame.to_spark_io
DataFrame.to_parquet
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_table('%s.my_table' % db, partition_cols='date')
"""
self._sdf.write.saveAsTable(name=name, format=format, mode=mode,
partitionBy=partition_cols, **options)
def to_delta(self, path: str, mode: str = 'error',
partition_cols: Union[str, List[str], None] = None, **options):
"""
Write the DataFrame out as a Delta Lake table.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when the destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
options : dict
All other options passed directly into Delta Lake.
See Also
--------
read_delta
DataFrame.to_parquet
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
Create a new Delta Lake table, partitioned by one column:
>>> df.to_delta('%s/to_delta/foo' % path, partition_cols='date')
Partitioned by two columns:
>>> df.to_delta('%s/to_delta/bar' % path, partition_cols=['date', 'country'])
Overwrite an existing table's partitions, using the 'replaceWhere' capability in Delta:
>>> df.to_delta('%s/to_delta/bar' % path,
... mode='overwrite', replaceWhere='date >= "2019-01-01"')
"""
self.to_spark_io(
path=path, mode=mode, format="delta", partition_cols=partition_cols, **options)
def to_parquet(self, path: str, mode: str = 'error',
partition_cols: Union[str, List[str], None] = None,
compression: Optional[str] = None):
"""
Write the DataFrame out as a Parquet file or directory.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when the destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'}
Compression codec to use when saving to file. If None is set, it uses the
value specified in `spark.sql.parquet.compression.codec`.
See Also
--------
read_parquet
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date')
>>> df.to_parquet(
... '%s/to_parquet/foo.parquet' % path,
... mode = 'overwrite',
... partition_cols=['date', 'country'])
"""
self._sdf.write.parquet(path=path, mode=mode, partitionBy=partition_cols,
compression=compression)
def to_spark_io(self, path: Optional[str] = None, format: Optional[str] = None,
mode: str = 'error', partition_cols: Union[str, List[str], None] = None,
**options):
"""Write the DataFrame out to a Spark data source.
Parameters
----------
path : string, optional
Path to the data source.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when data already exists.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional
Names of partitioning columns
options : dict
All other options passed directly into Spark's data source.
See Also
--------
read_spark_io
DataFrame.to_delta
DataFrame.to_parquet
DataFrame.to_table
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_spark_io(path='%s/to_spark_io/foo.json' % path, format='json')
"""
self._sdf.write.save(path=path, format=format, mode=mode, partitionBy=partition_cols,
**options)
def to_spark(self):
"""
Return the current DataFrame as a Spark DataFrame.
See Also
--------
DataFrame.to_koalas
"""
return self._internal.spark_df
def to_pandas(self):
"""
Return a Pandas DataFrame.
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.to_pandas()
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
"""
return self._internal.pandas_df.copy()
# Alias to maintain backward compatibility with Spark
toPandas = to_pandas
def assign(self, **kwargs):
"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though Koalas doesn't check it).
If the values are not callable, (e.g. a Series or a literal),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Examples
--------
>>> df = ks.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence and you can also
create multiple columns within the same assign.
>>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,
... temp_k=df['temp_c'] + 273.15)
>>> assigned[['temp_c', 'temp_f', 'temp_k']]
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
Notes
-----
Assigning multiple columns within the same ``assign`` is possible
but you cannot refer to newly created or modified columns. This
feature is supported in pandas for Python 3.6 and later but not in
Koalas. In Koalas, all items are computed first, and then assigned.
"""
from databricks.koalas.series import Series
for k, v in kwargs.items():
if not (isinstance(v, (Series, spark.Column)) or
callable(v) or pd.api.types.is_scalar(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(type(v).__name__))
if callable(v):
kwargs[k] = v(self)
pairs = list(kwargs.items())
sdf = self._sdf
for (name, c) in pairs:
if isinstance(c, Series):
sdf = sdf.withColumn(name, c._scol)
elif isinstance(c, Column):
sdf = sdf.withColumn(name, c)
else:
sdf = sdf.withColumn(name, F.lit(c))
data_columns = set(self._internal.data_columns)
internal = self._internal.copy(
sdf=sdf,
data_columns=(self._internal.data_columns +
[name for name, _ in pairs if name not in data_columns]))
return DataFrame(internal)
@staticmethod
def from_records(data: Union[np.array, List[tuple], dict, pd.DataFrame],
index: Union[str, list, np.array] = None, exclude: list = None,
columns: list = None, coerce_float: bool = False, nrows: int = None) \
-> 'DataFrame':
"""
Convert structured or record ndarray to DataFrame.
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names associated with them, this
argument provides names for the columns. Otherwise this argument indicates the order of
the columns in the result (any names not found in the data will become all-NA columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to
floating point, useful for SQL result sets
nrows : int, default None
Number of rows to read if data is an iterator
Returns
-------
df : DataFrame
Examples
--------
Use dict as input
>>> ks.DataFrame.from_records({'A': [1, 2, 3]})
A
0 1
1 2
2 3
Use list of tuples as input
>>> ks.DataFrame.from_records([(1, 2), (3, 4)])
0 1
0 1 2
1 3 4
Use NumPy array as input
>>> ks.DataFrame.from_records(np.eye(3))
0 1 2
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
return DataFrame(pd.DataFrame.from_records(data, index, exclude, columns, coerce_float,
nrows))
def to_records(self, index=True, convert_datetime64=None,
column_dtypes=None, index_dtypes=None):
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
.. note:: This method should only be used if the resulting NumPy ndarray is
expected to be small, as all the data is loaded into the driver's memory.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
convert_datetime64 : bool, default None
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex.
column_dtypes : str, type, dict, default None
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records() # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False) # doctest: +SKIP
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Specification of dtype for columns is new in Pandas 0.24.0.
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')])
Specification of dtype for index is new in Pandas 0.24.0.
Data types can also be specified for the index:
>>> df.to_records(index_dtypes="<S2") # doctest: +SKIP
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')])
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_records, pd.DataFrame.to_records, args)
def copy(self) -> 'DataFrame':
"""
Make a copy of this object's indices and data.
Returns
-------
copy : DataFrame
"""
return DataFrame(self._internal.copy())
def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False):
"""
Remove missing values.
Parameters
----------
axis : {0 or 'index'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.drop : Drop specified labels from columns.
DataFrame.isnull: Indicate missing values.
DataFrame.notnull : Indicate existing (non-missing) values.
Examples
--------
>>> df = ks.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [None, 'Batmobile', 'Bullwhip'],
... "born": [None, "1940-04-25", None]},
... columns=['name', 'toy', 'born'])
>>> df
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
if axis == 0 or axis == 'index':
if subset is not None:
if isinstance(subset, str):
columns = [subset]
else:
columns = list(subset)
invalids = [column for column in columns
if column not in self._internal.data_columns]
if len(invalids) > 0:
raise KeyError(invalids)
else:
columns = list(self.columns)
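# Build a per-row count of non-null values across the selected columns, e.g. for
# columns ['name', 'born'] this sums CASE WHEN `name` IS NOT NULL THEN 1 ELSE 0 END
# and the same expression for `born`; the filter predicate below is derived from it.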
cnt = reduce(lambda x, y: x + y,
[F.when(self[column].notna()._scol, 1).otherwise(0)
for column in columns],
F.lit(0))
if thresh is not None:
pred = cnt >= F.lit(int(thresh))
elif how == 'any':
pred = cnt == F.lit(len(columns))
elif how == 'all':
pred = cnt > F.lit(0)
else:
if how is not None:
raise ValueError('invalid how option: {h}'.format(h=how))
else:
raise TypeError('must specify how or thresh')
sdf = self._sdf.filter(pred)
internal = self._internal.copy(sdf=sdf)
if inplace:
self._internal = internal
else:
return DataFrame(internal)
else:
raise NotImplementedError("dropna currently only works for axis=0 or axis='index'")
# TODO: add 'limit' when value parameter exists
def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None):
"""Fill NA/NaN values.
.. note:: the current implementation of the 'method' parameter in fillna uses Spark's Window
without specifying a partition specification. This moves all data into a single
partition on a single machine and could cause serious performance degradation.
Avoid this method against very large datasets.
Parameters
----------
value : scalar, dict, Series
Value to use to fill holes. alternately a dict/Series of values
specifying which value to use for each column.
DataFrame is not supported.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series.
pad / ffill: propagate the last valid observation forward to the next valid one.
backfill / bfill: use the next valid observation to fill the gap.
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 1.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 1.0 4
"""
if axis is None:
axis = 0
if not (axis == 0 or axis == "index"):
raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
if (value is None) and (method is None):
raise ValueError("Must specify a fill 'value' or 'method'.")
sdf = self._sdf
if value is not None:
if not isinstance(value, (float, int, str, bool, dict, pd.Series)):
raise TypeError("Unsupported type %s" % type(value))
if isinstance(value, pd.Series):
value = value.to_dict()
if isinstance(value, dict):
for v in value.values():
if not isinstance(v, (float, int, str, bool)):
raise TypeError("Unsupported type %s" % type(v))
if limit is not None:
raise ValueError('limit parameter for value is not supported now')
sdf = sdf.fillna(value)
internal = self._internal.copy(sdf=sdf)
else:
if method not in ['pad', 'ffill', 'backfill', 'bfill']:
raise ValueError("Expecting pad, ffill, backfill or bfill.")
applied = []
for column in self._internal.data_columns:
applied.append(self[column].fillna(value=value, method=method,
axis=axis, limit=limit))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf, data_columns=[c.name for c in applied])
if inplace:
self._internal = internal
else:
return DataFrame(internal)
# TODO: add 'downcast' when value parameter exists
def bfill(self, axis=None, inplace=False, limit=None):
"""
Synonym for `DataFrame.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying a partition specification. This moves all data into a single
partition on a single machine and could cause serious performance degradation.
Avoid this method against very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> df.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
"""
return self.fillna(method='bfill', axis=axis, inplace=inplace, limit=limit)
# TODO: add 'downcast' when value parameter exists
def ffill(self, axis=None, inplace=False, limit=None):
"""
Synonym for `DataFrame.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying a partition specification. This moves all data into a single
partition on a single machine and could cause serious performance degradation.
Avoid this method against very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> df.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
"""
return self.fillna(method='ffill', axis=axis, inplace=inplace, limit=limit)
def replace(self, to_replace=None, value=None, subset=None, inplace=False,
limit=None, regex=False, method='pad'):
"""
Returns a new DataFrame replacing a value with another value.
Parameters
----------
to_replace : int, float, string, or list
Value to be replaced. If the value is a dict, then value is ignored and
to_replace must be a mapping from column name (string) to replacement value.
The value to be replaced must be an int, float, or string.
value : int, float, string, or list
Value to use to replace holes. The replacement value must be an int, float,
or string. If value is a list, value should be of the same length with to_replace.
subset : string, list
Optional list of column names to consider. Columns specified in subset that
do not have matching data type are ignored. For example, if value is a string,
and subset contains a non-string column, then the non-string column is simply ignored.
inplace : boolean, default False
Fill in place (do not create a new object)
Returns
-------
DataFrame
Object after replacement.
Examples
--------
>>> df = ks.DataFrame({"name": ['Ironman', 'Captain America', 'Thor', 'Hulk'],
... "weapon": ['Mark-45', 'Shield', 'Mjolnir', 'Smash']},
... columns=['name', 'weapon'])
>>> df
name weapon
0 Ironman Mark-45
1 Captain America Shield
2 Thor Mjolnir
3 Hulk Smash
Scalar `to_replace` and `value`
>>> df.replace('Ironman', 'War-Machine')
name weapon
0 War-Machine Mark-45
1 Captain America Shield
2 Thor Mjolnir
3 Hulk Smash
List like `to_replace` and `value`
>>> df.replace(['Ironman', 'Captain America'], ['Rescue', 'Hawkeye'], inplace=True)
>>> df
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Mjolnir
3 Hulk Smash
Replacing value by specifying column
>>> df.replace('Mjolnir', 'Stormbuster', subset='weapon')
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Stormbuster
3 Hulk Smash
Dict like `to_replace`
>>> df = ks.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']},
... columns=['A', 'B', 'C'])
>>> df.replace({'A': {0: 100, 4: 400}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
Notes
-----
One difference between this implementation and pandas is that it is necessary
to specify the column name when you are passing a dictionary to the `to_replace`
parameter. Calling `replace` on its index such as `df.replace({0: 10, 1: 100})` will
throw an error. Instead, specify the column name like `df.replace({'A': {0: 10, 1: 100}})`.
"""
if method != 'pad':
raise NotImplementedError("replace currently works only for method='pad")
if limit is not None:
raise NotImplementedError("replace currently works only when limit=None")
if regex is not False:
raise NotImplementedError("replace currently doesn't supports regex")
if value is not None and not isinstance(value, (int, float, str, list, dict)):
raise TypeError("Unsupported type {}".format(type(value)))
if to_replace is not None and not isinstance(to_replace, (int, float, str, list, dict)):
raise TypeError("Unsupported type {}".format(type(to_replace)))
if isinstance(value, list) and isinstance(to_replace, list):
if len(value) != len(to_replace):
raise ValueError('Length of to_replace and value must be same')
sdf = self._sdf.select(self._internal.data_columns)
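# Dispatch on the shape of 'to_replace':
# - a flat dict without nested dicts (and no 'value') maps old -> new directly via Spark's replace
# - a dict of {column: {old: new}} replaces within that column; {column: old} rewrites matches
#   in that column to 'value'
# - otherwise fall back to a plain value-for-value replacement over 'subset'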
if isinstance(to_replace, dict) and value is None and \
(not any(isinstance(i, dict) for i in to_replace.values())):
sdf = sdf.replace(to_replace, value, subset)
elif isinstance(to_replace, dict):
for df_column, replacement in to_replace.items():
if isinstance(replacement, dict):
sdf = sdf.replace(replacement, subset=df_column)
else:
sdf = sdf.withColumn(df_column,
F.when(scol_for(sdf, df_column) == replacement, value)
.otherwise(scol_for(sdf, df_column)))
else:
sdf = sdf.replace(to_replace, value, subset)
kdf = DataFrame(sdf)
if inplace:
self._internal = kdf._internal
else:
return kdf
def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) \
-> 'DataFrame':
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values.
Parameters
----------
lower : float or int, default None
Minimum threshold value. All values below this threshold will be set to it.
upper : float or int, default None
Maximum threshold value. All values above this threshold will be set to it.
Returns
-------
DataFrame
DataFrame with the values outside the clip boundaries replaced.
Examples
--------
>>> ks.DataFrame({'A': [0, 2, 4]}).clip(1, 3)
A
0 1
1 2
2 3
Notes
-----
One difference between this implementation and pandas is that running
pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported
between instances of 'str' and 'int'" while ks.DataFrame({'A': ['a', 'b']}).clip(0, 1)
will output the original DataFrame, simply ignoring the incompatible types.
"""
if is_list_like(lower) or is_list_like(upper):
raise ValueError("List-like value are not supported for 'lower' and 'upper' at the " +
"moment")
if lower is None and upper is None:
return self
numeric_types = (DecimalType, DoubleType, FloatType, ByteType, IntegerType, LongType,
ShortType)
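# Only numeric columns are clipped; string and other non-numeric columns are passed
# through untouched, which is the permissive behaviour described in the Notes above.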
numeric_columns = [(c, self._internal.scol_for(c)) for c in self.columns
if isinstance(self._internal.spark_type_for(c), numeric_types)]
if lower is not None:
numeric_columns = [(c, F.when(scol < lower, lower).otherwise(scol).alias(c))
for c, scol in numeric_columns]
if upper is not None:
numeric_columns = [(c, F.when(scol > upper, upper).otherwise(scol).alias(c))
for c, scol in numeric_columns]
nonnumeric_columns = [self._internal.scol_for(c) for c in self.columns
if not isinstance(self._internal.spark_type_for(c), numeric_types)]
sdf = self._sdf.select([scol for _, scol in numeric_columns] + nonnumeric_columns)
return ks.DataFrame(sdf)[list(self.columns)]
def head(self, n=5):
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : same type as caller
The first `n` rows of the caller object.
Examples
--------
>>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
"""
return DataFrame(self._internal.copy(sdf=self._sdf.limit(n)))
def pivot_table(self, values=None, index=None, columns=None,
aggfunc='mean', fill_value=None):
"""
Create a spreadsheet-style pivot table as a DataFrame. The levels in
the pivot table will be stored in MultiIndex objects (hierarchical
indexes) on the index and columns of the result DataFrame.
Parameters
----------
values : column to aggregate.
They should be either a list of one column or a string. A list of columns
is not supported yet.
index : column (string) or list of columns
If an array is passed, it must be the same length as the data.
The list should contain string.
columns : column
Columns used in the pivot operation. Only one column is supported and
it should be a string.
aggfunc : function (string), dict, default mean
If a dict is passed, the key is the column to aggregate and the value
is the function or a list of functions. In that case the resulting pivot table
will have columns concatenated by "_" where the first part is the value
of columns and the second part is the column name in values.
fill_value : scalar, default None
Value to replace missing values with.
Returns
-------
table : DataFrame
Examples
--------
>>> df = ks.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]},
... columns=['A', 'B', 'C', 'D', 'E'])
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = df.pivot_table(values='D', index=['A', 'B'],
... columns='C', aggfunc='sum')
>>> table # doctest: +NORMALIZE_WHITESPACE
large small
A B
foo one 4.0 1
two NaN 6
bar two 7.0 6
one 4.0 5
We can also fill missing values using the `fill_value` parameter.
>>> table = df.pivot_table(values='D', index=['A', 'B'],
... columns='C', aggfunc='sum', fill_value=0)
>>> table # doctest: +NORMALIZE_WHITESPACE
large small
A B
foo one 4 1
two 0 6
bar two 7 6
one 4 5
We can also calculate multiple types of aggregations for any given
value column.
>>> table = df.pivot_table(values = ['D'], index =['C'],
... columns="A", aggfunc={'D':'mean'})
>>> table # doctest: +NORMALIZE_WHITESPACE
bar foo
C
small 5.5 2.333333
large 5.5 2.000000
"""
if not isinstance(columns, str):
raise ValueError("columns should be string.")
if not isinstance(values, str) and not isinstance(values, list):
raise ValueError('values should be string or list of one column.')
if not isinstance(aggfunc, str) and (not isinstance(aggfunc, dict) or not all(
isinstance(key, str) and isinstance(value, str) for key, value in aggfunc.items())):
raise ValueError("aggfunc must be a dict mapping from column name (string) "
"to aggregate functions (string).")
if isinstance(aggfunc, dict) and index is None:
raise NotImplementedError("pivot_table doesn't support aggfunc"
" as dict and without index.")
if isinstance(values, list) and len(values) > 1:
raise NotImplementedError('Values as list of columns is not implemented yet.')
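# Build the Spark SQL aggregation expressions for the pivot. For example, values='D'
# with aggfunc='sum' yields the expression "sum(`D`) as `D`"; a dict aggfunc yields one
# such expression per (column, function) pair.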
if isinstance(aggfunc, str):
agg_cols = [F.expr('{1}(`{0}`) as `{0}`'.format(values, aggfunc))]
elif isinstance(aggfunc, dict):
agg_cols = [F.expr('{1}(`{0}`) as `{0}`'.format(key, value))
for key, value in aggfunc.items()]
agg_columns = [key for key, value in aggfunc.items()]
if set(agg_columns) != set(values):
raise ValueError("Columns in aggfunc must be the same as values.")
if index is None:
sdf = self._sdf.groupBy().pivot(pivot_col=columns).agg(*agg_cols)
elif isinstance(index, list):
sdf = self._sdf.groupBy(index).pivot(pivot_col=columns).agg(*agg_cols)
else:
raise ValueError("index should be a None or a list of columns.")
if fill_value is not None and isinstance(fill_value, (int, float)):
sdf = sdf.fillna(fill_value)
if index is not None:
return DataFrame(sdf).set_index(index)
else:
if isinstance(values, list):
index_values = values[-1]
else:
index_values = values
return DataFrame(sdf.withColumn(columns, F.lit(index_values))).set_index(columns)
def pivot(self, index=None, columns=None, values=None):
"""
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation.
Parameters
----------
index : string, optional
Column to use to make new frame's index. If None, uses
existing index.
columns : string
Column to use to make new frame's columns.
values : string, object or a list of the previous
Column(s) to use for populating new frame's values.
Returns
-------
DataFrame
Returns reshaped DataFrame.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
Examples
--------
>>> df = ks.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']},
... columns=['foo', 'bar', 'baz', 'zoo'])
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz').sort_index()
... # doctest: +NORMALIZE_WHITESPACE
A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(columns='bar', values='baz').sort_index()
... # doctest: +NORMALIZE_WHITESPACE
A B C
0 1.0 NaN NaN
1 NaN 2.0 NaN
2 NaN NaN 3.0
3 4.0 NaN NaN
4 NaN 5.0 NaN
5 NaN NaN 6.0
Notice that, unlike pandas which raises a ValueError when duplicated values are found,
Koalas' pivot still works with the first value it meets during the operation, because pivot
is an expensive operation and it is preferred to execute permissively rather than fail fast
when processing large data.
>>> df = ks.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]}, columns=['foo', 'bar', 'baz'])
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
>>> df.pivot(index='foo', columns='bar', values='baz').sort_index()
... # doctest: +NORMALIZE_WHITESPACE
A B C
foo
one 1.0 NaN NaN
two NaN 3.0 4.0
"""
if columns is None:
raise ValueError("columns should be set.")
if values is None:
raise ValueError("values should be set.")
should_use_existing_index = index is not None
if should_use_existing_index:
index = [index]
else:
index = self._internal.index_columns
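# pivot is implemented on top of pivot_table with aggfunc='first': when an index/column
# pair is duplicated, the first value encountered wins instead of raising as pandas does.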
df = self.pivot_table(
index=index, columns=columns, values=values, aggfunc='first')
if should_use_existing_index:
return df
else:
index_columns = df._internal.index_columns
# Note that the existing indexing column won't exist in the pivoted DataFrame.
internal = df._internal.copy(
index_map=[(index_column, None) for index_column in index_columns])
return DataFrame(internal)
@property
def columns(self):
"""The column labels of the DataFrame."""
if self._internal.column_index is not None:
columns = pd.MultiIndex.from_tuples(self._internal.column_index)
else:
columns = pd.Index(self._internal.data_columns)
if self._internal.column_index_names is not None:
columns.names = self._internal.column_index_names
return columns
@columns.setter
def columns(self, columns):
if isinstance(columns, pd.MultiIndex):
column_index = columns.tolist()
old_names = self._internal.data_columns
if len(old_names) != len(column_index):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(column_index)))
column_index_names = columns.names
self._internal = self._internal.copy(column_index=column_index,
column_index_names=column_index_names)
else:
old_names = self._internal.data_columns
if len(old_names) != len(columns):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(columns)))
if isinstance(columns, pd.Index):
column_index_names = columns.names
else:
column_index_names = None
sdf = self._sdf.select(self._internal.index_scols +
[self._internal.scol_for(old_name).alias(new_name)
for (old_name, new_name) in zip(old_names, columns)])
self._internal = self._internal.copy(sdf=sdf, data_columns=columns, column_index=None,
column_index_names=column_index_names)
@property
def dtypes(self):
"""Return the dtypes in the DataFrame.
This returns a Series with the data type of each column. The result's index is the original
DataFrame's columns. Columns with mixed types are stored with the object dtype.
Returns
-------
pd.Series
The data type of each column.
Examples
--------
>>> df = ks.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.dtypes
a object
b int64
c int8
d float64
e bool
f datetime64[ns]
dtype: object
"""
return pd.Series([self[col].dtype for col in self._internal.data_columns],
index=self._internal.data_columns)
def select_dtypes(self, include=None, exclude=None):
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied. It also takes Spark SQL
DDL type strings, for instance, 'string' and 'date'.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df.select_dtypes()
Traceback (most recent call last):
...
ValueError: at least one of include or exclude must be nonempty
* If ``include`` and ``exclude`` have overlapping elements
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df.select_dtypes(include='a', exclude='a')
Traceback (most recent call last):
...
TypeError: string dtypes are not allowed, use 'object' instead
Notes
-----
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3,
... 'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd'])
>>> df
a b c d
0 1 True 1.0 a
1 2 False 2.0 b
2 1 True 1.0 a
3 2 False 2.0 b
4 1 True 1.0 a
5 2 False 2.0 b
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'], exclude=['int'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c d
0 True 1.0 a
1 False 2.0 b
2 True 1.0 a
3 False 2.0 b
4 True 1.0 a
5 False 2.0 b
Spark SQL DDL type strings can be used as well.
>>> df.select_dtypes(exclude=['string'])
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
"""
from pyspark.sql.types import _parse_datatype_string
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
if not any((include, exclude)):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# can't both include AND exclude!
if set(include).intersection(set(exclude)):
raise ValueError('include and exclude overlap on {inc_ex}'.format(
inc_ex=set(include).intersection(set(exclude))))
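# 'include'/'exclude' entries may be either Spark SQL DDL type strings or numpy/pandas
# dtype-like objects; each entry is tried against both parsers below and silently skipped
# by the bare except when it cannot be interpreted in that representation.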
# Handle Spark types
columns = []
include_spark_type = []
for inc in include:
try:
include_spark_type.append(_parse_datatype_string(inc))
except:
pass
exclude_spark_type = []
for exc in exclude:
try:
exclude_spark_type.append(_parse_datatype_string(exc))
except:
pass
# Handle Pandas types
include_numpy_type = []
for inc in include:
try:
include_numpy_type.append(infer_dtype_from_object(inc))
except:
pass
exclude_numpy_type = []
for exc in exclude:
try:
exclude_numpy_type.append(infer_dtype_from_object(exc))
except:
pass
for col in self._internal.data_columns:
if len(include) > 0:
should_include = (
infer_dtype_from_object(self[col].dtype.name) in include_numpy_type or
self._sdf.schema[col].dataType in include_spark_type)
else:
should_include = not (
infer_dtype_from_object(self[col].dtype.name) in exclude_numpy_type or
self._sdf.schema[col].dataType in exclude_spark_type)
if should_include:
columns.append(col)
return DataFrame(self._internal.copy(
sdf=self._sdf.select(self._internal.index_scols +
[scol_for(self._sdf, col) for col in columns]),
data_columns=columns))
def count(self, axis=None):
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’, counts are generated for each column. If 1 or ‘columns’, counts are
generated for each row.
Returns
-------
pandas.Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ks.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
Name: 0, dtype: int64
"""
return self._reduce_for_stat_function(
_Frame._count_expr, name="count", axis=axis, numeric_only=False)
def drop(self, labels=None, axis=1, columns: Union[str, List[str]] = None):
"""
Drop specified labels from columns.
Remove columns by specifying label names and axis=1 or columns.
When specifying both labels and columns, only labels will be dropped.
Removing rows is yet to be implemented.
Parameters
----------
labels : single label or list-like
Column labels to drop.
axis : {1 or 'columns'}, default 1
.. drop currently only works for axis=1 'columns'
axis=0 is yet to be implemented.
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
Returns
-------
dropped : DataFrame
See Also
--------
Series.dropna
Examples
--------
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> df
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df.drop('x', axis=1)
y z w
0 3 5 7
1 4 6 8
>>> df.drop(['y', 'z'], axis=1)
x w
0 1 7
1 2 8
>>> df.drop(columns=['y', 'z'])
x w
0 1 7
1 2 8
Notes
-----
Currently only axis = 1 is supported in this function,
axis = 0 is yet to be implemented.
"""
if labels is not None:
axis = self._validate_axis(axis)
if axis == 1:
return self.drop(columns=labels)
raise NotImplementedError("Drop currently only works for axis=1")
elif columns is not None:
if isinstance(columns, str):
columns = [columns]
sdf = self._sdf.drop(*columns)
internal = self._internal.copy(
sdf=sdf,
data_columns=[column for column in self.columns if column not in columns])
return DataFrame(internal)
else:
raise ValueError("Need to specify at least one of 'labels' or 'columns'")
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ks.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'])
>>> df
x y z
0 0 a a
1 1 b b
2 2 b b
>>> df.get('x')
0 0
1 1
2 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
0 0 a
1 1 b
2 2 b
"""
try:
return self._pd_getitem(key)
except (KeyError, ValueError, IndexError):
return default
def sort_values(self, by: Union[str, List[str]], ascending: Union[bool, List[bool]] = True,
inplace: bool = False, na_position: str = 'last') -> Optional['DataFrame']:
"""
Sort by the values along either axis.
Parameters
----------
by : str or list of str
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({
... 'col1': ['A', 'B', None, 'D', 'C'],
... 'col2': [2, 9, 8, 7, 4],
... 'col3': [0, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df
col1 col2 col3
0 A 2 0
1 B 9 9
2 None 8 4
3 D 7 2
4 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 B 9 9
4 C 4 3
3 D 7 2
2 None 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
3 D 7 2
4 C 4 3
1 B 9 9
0 A 2 0
2 None 8 4
Sort by multiple columns
>>> df = ks.DataFrame({
... 'col1': ['A', 'A', 'B', None, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 None 8 4
"""
if isinstance(by, str):
by = [by]
if isinstance(ascending, bool):
ascending = [ascending] * len(by)
if len(ascending) != len(by):
raise ValueError('Length of ascending ({}) != length of by ({})'
.format(len(ascending), len(by)))
if na_position not in ('first', 'last'):
raise ValueError("invalid na_position: '{}'".format(na_position))
# Mapper: Get a spark column function for (ascending, na_position) combination
# Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847.
mapper = {
(True, 'first'): lambda x: Column(getattr(x._jc, "asc_nulls_first")()),
(True, 'last'): lambda x: Column(getattr(x._jc, "asc_nulls_last")()),
(False, 'first'): lambda x: Column(getattr(x._jc, "desc_nulls_first")()),
(False, 'last'): lambda x: Column(getattr(x._jc, "desc_nulls_last")()),
}
by = [mapper[(asc, na_position)](self[colname]._scol)
for colname, asc in zip(by, ascending)]
kdf = DataFrame(self._internal.copy(sdf=self._sdf.sort(*by))) # type: ks.DataFrame
if inplace:
self._internal = kdf._internal
return None
else:
return kdf
def sort_index(self, axis: int = 0,
level: Optional[Union[int, List[int]]] = None, ascending: bool = True,
inplace: bool = False, kind: str = None, na_position: str = 'last') \
-> Optional['DataFrame']:
"""
Sort object by labels (along an axis)
Parameters
----------
axis : index, columns to direct sorting. Currently, only axis = 0 is supported.
level : int or level name or list of ints or list of level names
if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
inplace : bool, default False
if True, perform operation in-place
kind : str, default None
Koalas does not allow specifying the sorting algorithm at the moment, default None
na_position : {‘first’, ‘last’}, default ‘last’
first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for
MultiIndex.
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan])
>>> df.sort_index()
A
a 1.0
b 2.0
NaN NaN
>>> df.sort_index(ascending=False)
A
b 2.0
a 1.0
NaN NaN
>>> df.sort_index(na_position='first')
A
NaN NaN
a 1.0
b 2.0
>>> df.sort_index(inplace=True)
>>> df
A
a 1.0
b 2.0
NaN NaN
>>> df = ks.DataFrame({'A': range(4), 'B': range(4)[::-1]},
... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]],
... columns=['A', 'B'])
>>> df.sort_index()
A B
a 0 3 0
1 2 1
b 0 1 2
1 0 3
>>> df.sort_index(level=1) # doctest: +SKIP
A B
a 0 3 0
b 0 1 2
a 1 2 1
b 1 0 3
>>> df.sort_index(level=[1, 0])
A B
a 0 3 0
b 0 1 2
a 1 2 1
b 1 0 3
"""
if len(self._internal.index_map) == 0:
raise ValueError("Index should be set.")
if axis != 0:
raise ValueError("No other axes than 0 are supported at the moment")
if kind is not None:
raise ValueError("Specifying the sorting algorithm is supported at the moment.")
if level is None or (is_list_like(level) and len(level) == 0): # type: ignore
by = self._internal.index_columns
elif is_list_like(level):
by = [self._internal.index_columns[l] for l in level] # type: ignore
else:
by = self._internal.index_columns[level]
return self.sort_values(by=by, ascending=ascending,
inplace=inplace, na_position=na_position)
# TODO: add keep = First
def nlargest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant in Pandas.
In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "X".
>>> df.nlargest(n=3, columns='X')
X Y
5 7.0 11
4 6.0 10
3 5.0 9
>>> df.nlargest(n=3, columns=['Y', 'X'])
X Y
6 NaN 12
5 7.0 11
4 6.0 10
"""
kdf = self.sort_values(by=columns, ascending=False) # type: Optional[DataFrame]
assert kdf is not None
return kdf.head(n=n)
# TODO: add keep = First
def nsmallest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``,
but more performant. In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "X".
>>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
To order by the smallest values in column "Y" and then "X", we can
specify multiple columns like in the next example.
>>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
"""
kdf = self.sort_values(by=columns, ascending=True) # type: Optional[DataFrame]
assert kdf is not None
return kdf.head(n=n)
def isin(self, values):
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable or dict
The sequence of values to test. If values is a dict,
the keys must be the column names, which must match.
Series and DataFrame are not supported.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'],
... columns=['num_legs', 'num_wings'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
"""
if isinstance(values, (pd.DataFrame, pd.Series)):
raise NotImplementedError("DataFrame and Series are not supported")
if isinstance(values, dict) and not set(values.keys()).issubset(self.columns):
raise AttributeError(
"'DataFrame' object has no attribute %s"
% (set(values.keys()).difference(self.columns)))
_select_columns = self._internal.index_columns.copy()
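# Build one boolean column per original column: for the dict form, columns missing from
# 'values' become a literal False column; for the list form, every column is tested with isin.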
if isinstance(values, dict):
for col in self.columns:
if col in values:
_select_columns.append(self._internal.scol_for(col)
.isin(values[col]).alias(col))
else:
_select_columns.append(F.lit(False).alias(col))
elif is_list_like(values):
_select_columns += [
self._internal.scol_for(col).isin(list(values)).alias(col)
for col in self.columns]
else:
raise TypeError('Values should be iterable, Series, DataFrame or dict.')
return DataFrame(self._internal.copy(sdf=self._sdf.select(_select_columns)))
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self), len(self.columns)
def merge(self, right: 'DataFrame', how: str = 'inner',
on: Optional[Union[str, List[str]]] = None,
left_on: Optional[Union[str, List[str]]] = None,
right_on: Optional[Union[str, List[str]]] = None,
left_index: bool = False, right_index: bool = False,
suffixes: Tuple[str, str] = ('_x', '_y')) -> 'DataFrame':
"""
Merge DataFrame objects with a database-style join.
The index of the resulting DataFrame will be one of the following:
- 0...n if no index is used for merging
- Index of the left DataFrame if merged only on the index of the right DataFrame
- Index of the right DataFrame if merged only on the index of the left DataFrame
- All involved indices if merged using the indices of both DataFrames
e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will
be an index (x, a, b)
Parameters
----------
right: Object to merge with.
how: Type of merge to be performed.
{'left', 'right', 'outer', 'inner'}, default 'inner'
left: use only keys from left frame, similar to a SQL left outer join; preserve key
order.
right: use only keys from right frame, similar to a SQL right outer join; preserve key
order.
outer: use union of keys from both frames, similar to a SQL full outer join; sort keys
lexicographically.
inner: use intersection of keys from both frames, similar to a SQL inner join;
preserve the order of the left keys.
on: Column or index level names to join on. These must be found in both DataFrames. If on
is None and not merging on indexes then this defaults to the intersection of the
columns in both DataFrames.
left_on: Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on: Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index: Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index or a number of
columns) must match the number of levels.
right_index: Use the index from the right DataFrame as the join key. Same caveats as
left_index.
suffixes: Suffix to apply to overlapping column names in the left and right side,
respectively.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
Examples
--------
>>> df1 = ks.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value'])
>>> df2 = ks.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value'])
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> merged = df1.merge(df2, left_on='lkey', right_on='rkey')
>>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y'])
lkey value_x rkey value_y
0 bar 2 bar 6
1 baz 3 baz 7
2 foo 1 foo 5
3 foo 1 foo 8
4 foo 5 foo 5
5 foo 5 foo 8
>>> left_kdf = ks.DataFrame({'A': [1, 2]})
>>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2])
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True)
A B
1 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left')
A B
0 1 None
1 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right')
A B
1 2.0 x
2 NaN y
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer')
A B
0 1.0 None
1 2.0 x
2 NaN y
Notes
-----
As described in #263, joining string columns currently returns None for missing values
instead of NaN.
"""
_to_list = lambda o: o if o is None or is_list_like(o) else [o]
if on:
if left_on or right_on:
raise ValueError('Can only pass argument "on" OR "left_on" and "right_on", '
'not a combination of both.')
left_keys = _to_list(on)
right_keys = _to_list(on)
else:
# TODO: need special handling for multi-index.
if left_index:
left_keys = self._internal.index_columns
else:
left_keys = _to_list(left_on)
if right_index:
right_keys = right._internal.index_columns
else:
right_keys = _to_list(right_on)
if left_keys and not right_keys:
raise ValueError('Must pass right_on or right_index=True')
if right_keys and not left_keys:
raise ValueError('Must pass left_on or left_index=True')
if not left_keys and not right_keys:
common = list(self.columns.intersection(right.columns))
if len(common) == 0:
raise ValueError(
'No common columns to perform merge on. Merge options: '
'left_on=None, right_on=None, left_index=False, right_index=False')
left_keys = common
right_keys = common
if len(left_keys) != len(right_keys): # type: ignore
raise ValueError('len(left_keys) must equal len(right_keys)')
if how == 'full':
warnings.warn("Warning: While Koalas will accept 'full', you should use 'outer' " +
"instead to be compatible with the pandas merge API", UserWarning)
if how == 'outer':
# 'outer' in pandas equals 'full' in Spark
how = 'full'
if how not in ('inner', 'left', 'right', 'full'):
raise ValueError("The 'how' parameter has to be amongst the following values: ",
"['inner', 'left', 'right', 'outer']")
left_table = self._sdf.alias('left_table')
right_table = right._sdf.alias('right_table')
left_key_columns = [scol_for(left_table, col) for col in left_keys] # type: ignore
right_key_columns = [scol_for(right_table, col) for col in right_keys] # type: ignore
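# The join condition is the conjunction of pairwise equality between the left and right
# key columns, i.e. left.key1 == right.key1 AND left.key2 == right.key2 AND ...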
join_condition = reduce(lambda x, y: x & y,
[lkey == rkey for lkey, rkey
in zip(left_key_columns, right_key_columns)])
joined_table = left_table.join(right_table, join_condition, how=how)
# Unpack suffixes tuple for convenience
left_suffix = suffixes[0]
right_suffix = suffixes[1]
# Append suffixes to columns with the same name to avoid conflicts later
duplicate_columns = (set(self._internal.data_columns)
& set(right._internal.data_columns))
left_index_columns = set(self._internal.index_columns)
right_index_columns = set(right._internal.index_columns)
exprs = []
for col in left_table.columns:
if col in left_index_columns:
continue
scol = scol_for(left_table, col)
if col in duplicate_columns:
if col in left_keys and col in right_keys:
right_scol = scol_for(right_table, col)
if how == 'right':
scol = right_scol
elif how == 'full':
scol = F.when(scol.isNotNull(), scol).otherwise(right_scol).alias(col)
else:
pass
else:
col = col + left_suffix
scol = scol.alias(col)
exprs.append(scol)
for col in right_table.columns:
if col in right_index_columns:
continue
scol = scol_for(right_table, col)
if col in duplicate_columns:
if col in left_keys and col in right_keys:
continue
else:
col = col + right_suffix
scol = scol.alias(col)
exprs.append(scol)
# Retain indices if they are used for joining
if left_index:
if right_index:
exprs.extend(['left_table.`{}`'.format(col) for col in left_index_columns])
exprs.extend(['right_table.`{}`'.format(col) for col in right_index_columns])
index_map = self._internal.index_map + [idx for idx in right._internal.index_map
if idx not in self._internal.index_map]
else:
exprs.extend(['right_table.`{}`'.format(col) for col in right_index_columns])
index_map = right._internal.index_map
elif right_index:
exprs.extend(['left_table.`{}`'.format(col) for col in left_index_columns])
index_map = self._internal.index_map
else:
index_map = []
selected_columns = joined_table.select(*exprs)
# Merge left and right indices after the join by replacing missing values in the left index
# with values from the right index and dropping the original left-table index column afterwards
if (how == 'right' or how == 'full') and right_index:
for left_index_col, right_index_col in zip(self._internal.index_columns,
right._internal.index_columns):
selected_columns = selected_columns.withColumn(
'left_table.' + left_index_col,
F.when(F.col('left_table.`{}`'.format(left_index_col)).isNotNull(),
F.col('left_table.`{}`'.format(left_index_col)))
.otherwise(F.col('right_table.`{}`'.format(right_index_col)))
).withColumnRenamed(
'left_table.' + left_index_col, left_index_col
).drop(F.col('left_table.`{}`'.format(left_index_col)))
if not (left_index and not right_index):
for right_index_col in right_index_columns:
if right_index_col in left_index_columns:
selected_columns = \
selected_columns.drop(F.col('right_table.`{}`'.format(right_index_col)))
if index_map:
data_columns = [c for c in selected_columns.columns
if c not in [idx[0] for idx in index_map]]
internal = _InternalFrame(
sdf=selected_columns, data_columns=data_columns, index_map=index_map)
return DataFrame(internal)
else:
return DataFrame(selected_columns)
def join(self, right: 'DataFrame', on: Optional[Union[str, List[str]]] = None,
how: str = 'left', lsuffix: str = '', rsuffix: str = '') -> 'DataFrame':
"""
Join columns of another DataFrame.
Join columns with `right` DataFrame either on index or on a key column. Efficiently join
multiple DataFrame objects by index at once by passing a list.
Parameters
----------
right: DataFrame, Series
on: str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index in `right`, otherwise
joins index-on-index. If multiple values given, the `right` DataFrame must have a
MultiIndex. Can pass an array as the join key if it is not already contained in the
calling DataFrame. Like an Excel VLOOKUP operation.
how: {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use `left` frame’s index (or column if on is specified).
* right: use `right`’s index.
* outer: form union of `left` frame’s index (or column if on is specified) with
            right’s index, and sort it lexicographically.
* inner: form intersection of `left` frame’s index (or column if on is specified)
with `right`’s index, preserving the order of the `left`’s one.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from `right` frame's overlapping columns.
Returns
-------
DataFrame
A dataframe containing columns from both the `left` and `right`.
See Also
--------
DataFrame.merge: For column(s)-on-columns(s) operations.
Notes
-----
Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame
objects.
Examples
--------
>>> kdf1 = ks.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
... 'A': ['A0', 'A1', 'A2', 'A3']},
... columns=['key', 'A'])
>>> kdf2 = ks.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']},
... columns=['key', 'B'])
>>> kdf1
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
>>> kdf2
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> join_kdf = kdf1.join(kdf2, lsuffix='_left', rsuffix='_right')
>>> join_kdf.sort_values(by=join_kdf.columns)
key_left A key_right B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 None None
If we want to join using the key columns, we need to set key to be the index in both df and
right. The joined DataFrame will have key as its index.
>>> join_kdf = kdf1.set_index('key').join(kdf2.set_index('key'))
>>> join_kdf.sort_values(by=join_kdf.columns) # doctest: +NORMALIZE_WHITESPACE
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 None
Another option to join using the key columns is to use the on parameter. DataFrame.join
always uses right’s index but we can use any column in df. This method preserves the
original DataFrame’s index in the result.
>>> join_kdf = kdf1.join(kdf2.set_index('key'), on='key')
>>> join_kdf.sort_values(by=join_kdf.columns)
key A B
0 K0 A0 B0
1 K1 A1 B1
2 K2 A2 B2
3 K3 A3 None
"""
if on:
self = self.set_index(on)
join_kdf = self.merge(right, left_index=True, right_index=True, how=how,
suffixes=(lsuffix, rsuffix)).reset_index()
else:
join_kdf = self.merge(right, left_index=True, right_index=True, how=how,
suffixes=(lsuffix, rsuffix))
return join_kdf
def append(self, other: 'DataFrame', ignore_index: bool = False,
verify_integrity: bool = False, sort: bool = False) -> 'DataFrame':
"""
Append rows of other to the end of caller, returning a new object.
Columns in other that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
sort : boolean, default False
Currently not supported.
Returns
-------
appended : DataFrame
Examples
--------
>>> df = ks.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df.append(df)
A B
0 1 2
1 3 4
0 1 2
1 3 4
>>> df.append(df, ignore_index=True)
A B
0 1 2
1 3 4
2 1 2
3 3 4
"""
if isinstance(other, ks.Series):
raise ValueError("DataFrames.append() does not support appending Series to DataFrames")
if sort:
raise ValueError("The 'sort' parameter is currently not supported")
if not ignore_index:
index_scols = self._internal.index_scols
if len(index_scols) != len(other._internal.index_scols):
raise ValueError("Both DataFrames have to have the same number of index levels")
if verify_integrity and len(index_scols) > 0:
if (self._sdf.select(index_scols)
.intersect(other._sdf.select(other._internal.index_scols))
.count()) > 0:
raise ValueError("Indices have overlapping values")
# Lazy import to avoid circular dependency issues
from databricks.koalas.namespace import concat
return concat([self, other], ignore_index=ignore_index)
# TODO: add 'filter_func' and 'errors' parameter
def update(self, other: 'DataFrame', join: str = 'left', overwrite: bool = True):
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or Series
join : 'left', default 'left'
Only left join is implemented, keeping the index and columns of the original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values with values from `other`.
* False: only update values that are NA in the original DataFrame.
Returns
-------
None : method directly changes calling object
See Also
--------
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C'])
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B'])
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
        For Series, its name attribute must be set.
>>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
>>> new_column = ks.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
If `other` contains None the corresponding values are not updated in the original dataframe.
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': [4, None, 6]}, columns=['B'])
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
if join != 'left':
raise NotImplementedError("Only left join is supported")
if isinstance(other, ks.Series):
other = DataFrame(other)
update_columns = list(set(self._internal.data_columns)
.intersection(set(other._internal.data_columns)))
update_sdf = self.join(other[update_columns], rsuffix='_new')._sdf
for column_name in update_columns:
old_col = scol_for(update_sdf, column_name)
new_col = scol_for(update_sdf, column_name + '_new')
if overwrite:
update_sdf = update_sdf.withColumn(column_name, F.when(new_col.isNull(), old_col)
.otherwise(new_col))
else:
update_sdf = update_sdf.withColumn(column_name, F.when(old_col.isNull(), new_col)
.otherwise(old_col))
internal = self._internal.copy(sdf=update_sdf.select([scol_for(update_sdf, col)
for col in self._internal.columns]))
self._internal = internal
def sample(self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False,
random_state: Optional[int] = None) -> 'DataFrame':
"""
Return a random sample of items from an axis of object.
        Please call this function using a named argument by specifying the ``frac`` argument.
        You can use `random_state` for reproducibility. However, note that unlike pandas,
        specifying a seed in Koalas/Spark does not guarantee the sampled rows will be fixed. The
result set depends on not only the seed, but also how the data is distributed across
machines and to some extent network randomness when shuffle operations are involved. Even
in the simplest case, the result set will depend on the system's CPU core count.
Parameters
----------
n : int, optional
Number of items to return. This is currently NOT supported. Use frac instead.
frac : float, optional
Fraction of axis items to return.
replace : bool, default False
Sample with or without replacement.
random_state : int, optional
Seed for the random number generator (if int).
Returns
-------
Series or DataFrame
A new object of same type as caller containing the sampled items.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'],
... columns=['num_legs', 'num_wings', 'num_specimen_seen'])
>>> df # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
A random 25% sample of the ``DataFrame``.
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
        Extract 40% random elements from the ``Series`` ``df['num_legs']``, with replacement,
so the same items could appear more than once.
>>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP
falcon 2
spider 8
spider 8
Name: num_legs, dtype: int64
Specifying the exact number of items to return is not supported at the moment.
>>> df.sample(n=5) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
NotImplementedError: Function sample currently does not support specifying ...
"""
# Note: we don't run any of the doctests because the result can change depending on the
# system's core count.
if n is not None:
raise NotImplementedError("Function sample currently does not support specifying "
"exact number of items to return. Use frac instead.")
if frac is None:
raise ValueError("frac must be specified.")
sdf = self._sdf.sample(withReplacement=replace, fraction=frac, seed=random_state)
return DataFrame(self._internal.copy(sdf=sdf))
def astype(self, dtype) -> 'DataFrame':
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')
>>> df
a b
0 1 1
1 2 2
2 3 3
Convert to float type:
>>> df.astype('float')
a b
0 1.0 1.0
1 2.0 2.0
2 3.0 3.0
        Convert back to int64 type:
>>> df.astype('int64')
a b
0 1 1
1 2 2
2 3 3
Convert column a to float type:
>>> df.astype({'a': float})
a b
0 1.0 1
1 2.0 2
2 3.0 3
"""
results = []
if is_dict_like(dtype):
for col_name in dtype.keys():
if col_name not in self.columns:
raise KeyError('Only a column name can be used for the '
'key in a dtype mappings argument.')
for col_name, col in self.iteritems():
if col_name in dtype:
results.append(col.astype(dtype=dtype[col_name]))
else:
results.append(col)
else:
for col_name, col in self.iteritems():
results.append(col.astype(dtype=dtype))
sdf = self._sdf.select(
self._internal.index_scols + list(map(lambda ser: ser._scol, results)))
return DataFrame(self._internal.copy(sdf=sdf))
def add_prefix(self, prefix):
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(prefix, str)
data_columns = self._internal.data_columns
sdf = self._sdf.select(self._internal.index_scols +
[self._internal.scol_for(name).alias(prefix + name)
for name in data_columns])
internal = self._internal.copy(
sdf=sdf, data_columns=[prefix + name for name in data_columns])
return DataFrame(internal)
def add_suffix(self, suffix):
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
            The string to add after each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(suffix, str)
data_columns = self._internal.data_columns
sdf = self._sdf.select(self._internal.index_scols +
[self._internal.scol_for(name).alias(name + suffix)
for name in data_columns])
internal = self._internal.copy(
sdf=sdf, data_columns=[name + suffix for name in data_columns])
return DataFrame(internal)
# TODO: include, and exclude should be implemented.
def describe(self, percentiles: Optional[List[float]] = None) -> 'DataFrame':
"""
Generate descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75]
A list of percentiles to be computed.
Returns
-------
Series or DataFrame
Summary statistics of the Series or Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
        DataFrame.std: Standard deviation of the observations.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``.
Currently only numeric data is supported.
Examples
--------
Describing a numeric ``Series``.
>>> s = ks.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
Name: 0, dtype: float64
Describing a ``DataFrame``. Only numeric fields are returned.
>>> df = ks.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0],
... 'object': ['a', 'b', 'c']
... },
... columns=['numeric1', 'numeric2', 'object'])
>>> df.describe()
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
25% 1.0 4.0
50% 2.0 5.0
75% 3.0 6.0
max 3.0 6.0
Describing a ``DataFrame`` and selecting custom percentiles.
>>> df = ks.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0]
... },
... columns=['numeric1', 'numeric2'])
>>> df.describe(percentiles = [0.85, 0.15])
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
15% 1.0 4.0
50% 2.0 5.0
85% 3.0 6.0
max 3.0 6.0
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric1.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
Name: numeric1, dtype: float64
Describing a column from a ``DataFrame`` by accessing it as
an attribute and selecting custom percentiles.
>>> df.numeric1.describe(percentiles = [0.85, 0.15])
count 3.0
mean 2.0
std 1.0
min 1.0
15% 1.0
50% 2.0
85% 3.0
max 3.0
Name: numeric1, dtype: float64
"""
exprs = []
data_columns = []
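        # Spark's summary() treats NaN as an ordinary double, so NaN values in
        # float/double columns are first replaced with null via F.nanvl to mimic
        # pandas, which excludes NaN from the statistics; non-numeric columns are skipped.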
for col in self.columns:
kseries = self[col]
spark_type = kseries.spark_type
if isinstance(spark_type, DoubleType) or isinstance(spark_type, FloatType):
exprs.append(F.nanvl(kseries._scol, F.lit(None)).alias(kseries.name))
data_columns.append(kseries.name)
elif isinstance(spark_type, NumericType):
exprs.append(kseries._scol)
data_columns.append(kseries.name)
if len(exprs) == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
if any((p < 0.0) or (p > 1.0) for p in percentiles):
raise ValueError("Percentiles should all be in the interval [0, 1]")
# appending 50% if not in percentiles already
percentiles = (percentiles + [0.5]) if 0.5 not in percentiles else percentiles
else:
percentiles = [0.25, 0.5, 0.75]
formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)]
stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"]
sdf = self._sdf.select(*exprs).summary(stats)
internal = _InternalFrame(sdf=sdf.replace("stddev", "std", subset='summary'),
data_columns=data_columns,
index_map=[('summary', None)])
return DataFrame(internal).astype('float64')
def _cum(self, func, skipna: bool):
        # This is used for cummin, cummax, cumsum, etc.
if func == F.min:
func = "cummin"
elif func == F.max:
func = "cummax"
elif func == F.sum:
func = "cumsum"
elif func.__name__ == "cumprod":
func = "cumprod"
if len(self._internal.index_columns) == 0:
raise ValueError("Index must be set.")
applied = []
for column in self.columns:
applied.append(getattr(self[column], func)(skipna))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf, data_columns=[c.name for c in applied])
return DataFrame(internal)
# TODO: implements 'keep' parameters
def drop_duplicates(self, subset=None, inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
DataFrame
>>> df = ks.DataFrame(
... {'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b'])
>>> df
a b
0 1 a
1 2 a
2 2 a
3 2 c
4 3 d
>>> df.drop_duplicates().sort_values(['a', 'b'])
a b
0 1 a
1 2 a
3 2 c
4 3 d
>>> df.drop_duplicates('a').sort_values(['a', 'b'])
a b
0 1 a
1 2 a
4 3 d
>>> df.drop_duplicates(['a', 'b']).sort_values(['a', 'b'])
a b
0 1 a
1 2 a
3 2 c
4 3 d
"""
if subset is None:
subset = self._internal.data_columns
elif not isinstance(subset, list):
subset = [subset]
sdf = self._sdf.drop_duplicates(subset=subset)
internal = self._internal.copy(sdf=sdf)
if inplace:
self._internal = internal
else:
return DataFrame(internal)
def reindex(self, labels: Optional[Any] = None, index: Optional[Any] = None,
columns: Optional[Any] = None, axis: Optional[Union[int, str]] = None,
copy: Optional[bool] = True, fill_value: Optional[Any] = None) -> 'DataFrame':
"""
Conform DataFrame to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
labels: array-like, optional
New labels / index to conform the axis specified by ‘axis’ to.
index, columns: array-like, optional
New labels / index to conform to, should be specified using keywords.
Preferably an Index object to avoid duplicating data
axis: int or str, optional
Axis to target. Can be either the axis name (‘index’, ‘columns’) or
number (0, 1).
copy : bool, default True
Return a new object, even if the passed indexes are the same.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
Returns
-------
DataFrame with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = ks.DataFrame({
... 'http_status': [200, 200, 404, 404, 301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index,
... columns=['http_status', 'response_time'])
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index).sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status response_time
Chrome 200.0 0.02
Comodo Dragon NaN NaN
IE10 404.0 0.08
Iceweasel NaN NaN
Safari 404.0 0.07
We can fill in the missing values by passing a value to
the keyword ``fill_value``.
>>> df.reindex(new_index, fill_value=0, copy=False).sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status response_time
Chrome 200 0.02
Comodo Dragon 0 0.00
IE10 404 0.08
Iceweasel 0 0.00
Safari 404 0.07
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent']).sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status user_agent
Chrome 200 NaN
Comodo Dragon 0 NaN
IE10 404 NaN
Iceweasel 0 NaN
Safari 404 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns").sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status user_agent
Chrome 200 NaN
Comodo Dragon 0 NaN
IE10 404 NaN
Iceweasel 0 NaN
Safari 404 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = ks.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2.sort_index() # doctest: +NORMALIZE_WHITESPACE
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2).sort_index() # doctest: +NORMALIZE_WHITESPACE
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
"""
if axis is not None and (index is not None or columns is not None):
raise TypeError("Cannot specify both 'axis' and any of 'index' or 'columns'.")
if labels is not None:
if axis in ('index', 0, None):
index = labels
elif axis in ('columns', 1):
columns = labels
else:
raise ValueError("No axis named %s for object type %s." % (axis, type(axis)))
if index is not None and not is_list_like(index):
raise TypeError("Index must be called with a collection of some kind, "
"%s was passed" % type(index))
if columns is not None and not is_list_like(columns):
raise TypeError("Columns must be called with a collection of some kind, "
"%s was passed" % type(columns))
df = self.copy()
if index is not None:
df = DataFrame(df._reindex_index(index))
if columns is not None:
df = DataFrame(df._reindex_columns(columns))
# Process missing values.
if fill_value is not None:
df = df.fillna(fill_value)
# Copy
if copy:
return df.copy()
else:
self._internal = df._internal
return self
def _reindex_index(self, index):
        # When axis is index, we can mimic pandas' behavior by using a right outer join.
index_column = self._internal.index_columns
assert len(index_column) <= 1, "Index should be single column or not set."
if len(index_column) == 1:
kser = ks.Series(list(index))
index_column = index_column[0]
labels = kser._kdf._sdf.select(kser._scol.alias(index_column))
else:
index_column = None
labels = ks.Series(index).to_frame()._sdf
joined_df = self._sdf.join(labels, on=index_column, how="right")
new_data_columns = filter(lambda x: x not in index_column, joined_df.columns)
if index_column is not None:
index_map = [(index_column, None)] # type: List[IndexMap]
internal = self._internal.copy(
sdf=joined_df,
data_columns=list(new_data_columns),
index_map=index_map)
else:
internal = self._internal.copy(
sdf=joined_df,
data_columns=list(new_data_columns))
return internal
def _reindex_columns(self, columns):
label_columns = list(columns)
null_columns = [
F.lit(np.nan).alias(label_column) for label_column
in label_columns if label_column not in self.columns]
# Concatenate all fields
sdf = self._sdf.select(
self._internal.index_scols +
list(map(self._internal.scol_for, self.columns)) +
null_columns)
# Only select label_columns (with index columns)
sdf = sdf.select(self._internal.index_scols + [scol_for(sdf, col) for col in label_columns])
return self._internal.copy(
sdf=sdf,
data_columns=label_columns)
def melt(self, id_vars=None, value_vars=None, var_name='variable',
value_name='value'):
"""
Unpivot a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar, default 'variable'
Name to use for the 'variable' column.
value_name : scalar, default 'value'
Name to use for the 'value' column.
Returns
-------
DataFrame
Unpivoted DataFrame.
Examples
--------
>>> df = ks.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> ks.melt(df)
variable value
0 A a
1 B 1
2 C 2
3 A b
4 B 3
5 C 4
6 A c
7 B 5
8 C 6
>>> df.melt(id_vars='A')
A variable value
0 a B 1
1 a C 2
2 b B 3
3 b C 4
4 c B 5
5 c C 6
>>> ks.melt(df, id_vars=['A', 'B'])
A B variable value
0 a 1 C 2
1 b 3 C 4
2 c 5 C 6
>>> df.melt(id_vars=['A'], value_vars=['C'])
A variable value
0 a C 2
1 b C 4
2 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> ks.melt(df, id_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
"""
if id_vars is None:
id_vars = []
if not isinstance(id_vars, (list, tuple, np.ndarray)):
id_vars = list(id_vars)
data_columns = self._internal.data_columns
if value_vars is None:
value_vars = []
if not isinstance(value_vars, (list, tuple, np.ndarray)):
value_vars = list(value_vars)
if len(value_vars) == 0:
value_vars = data_columns
data_columns = [data_column for data_column in data_columns if data_column not in id_vars]
sdf = self._sdf
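        # Build one (variable, value) struct per measured column and explode the
        # resulting array, so each original row becomes one row per unpivoted column.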
pairs = F.explode(F.array(*[
F.struct(*(
[F.lit(column).alias(var_name)] +
[self._internal.scol_for(column).alias(value_name)])
) for column in data_columns if column in value_vars]))
columns = (id_vars +
[F.col("pairs.%s" % var_name), F.col("pairs.%s" % value_name)])
exploded_df = sdf.withColumn("pairs", pairs).select(columns)
return DataFrame(exploded_df)
# TODO: axis, skipna, and many arguments should be implemented.
def all(self, axis: Union[int, str] = 0) -> bool:
"""
Return whether all elements are True.
Returns True unless there is at least one element within a series that is
False or equivalent (e.g. zero or empty)
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
Create a dataframe from a dictionary.
>>> df = ks.DataFrame({
... 'col1': [True, True, True],
... 'col2': [True, False, False],
... 'col3': [0, 0, 0],
... 'col4': [1, 2, 3],
... 'col5': [True, True, None],
... 'col6': [True, False, None]},
... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])
        Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
col3 False
col4 True
col5 True
col6 False
Name: all, dtype: bool
Returns
-------
Series
"""
if axis not in [0, 'index']:
raise ValueError('axis should be either 0 or "index" currently.')
applied = []
data_columns = self._internal.data_columns
for column in data_columns:
col = self[column]._scol
all_col = F.min(F.coalesce(col.cast('boolean'), F.lit(True)))
applied.append(F.when(all_col.isNull(), True).otherwise(all_col))
# TODO: there is a similar logic to transpose in, for instance,
# DataFrame.any, Series.quantile. Maybe we should deduplicate it.
sdf = self._sdf
internal_index_column = "__index_level_0__"
value_column = "value"
cols = []
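        # Transpose the single aggregated row into one row per column: wrap each
        # (column name, aggregated value) pair in a struct, collect the structs into
        # an array and explode it, so the column names become the resulting index.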
for data_column, applied_col in zip(data_columns, applied):
cols.append(F.struct(
F.lit(data_column).alias(internal_index_column),
applied_col.alias(value_column)))
sdf = sdf.select(
F.array(*cols).alias("arrays")
).select(F.explode(F.col("arrays")))
sdf = sdf.selectExpr("col.*")
internal = self._internal.copy(
sdf=sdf,
data_columns=[value_column],
index_map=[(internal_index_column, None)])
ser = DataFrame(internal)[value_column].rename("all")
return ser
# TODO: axis, skipna, and many arguments should be implemented.
def any(self, axis: Union[int, str] = 0) -> bool:
"""
Return whether any element is True.
Returns False unless there is at least one element within a series that is
True or equivalent (e.g. non-zero or non-empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
Create a dataframe from a dictionary.
>>> df = ks.DataFrame({
... 'col1': [False, False, False],
... 'col2': [True, False, False],
... 'col3': [0, 0, 1],
... 'col4': [0, 1, 2],
... 'col5': [False, False, None],
... 'col6': [True, False, None]},
... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])
        Default behaviour checks if any column-wise value returns True.
>>> df.any()
col1 False
col2 True
col3 True
col4 True
col5 False
col6 True
Name: any, dtype: bool
Returns
-------
Series
"""
if axis not in [0, 'index']:
raise ValueError('axis should be either 0 or "index" currently.')
applied = []
data_columns = self._internal.data_columns
for column in data_columns:
col = self[column]._scol
all_col = F.max(F.coalesce(col.cast('boolean'), F.lit(False)))
applied.append(F.when(all_col.isNull(), False).otherwise(all_col))
# TODO: there is a similar logic to transpose in, for instance,
# DataFrame.all, Series.quantile. Maybe we should deduplicate it.
sdf = self._sdf
internal_index_column = "__index_level_0__"
value_column = "value"
cols = []
for data_column, applied_col in zip(data_columns, applied):
cols.append(F.struct(
F.lit(data_column).alias(internal_index_column),
applied_col.alias(value_column)))
sdf = sdf.select(
F.array(*cols).alias("arrays")
).select(F.explode(F.col("arrays")))
sdf = sdf.selectExpr("col.*")
internal = self._internal.copy(
sdf=sdf,
data_columns=[value_column],
index_map=[(internal_index_column, None)])
ser = DataFrame(internal)[value_column].rename("any")
return ser
# TODO: add axis, numeric_only, pct, na_option parameter
def rank(self, method='average', ascending=True):
"""
Compute numerical data ranks (1 through n) along axis. Equal values are
assigned a rank that is the average of the ranks of those values.
        .. note:: the current implementation of rank uses Spark's Window without
            specifying partition specification. This leads to moving all data into a
            single partition in a single machine and could cause serious
            performance degradation. Avoid this method with very large datasets.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
Returns
-------
ranks : same type as caller
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns= ['A', 'B'])
>>> df
A B
0 1 4
1 2 3
2 2 2
3 3 1
>>> df.rank().sort_index()
A B
0 1.0 4.0
1 2.5 3.0
2 2.5 2.0
3 4.0 1.0
        If method is set to 'min', it uses the lowest rank in the group.
>>> df.rank(method='min').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 4.0 1.0
        If method is set to 'max', it uses the highest rank in the group.
>>> df.rank(method='max').sort_index()
A B
0 1.0 4.0
1 3.0 3.0
2 3.0 2.0
3 4.0 1.0
        If method is set to 'dense', it leaves no gaps in the ranking.
>>> df.rank(method='dense').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 3.0 1.0
"""
applied = []
for column in self._internal.data_columns:
applied.append(self[column].rank(method=method, ascending=ascending))
sdf = self._sdf.select(self._internal.index_columns + [column._scol for column in applied])
internal = self._internal.copy(sdf=sdf, data_columns=[column.name for column in applied])
return DataFrame(internal)
def filter(self, items=None, like=None, regex=None, axis=None):
"""
Subset rows or columns of dataframe according to labels in
the specified index.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : string
Keep labels from axis for which "like in label == True".
regex : string (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : int or string axis name
The axis to filter on. By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = ks.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
"""
if sum(x is not None for x in (items, like, regex)) > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive")
if axis not in ('index', 0, 'columns', 1, None):
raise ValueError("No axis named %s for object type %s." % (axis, type(axis)))
index_scols = self._internal.index_scols
sdf = self._sdf
if items is not None:
if is_list_like(items):
items = list(items)
else:
raise ValueError("items should be a list-like object.")
if axis in ('index', 0):
# TODO: support multi-index here
if len(index_scols) != 1:
raise ValueError("Single index must be specified.")
col = None
for item in items:
if col is None:
col = index_scols[0] == F.lit(item)
else:
col = col | (index_scols[0] == F.lit(item))
sdf = sdf.filter(col)
return DataFrame(self._internal.copy(sdf=sdf))
elif axis in ('columns', 1, None):
return self[items]
elif like is not None:
if axis in ('index', 0):
# TODO: support multi-index here
if len(index_scols) != 1:
raise ValueError("Single index must be specified.")
sdf = sdf.filter(index_scols[0].contains(like))
return DataFrame(self._internal.copy(sdf=sdf))
elif axis in ('columns', 1, None):
data_columns = self._internal.data_columns
output_columns = [c for c in data_columns if like in c]
return self[output_columns]
elif regex is not None:
if axis in ('index', 0):
# TODO: support multi-index here
if len(index_scols) != 1:
raise ValueError("Single index must be specified.")
sdf = sdf.filter(index_scols[0].rlike(regex))
return DataFrame(self._internal.copy(sdf=sdf))
elif axis in ('columns', 1, None):
data_columns = self._internal.data_columns
matcher = re.compile(regex)
output_columns = [c for c in data_columns if matcher.search(c) is not None]
return self[output_columns]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
def _get_from_multiindex_column(self, key):
""" Select columns from multi-index columns.
:param key: the multi-index column keys represented by tuple
:return: DataFrame or Series
"""
from databricks.koalas.series import Series
assert isinstance(key, tuple)
columns = list(zip(self._internal.data_columns, self._internal.column_index))
for k in key:
columns = [(column, idx[1:]) for column, idx in columns if idx[0] == k]
if len(columns) == 0:
raise KeyError(k)
recursive = False
if all(len(idx) == 0 or idx[0] == '' for _, idx in columns):
# If idx is empty or the head is '', drill down recursively.
recursive = True
for i, (col, idx) in enumerate(columns):
columns[i] = (col, tuple([str(key), *idx[1:]]))
column_index_names = None
if self._internal.column_index_names is not None:
# Manage column index names
column_index_level = set(len(idx) for _, idx in columns)
assert len(column_index_level) == 1
column_index_level = list(column_index_level)[0]
column_index_names = self._internal.column_index_names[-column_index_level:]
if all(len(idx) == 1 for _, idx in columns):
# If len(idx) == 1, then the result is not MultiIndex anymore
sdf = self._sdf.select(self._internal.index_scols +
[self._internal.scol_for(col).alias(idx[0])
for col, idx in columns])
kdf_or_ser = DataFrame(self._internal.copy(
sdf=sdf,
data_columns=[idx[0] for _, idx in columns],
column_index=None,
column_index_names=column_index_names))
else:
# Otherwise, the result is still MultiIndex and need to manage column_index.
sdf = self._sdf.select(self._internal.index_scols +
[self._internal.scol_for(col) for col, _ in columns])
kdf_or_ser = DataFrame(self._internal.copy(
sdf=sdf,
data_columns=[col for col, _ in columns],
column_index=[idx for _, idx in columns],
column_index_names=column_index_names))
if recursive:
kdf_or_ser = kdf_or_ser._pd_getitem(str(key))
if isinstance(kdf_or_ser, Series):
kdf_or_ser.name = str(key)
return kdf_or_ser
def _pd_getitem(self, key):
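        # Dispatch helper behind df[key]: a str (or tuple, for multi-index columns)
        # selects columns, a slice or list goes through .loc, and a boolean Series is
        # treated as a row filter; other key types are not supported.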
from databricks.koalas.series import Series
if key is None:
raise KeyError("none key")
if isinstance(key, str):
if self._internal.column_index is not None:
return self._get_from_multiindex_column((key,))
else:
try:
return Series(self._internal.copy(scol=self._internal.scol_for(key)),
anchor=self)
except AnalysisException:
raise KeyError(key)
if isinstance(key, tuple):
if self._internal.column_index is not None:
return self._get_from_multiindex_column(key)
else:
raise NotImplementedError(key)
elif np.isscalar(key):
raise NotImplementedError(key)
elif isinstance(key, slice):
return self.loc[key]
if isinstance(key, (pd.Series, np.ndarray, pd.Index)):
raise NotImplementedError(key)
if isinstance(key, list):
return self.loc[:, key]
if isinstance(key, DataFrame):
# TODO Should not implement alignment, too dangerous?
return Series(self._internal.copy(scol=self._internal.scol_for(key)), anchor=self)
if isinstance(key, Series):
# TODO Should not implement alignment, too dangerous?
# It is assumed to be only a filter, otherwise .loc should be used.
bcol = key._scol.cast("boolean")
return DataFrame(self._internal.copy(sdf=self._sdf.filter(bcol)))
raise NotImplementedError(key)
def __repr__(self):
pdf = self.head(max_display_count + 1).to_pandas()
pdf_length = len(pdf)
repr_string = repr(pdf.iloc[:max_display_count])
if pdf_length > max_display_count:
match = REPR_PATTERN.search(repr_string)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
footer = ("\n\n[Showing only the first {nrows} rows x {ncols} columns]"
.format(nrows=nrows, ncols=ncols))
return REPR_PATTERN.sub(footer, repr_string)
return repr_string
def _repr_html_(self):
pdf = self.head(max_display_count + 1).to_pandas()
pdf_length = len(pdf)
repr_html = pdf[:max_display_count]._repr_html_()
if pdf_length > max_display_count:
match = REPR_HTML_PATTERN.search(repr_html)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
by = chr(215)
footer = ('\n<p>Showing only the first {rows} rows {by} {cols} columns</p>\n</div>'
.format(rows=nrows,
by=by,
cols=ncols))
return REPR_HTML_PATTERN.sub(footer, repr_html)
return repr_html
def __getitem__(self, key):
return self._pd_getitem(key)
def __setitem__(self, key, value):
from databricks.koalas.series import Series
# For now, we don't support realignment against different dataframes.
# This is too expensive in Spark.
# Are we assigning against a column?
if isinstance(value, Series):
assert value._kdf is self, \
"Cannot combine column argument because it comes from a different dataframe"
if isinstance(key, (tuple, list)):
assert isinstance(value.schema, StructType)
field_names = value.schema.fieldNames()
kdf = self.assign(**{k: value[c] for k, c in zip(key, field_names)})
else:
kdf = self.assign(**{key: value})
self._internal = kdf._internal
def __getattr__(self, key: str) -> Any:
from databricks.koalas.series import Series
if key.startswith("__") or key.startswith("_pandas_") or key.startswith("_spark_"):
raise AttributeError(key)
if hasattr(_MissingPandasLikeDataFrame, key):
property_or_func = getattr(_MissingPandasLikeDataFrame, key)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
if self._internal.column_index is not None:
try:
return self._get_from_multiindex_column((key,))
except KeyError:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, key))
if key not in self._internal.data_columns:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, key))
return Series(self._internal.copy(scol=self._internal.scol_for(key)), anchor=self)
def __len__(self):
return self._sdf.count()
def __dir__(self):
fields = [f for f in self._sdf.schema.fieldNames() if ' ' not in f]
return super(DataFrame, self).__dir__() + fields
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
if sys.version_info >= (3, 7):
def __class_getitem__(cls, params):
# This is a workaround to support variadic generic in DataFrame in Python 3.7.
# See https://github.com/python/typing/issues/193
# we always wraps the given type hints by a tuple to mimic the variadic generic.
return super(cls, DataFrame).__class_getitem__(Tuple[params])
elif (3, 5) <= sys.version_info < (3, 7):
# This is a workaround to support variadic generic in DataFrame in Python 3.5+
# The implementation is in its metaclass so this flag is needed to distinguish
# Koalas DataFrame.
is_dataframe = None
def _reduce_spark_multi(sdf, aggs):
"""
Performs a reduction on a dataframe, the functions being known sql aggregate functions.
"""
assert isinstance(sdf, spark.DataFrame)
sdf0 = sdf.agg(*aggs)
l = sdf0.head(2)
assert len(l) == 1, (sdf, l)
row = l[0]
l2 = list(row)
assert len(l2) == len(aggs), (row, l2)
return l2
class _CachedDataFrame(DataFrame):
"""
Cached Koalas DataFrame, which corresponds to Pandas DataFrame logically, but internally
it caches the corresponding Spark DataFrame.
"""
def __init__(self, internal):
self._cached = internal._sdf.cache()
super(_CachedDataFrame, self).__init__(internal)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.unpersist()
def unpersist(self):
"""
The `unpersist` function is used to uncache the Koalas DataFrame when it
        is not used within a `with` statement.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df = df.cache()
To uncache the dataframe, use `unpersist` function
>>> df.unpersist()
"""
if self._cached.is_cached:
self._cached.unpersist()
| 1 | 10,940 | This is actually matched with pandas's result since the output is sorted. | databricks-koalas | py |
@@ -37,12 +37,12 @@ var ConfigCmd = &cobra.Command{
// Config defines the config schema
type Config struct {
- Endpoint string `yaml:"endpoint""`
- WalletList map[string]string `yaml:"walletList"`
+ Endpoint string `yaml:"endpoint""`
+ AccountList map[string]string `yaml:"walletList"`
}
func init() {
- ConfigDir = os.Getenv("HOME") + "/.config/ioctl"
+ ConfigDir = os.Getenv("HOME") + "/.config/ioctl/default"
if err := os.MkdirAll(ConfigDir, 0700); err != nil {
fmt.Println(err.Error())
os.Exit(1) | 1 | // Copyright (c) 2019 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package config
import (
"fmt"
"io/ioutil"
"os"
"strings"
"gopkg.in/yaml.v2"
"github.com/spf13/cobra"
)
var (
// ConfigDir is the directory to store config file
ConfigDir string
// DefaultConfigFile is the default config file name
DefaultConfigFile string
)
// ConfigCmd represents the config command
var ConfigCmd = &cobra.Command{
Use: "config",
Short: "Set or get configuration for ioctl",
ValidArgs: []string{"set", "get"},
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("Print: " + strings.Join(args, " "))
},
}
// Config defines the config schema
type Config struct {
Endpoint string `yaml:"endpoint""`
WalletList map[string]string `yaml:"walletList"`
}
func init() {
ConfigDir = os.Getenv("HOME") + "/.config/ioctl"
if err := os.MkdirAll(ConfigDir, 0700); err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
DefaultConfigFile = ConfigDir + "/config.default"
ConfigCmd.AddCommand(configGetEndpointCmd)
ConfigCmd.AddCommand(configSetEndpointCmd)
}
// LoadConfig loads config file in yaml format
func LoadConfig() (Config, error) {
w := Config{
WalletList: make(map[string]string),
}
in, err := ioutil.ReadFile(DefaultConfigFile)
if err == nil {
if err := yaml.Unmarshal(in, &w); err != nil {
return w, err
}
} else if !os.IsNotExist(err) {
return w, err
}
return w, nil
}
| 1 | 15,893 | struct field tag `yaml:"endpoint""` not compatible with reflect.StructTag.Get: key:"value" pairs not separated by spaces (from `govet`) | iotexproject-iotex-core | go |
@@ -41,6 +41,8 @@ TopicImpl::TopicImpl(
, qos_(&qos == &TOPIC_QOS_DEFAULT ? participant_->get_default_topic_qos() : qos)
, listener_(listen)
, user_topic_(nullptr)
+ , handle_()
+ , num_refs_(0u)
{
}
| 1 | // Copyright 2020 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* TopicImpl.cpp
*
*/
#include <fastdds/dds/topic/Topic.hpp>
#include <fastdds/topic/TopicImpl.hpp>
#include <fastdds/domain/DomainParticipantImpl.hpp>
#include <fastdds/dds/topic/TypeSupport.hpp>
#include <fastdds/dds/log/Log.hpp>
#include <functional>
namespace eprosima {
namespace fastdds {
namespace dds {
TopicImpl::TopicImpl(
DomainParticipantImpl* p,
TypeSupport type_support,
const TopicQos& qos,
TopicListener* listen)
: participant_(p)
, type_support_(type_support)
, qos_(&qos == &TOPIC_QOS_DEFAULT ? participant_->get_default_topic_qos() : qos)
, listener_(listen)
, user_topic_(nullptr)
{
}
TopicImpl::~TopicImpl()
{
delete user_topic_;
}
ReturnCode_t TopicImpl::check_qos(
const TopicQos& qos)
{
if (PERSISTENT_DURABILITY_QOS == qos.durability().kind)
{
logError(DDS_QOS_CHECK, "PERSISTENT Durability not supported");
return ReturnCode_t::RETCODE_UNSUPPORTED;
}
if (BY_SOURCE_TIMESTAMP_DESTINATIONORDER_QOS == qos.destination_order().kind)
{
logError(DDS_QOS_CHECK, "BY SOURCE TIMESTAMP DestinationOrder not supported");
return ReturnCode_t::RETCODE_UNSUPPORTED;
}
if (BEST_EFFORT_RELIABILITY_QOS == qos.reliability().kind &&
EXCLUSIVE_OWNERSHIP_QOS == qos.ownership().kind)
{
logError(DDS_QOS_CHECK, "BEST_EFFORT incompatible with EXCLUSIVE ownership");
return ReturnCode_t::RETCODE_INCONSISTENT_POLICY;
}
if (AUTOMATIC_LIVELINESS_QOS == qos.liveliness().kind ||
MANUAL_BY_PARTICIPANT_LIVELINESS_QOS == qos.liveliness().kind)
{
if (qos.liveliness().lease_duration < eprosima::fastrtps::c_TimeInfinite &&
qos.liveliness().lease_duration <= qos.liveliness().announcement_period)
{
logError(DDS_QOS_CHECK, "lease_duration <= announcement period.");
return ReturnCode_t::RETCODE_INCONSISTENT_POLICY;
}
}
return ReturnCode_t::RETCODE_OK;
}
void TopicImpl::set_qos(
TopicQos& to,
const TopicQos& from,
bool first_time)
{
(void)first_time;
to = from;
// Topic Qos is only used to create other Qos, so it can always be updated
}
bool TopicImpl::can_qos_be_updated(
const TopicQos& to,
const TopicQos& from)
{
(void)to;
(void)from;
return true;
}
const TopicQos& TopicImpl::get_qos() const
{
return qos_;
}
ReturnCode_t TopicImpl::set_qos(
const TopicQos& qos)
{
if (&qos == &TOPIC_QOS_DEFAULT)
{
const TopicQos& default_qos = participant_->get_default_topic_qos();
if (!can_qos_be_updated(qos_, default_qos))
{
return ReturnCode_t::RETCODE_IMMUTABLE_POLICY;
}
set_qos(qos_, default_qos, false);
return ReturnCode_t::RETCODE_OK;
}
ReturnCode_t ret_val = check_qos(qos);
if (!ret_val)
{
return ret_val;
}
if (!can_qos_be_updated(qos_, qos))
{
return ReturnCode_t::RETCODE_IMMUTABLE_POLICY;
}
set_qos(qos_, qos, false);
return ReturnCode_t::RETCODE_OK;
}
const TopicListener* TopicImpl::get_listener() const
{
return listener_;
}
ReturnCode_t TopicImpl::set_listener(
TopicListener* listener)
{
listener_ = listener;
return ReturnCode_t::RETCODE_OK;
}
DomainParticipant* TopicImpl::get_participant() const
{
return participant_->get_participant();
}
const Topic* TopicImpl::get_topic() const
{
return user_topic_;
}
const TypeSupport& TopicImpl::get_type() const
{
return type_support_;
}
} // dds
} // fastdds
} // eprosima
| 1 | 18,300 | This is not necessary. Topic inherits from DomainEntity which already has instance_handle_ | eProsima-Fast-DDS | cpp |
@@ -31,7 +31,7 @@ func TestICMPPortUnreachable(t *testing.T) {
_, ipv4, _, _, pktBytes, err := testPacketUDPDefault()
Expect(err).NotTo(HaveOccurred())
- runBpfUnitTest(t, "icmp_port_unreachable.c", func(bpfrun bpfProgRunFn) {
+ runBpfUnitTest(t, "icmp_port_unreachable.c", false, func(bpfrun bpfProgRunFn) {
res, err := bpfrun(pktBytes)
Expect(err).NotTo(HaveOccurred())
Expect(res.Retval).To(Equal(0)) | 1 | // Copyright (c) 2019-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ut_test
import (
"fmt"
"testing"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
. "github.com/onsi/gomega"
"github.com/projectcalico/felix/bpf/nat"
)
func TestICMPPortUnreachable(t *testing.T) {
RegisterTestingT(t)
_, ipv4, _, _, pktBytes, err := testPacketUDPDefault()
Expect(err).NotTo(HaveOccurred())
runBpfUnitTest(t, "icmp_port_unreachable.c", func(bpfrun bpfProgRunFn) {
res, err := bpfrun(pktBytes)
Expect(err).NotTo(HaveOccurred())
Expect(res.Retval).To(Equal(0))
Expect(res.dataOut).To(HaveLen(134)) // eth + ip + 64 + udp + ip + icmp
pktR := gopacket.NewPacket(res.dataOut, layers.LayerTypeEthernet, gopacket.Default)
fmt.Printf("pktR = %+v\n", pktR)
checkICMPPortUnreachable(pktR, ipv4)
})
}
func TestNATNoBackendFromHEP(t *testing.T) {
RegisterTestingT(t)
iphdr := *ipv4Default
_, ipv4, l4, _, pktBytes, err := testPacket(nil, &iphdr, nil, nil)
Expect(err).NotTo(HaveOccurred())
udp := l4.(*layers.UDP)
// Test with count as 1 but no backend. This results in a NAT backend lookup failure
natkey := nat.NewNATKey(ipv4.DstIP, uint16(udp.DstPort), uint8(ipv4.Protocol)).AsBytes()
err = natMap.Update(
natkey,
nat.NewNATValue(0, 1, 0, 0).AsBytes(),
)
Expect(err).NotTo(HaveOccurred())
defer func() {
err := natMap.Delete(natkey)
Expect(err).NotTo(HaveOccurred())
}()
runBpfTest(t, "calico_from_host_ep", nil, func(bpfrun bpfProgRunFn) {
res, err := bpfrun(pktBytes)
Expect(err).NotTo(HaveOccurred())
Expect(res.RetvalStr()).To(Equal("TC_ACT_UNSPEC"), "expected program to return TC_ACT_UNSPEC")
pktR := gopacket.NewPacket(res.dataOut, layers.LayerTypeEthernet, gopacket.Default)
fmt.Printf("pktR = %+v\n", pktR)
checkICMPPortUnreachable(pktR, ipv4)
})
	// Test with count as 0. This results in no backend being found after the frontend lookup, as count is 0.
err = natMap.Update(
natkey,
nat.NewNATValue(0, 0, 0, 0).AsBytes(),
)
Expect(err).NotTo(HaveOccurred())
runBpfTest(t, "calico_from_host_ep", nil, func(bpfrun bpfProgRunFn) {
res, err := bpfrun(pktBytes)
Expect(err).NotTo(HaveOccurred())
Expect(res.RetvalStr()).To(Equal("TC_ACT_UNSPEC"), "expected program to return TC_ACT_UNSPEC")
pktR := gopacket.NewPacket(res.dataOut, layers.LayerTypeEthernet, gopacket.Default)
fmt.Printf("pktR = %+v\n", pktR)
checkICMPPortUnreachable(pktR, ipv4)
})
}
func checkICMPPortUnreachable(pktR gopacket.Packet, ipv4 *layers.IPv4) {
ipv4L := pktR.Layer(layers.LayerTypeIPv4)
Expect(ipv4L).NotTo(BeNil())
ipv4R := ipv4L.(*layers.IPv4)
Expect(ipv4R.Protocol).To(Equal(layers.IPProtocolICMPv4))
Expect(ipv4R.SrcIP.String()).To(Equal(intfIP.String()))
Expect(ipv4R.DstIP).To(Equal(ipv4.SrcIP))
icmpL := pktR.Layer(layers.LayerTypeICMPv4)
	Expect(icmpL).NotTo(BeNil())
icmpR := icmpL.(*layers.ICMPv4)
Expect(icmpR.TypeCode).To(Equal(
layers.CreateICMPv4TypeCode(
layers.ICMPv4TypeDestinationUnreachable,
layers.ICMPv4CodePort,
)))
}
| 1 | 19,322 | Do we need the forXDP parameter in runBpfUnitTest? If not, I think better to revert in order to save a few changes. | projectcalico-felix | go |
@@ -68,8 +68,8 @@ func (c *cstorSnapshotCommand) validateOptions() error {
return nil
}
-// getSnapshotObj returns a filled object of CASSnapshot
-func (c *cstorSnapshotCommand) getSnapshotObj() *apis.CASSnapshot {
+// casSnapshot returns a filled object of CASSnapshot
+func (c *cstorSnapshotCommand) casSnapshot() *apis.CASSnapshot {
volName, _ := c.Data["volname"].(string)
snapName, _ := c.Data["snapname"].(string)
return &apis.CASSnapshot{ | 1 | /*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// cstorSnapshotCommand represents a cstor snapshot runtask command
//
// NOTE:
// This is an implementation of CommandRunner
type cstorSnapshotCommand struct {
*RunCommand
}
// instance returns specific cstor snapshot runtask command implementation based
// on the command's action
func (c *cstorSnapshotCommand) instance() (r Runner) {
switch c.Action {
case CreateCommandAction:
r = &cstorSnapshotCreate{c}
case DeleteCommandAction:
r = &cstorSnapshotDelete{c}
default:
r = ¬SupportedActionCommand{c.RunCommand}
}
return
}
// Run executes various cstor volume related operations
func (c *cstorSnapshotCommand) Run() (r RunCommandResult) {
return c.instance().Run()
}
// validateOptions checks if the required params are missing
func (c *cstorSnapshotCommand) validateOptions() error {
ip, _ := c.Data["ip"].(string)
volName, _ := c.Data["volname"].(string)
snapName, _ := c.Data["snapname"].(string)
if len(ip) == 0 {
return errors.Errorf("missing ip address")
}
if len(volName) == 0 {
return errors.Errorf("missing volume name")
}
if len(snapName) == 0 {
return errors.Errorf("missing snapshot name")
}
return nil
}
// getSnapshotObj returns a filled object of CASSnapshot
func (c *cstorSnapshotCommand) getSnapshotObj() *apis.CASSnapshot {
volName, _ := c.Data["volname"].(string)
snapName, _ := c.Data["snapname"].(string)
return &apis.CASSnapshot{
Spec: apis.SnapshotSpec{
VolumeName: volName,
},
ObjectMeta: metav1.ObjectMeta{
Name: snapName,
},
}
}
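A minimal, self-contained sketch of the follow-up this rename implies: every caller of the old getSnapshotObj name has to switch to casSnapshot. The types and main driver below are illustrative stand-ins, not the openebs/maya implementation.

package main

import "fmt"

// Toy stand-ins for the runtask command types; purely illustrative.
type casSnapshotSpec struct{ volName, snapName string }

type snapshotCommand struct{ data map[string]interface{} }

// casSnapshot is the renamed accessor (previously getSnapshotObj).
func (c *snapshotCommand) casSnapshot() casSnapshotSpec {
	volName, _ := c.data["volname"].(string)
	snapName, _ := c.data["snapname"].(string)
	return casSnapshotSpec{volName: volName, snapName: snapName}
}

func main() {
	c := &snapshotCommand{data: map[string]interface{}{
		"volname":  "pv-0001",
		"snapname": "snap-0001",
	}}
	// Call sites that previously used c.getSnapshotObj() now read:
	snap := c.casSnapshot()
	fmt.Printf("creating snapshot %q on volume %q\n", snap.snapName, snap.volName)
}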
| 1 | 10,052 | All the code that reference to this function must also be changed. | openebs-maya | go |
@@ -49,4 +49,12 @@ public interface UserManager {
public Role getRole(String roleName);
public boolean validateProxyUser(String proxyUser, User realUser);
+
+ /**
+ * @param username e.g. user alias
+ * @param groupName e.g. name of hadoop headless group / LDAP group
+   * @return Returns true if the user belongs to the group. This is used when verifying a user's
+   *     permission by checking their group membership
+ */
+ public boolean validateUserGroupMembership(String username, String groupName);
} | 1 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.user;
/**
* Interface for the UserManager. Implementors will have to handle the retrieval of the User object
* given the username and password.
*
* The constructor will be called with a azkaban.utils.Props object passed as the only parameter. If
* such a constructor doesn't exist, than the UserManager instantiation may fail.
*/
public interface UserManager {
/**
* Retrieves the user given the username and password to authenticate against.
*
* @throws UserManagerException If the username/password combination doesn't exist.
*/
public User getUser(String username, String password)
throws UserManagerException;
/**
* Returns true if the user is valid. This is used when adding permissions for users
*/
public boolean validateUser(String username);
/**
* Returns true if the group is valid. This is used when adding permissions for groups.
*/
public boolean validateGroup(String group);
/**
* Returns the user role. This may return null.
*/
public Role getRole(String roleName);
public boolean validateProxyUser(String proxyUser, User realUser);
}
| 1 | 23,000 | How is this new interface different from existing `validateProxyUser` method? If I understand correctly the `proxyUser` parameter is essentially a group and the method should verify if `realUser` belongs to it. | azkaban-azkaban | java |
@@ -38,6 +38,7 @@ using Nethermind.Store;
using Nethermind.Store.Bloom;
using Nethermind.TxPool;
using NUnit.Framework;
+using BlockTree = Nethermind.Blockchain.BlockTree;
namespace Nethermind.JsonRpc.Test.Modules.Trace
{ | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using Nethermind.Blockchain;
using Nethermind.Blockchain.Processing;
using Nethermind.Blockchain.Receipts;
using Nethermind.Blockchain.Rewards;
using Nethermind.Blockchain.Synchronization;
using Nethermind.Blockchain.Tracing;
using Nethermind.Blockchain.Validators;
using Nethermind.Core;
using Nethermind.Core.Extensions;
using Nethermind.Core.Specs;
using Nethermind.Core.Test.Builders;
using Nethermind.Crypto;
using Nethermind.Db;
using Nethermind.Evm;
using Nethermind.Evm.Tracing;
using Nethermind.JsonRpc.Modules.Trace;
using Nethermind.Logging;
using Nethermind.Specs;
using Nethermind.State;
using Nethermind.State.Repositories;
using Nethermind.Store;
using Nethermind.Store.Bloom;
using Nethermind.TxPool;
using NUnit.Framework;
namespace Nethermind.JsonRpc.Test.Modules.Trace
{
[Parallelizable(ParallelScope.Self)]
[TestFixture]
public class ParityStyleTracerTests
{
private BlockchainProcessor _processor;
private BlockTree _blockTree;
private Tracer _tracer;
[SetUp]
public void Setup()
{
IDb blocksDb = new MemDb();
IDb blocksInfoDb = new MemDb();
IDb headersDb = new MemDb();
ChainLevelInfoRepository repository = new ChainLevelInfoRepository(blocksInfoDb);
ISpecProvider specProvider = MainnetSpecProvider.Instance;
_blockTree = new BlockTree(blocksDb, headersDb, blocksInfoDb, repository, specProvider, NullTxPool.Instance, NullBloomStorage.Instance, new SyncConfig(), LimboLogs.Instance);
ISnapshotableDb stateDb = new StateDb();
ISnapshotableDb codeDb = new StateDb();
StateProvider stateProvider = new StateProvider(stateDb, codeDb, LimboLogs.Instance);
StorageProvider storageProvider = new StorageProvider(stateDb, stateProvider, LimboLogs.Instance);
BlockhashProvider blockhashProvider = new BlockhashProvider(_blockTree, LimboLogs.Instance);
VirtualMachine virtualMachine = new VirtualMachine(stateProvider, storageProvider, blockhashProvider, specProvider, LimboLogs.Instance);
TransactionProcessor transactionProcessor = new TransactionProcessor(specProvider, stateProvider, storageProvider, virtualMachine, LimboLogs.Instance);
BlockProcessor blockProcessor = new BlockProcessor(specProvider, Always.Valid, NoBlockRewards.Instance, transactionProcessor, stateDb, codeDb, stateProvider, storageProvider, NullTxPool.Instance, NullReceiptStorage.Instance, LimboLogs.Instance);
_processor = new BlockchainProcessor(_blockTree, blockProcessor, new CompositeDataRecoveryStep(new TxSignaturesRecoveryStep(new EthereumEcdsa(MainnetSpecProvider.Instance, LimboLogs.Instance), NullTxPool.Instance, LimboLogs.Instance)), LimboLogs.Instance, false);
Block genesis = Build.A.Block.Genesis.TestObject;
_blockTree.SuggestBlock(genesis);
_processor.Process(genesis, ProcessingOptions.None, NullBlockTracer.Instance);
_tracer = new Tracer(stateProvider, _processor);
}
[Test]
public void Can_trace_raw_parity_style()
{
TraceModule traceModule = new TraceModule(NullReceiptStorage.Instance, _tracer, _blockTree);
ResultWrapper<ParityTxTraceFromReplay> result = traceModule.trace_rawTransaction(Bytes.FromHexString("f889808609184e72a00082271094000000000000000000000000000000000000000080a47f74657374320000000000000000000000000000000000000000000000000000006000571ca08a8bbf888cfa37bbf0bb965423625641fc956967b81d12e23709cead01446075a01ce999b56a8a88504be365442ea61239198e23d1fce7d00fcfc5cd3b44b7215f"), new[] {"trace"});
Assert.NotNull(result.Data);
}
}
} | 1 | 23,765 | Do we have namespace conflicts? | NethermindEth-nethermind | .cs |
@@ -224,7 +224,7 @@ func (s *server) run(ctx context.Context, t cli.Telemetry) error {
if s.useFakeResponse {
service = api.NewFakeWebAPI()
} else {
- service = api.NewWebAPI(ds, sls, alss, cmds, cfg.ProjectMap(), encryptDecrypter, t.Logger)
+ service = api.NewWebAPI(ctx, ds, sls, alss, cmds, cfg.ProjectMap(), encryptDecrypter, cfg.Cache.TTLDuration(), t.Logger)
}
opts := []rpc.Option{
rpc.WithPort(s.webAPIPort), | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"errors"
"fmt"
"net/http"
"path/filepath"
"time"
"github.com/NYTimes/gziphandler"
jwtgo "github.com/dgrijalva/jwt-go"
"github.com/spf13/cobra"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"github.com/pipe-cd/pipe/pkg/admin"
"github.com/pipe-cd/pipe/pkg/app/api/api"
"github.com/pipe-cd/pipe/pkg/app/api/applicationlivestatestore"
"github.com/pipe-cd/pipe/pkg/app/api/authhandler"
"github.com/pipe-cd/pipe/pkg/app/api/commandstore"
"github.com/pipe-cd/pipe/pkg/app/api/pipedtokenverifier"
"github.com/pipe-cd/pipe/pkg/app/api/service/webservice"
"github.com/pipe-cd/pipe/pkg/app/api/stagelogstore"
"github.com/pipe-cd/pipe/pkg/cache/rediscache"
"github.com/pipe-cd/pipe/pkg/cli"
"github.com/pipe-cd/pipe/pkg/config"
"github.com/pipe-cd/pipe/pkg/crypto"
"github.com/pipe-cd/pipe/pkg/datastore"
"github.com/pipe-cd/pipe/pkg/datastore/firestore"
"github.com/pipe-cd/pipe/pkg/datastore/mongodb"
"github.com/pipe-cd/pipe/pkg/filestore"
"github.com/pipe-cd/pipe/pkg/filestore/gcs"
"github.com/pipe-cd/pipe/pkg/filestore/minio"
"github.com/pipe-cd/pipe/pkg/jwt"
"github.com/pipe-cd/pipe/pkg/model"
"github.com/pipe-cd/pipe/pkg/redis"
"github.com/pipe-cd/pipe/pkg/rpc"
"github.com/pipe-cd/pipe/pkg/version"
)
var (
defaultSigningMethod = jwtgo.SigningMethodHS256
)
type httpHandler interface {
Register(func(pattern string, handler func(http.ResponseWriter, *http.Request)))
}
type server struct {
pipedAPIPort int
webAPIPort int
httpPort int
adminPort int
staticDir string
cacheAddress string
gracePeriod time.Duration
tls bool
certFile string
keyFile string
insecureCookie bool
encryptionKeyFile string
configFile string
useFakeResponse bool
enableGRPCReflection bool
}
// NewCommand creates a new cobra command for executing api server.
func NewCommand() *cobra.Command {
s := &server{
pipedAPIPort: 9080,
webAPIPort: 9081,
httpPort: 9082,
adminPort: 9085,
staticDir: "pkg/app/web/public_files",
cacheAddress: "cache:6379",
gracePeriod: 30 * time.Second,
}
cmd := &cobra.Command{
Use: "server",
Short: "Start running server.",
RunE: cli.WithContext(s.run),
}
	cmd.Flags().IntVar(&s.pipedAPIPort, "piped-api-port", s.pipedAPIPort, "The port number used to run a grpc server that serves incoming piped requests.")
cmd.Flags().IntVar(&s.webAPIPort, "web-api-port", s.webAPIPort, "The port number used to run a grpc server that serves incoming web requests.")
cmd.Flags().IntVar(&s.httpPort, "http-port", s.httpPort, "The port number used to run a http server that serves incoming http requests such as auth callbacks or webhook events.")
cmd.Flags().IntVar(&s.adminPort, "admin-port", s.adminPort, "The port number used to run a HTTP server for admin tasks such as metrics, healthz.")
	cmd.Flags().StringVar(&s.staticDir, "static-dir", s.staticDir, "The directory that contains static assets.")
cmd.Flags().StringVar(&s.cacheAddress, "cache-address", s.cacheAddress, "The address to cache service.")
cmd.Flags().DurationVar(&s.gracePeriod, "grace-period", s.gracePeriod, "How long to wait for graceful shutdown.")
cmd.Flags().BoolVar(&s.tls, "tls", s.tls, "Whether running the gRPC server with TLS or not.")
cmd.Flags().StringVar(&s.certFile, "cert-file", s.certFile, "The path to the TLS certificate file.")
cmd.Flags().StringVar(&s.keyFile, "key-file", s.keyFile, "The path to the TLS key file.")
cmd.Flags().BoolVar(&s.insecureCookie, "insecure-cookie", s.insecureCookie, "Allow cookie to be sent over an unsecured HTTP connection.")
cmd.Flags().StringVar(&s.encryptionKeyFile, "encryption-key-file", s.encryptionKeyFile, "The path to file containing a random string of bits used to encrypt sensitive data.")
cmd.MarkFlagRequired("encryption-key-file")
cmd.Flags().StringVar(&s.configFile, "config-file", s.configFile, "The path to the configuration file.")
cmd.MarkFlagRequired("config-file")
// For debugging early in development
cmd.Flags().BoolVar(&s.useFakeResponse, "use-fake-response", s.useFakeResponse, "Whether the server responds fake response or not.")
cmd.Flags().BoolVar(&s.enableGRPCReflection, "enable-grpc-reflection", s.enableGRPCReflection, "Whether to enable the reflection service or not.")
return cmd
}
func (s *server) run(ctx context.Context, t cli.Telemetry) error {
group, ctx := errgroup.WithContext(ctx)
// Load control plane configuration from the specified file.
cfg, err := s.loadConfig()
if err != nil {
t.Logger.Error("failed to load control-plane configuration",
zap.String("config-file", s.configFile),
zap.Error(err),
)
return err
}
t.Logger.Info("successfully loaded control-plane configuration")
var (
pipedAPIServer *rpc.Server
webAPIServer *rpc.Server
)
ds, err := s.createDatastore(ctx, cfg, t.Logger)
if err != nil {
t.Logger.Error("failed to create datastore", zap.Error(err))
return err
}
defer func() {
if err := ds.Close(); err != nil {
t.Logger.Error("failed to close datastore client", zap.Error(err))
}
}()
	t.Logger.Info("successfully connected to data store")
fs, err := s.createFilestore(ctx, cfg, t.Logger)
if err != nil {
t.Logger.Error("failed to create filestore", zap.Error(err))
return err
}
defer func() {
if err := fs.Close(); err != nil {
t.Logger.Error("failed to close filestore client", zap.Error(err))
}
}()
t.Logger.Info("successfully connected to file store")
rd := redis.NewRedis(s.cacheAddress, "")
defer func() {
if err := rd.Close(); err != nil {
t.Logger.Error("failed to close redis client", zap.Error(err))
}
}()
cache := rediscache.NewTTLCache(rd, cfg.Cache.TTLDuration())
sls := stagelogstore.NewStore(fs, cache, t.Logger)
alss := applicationlivestatestore.NewStore(fs, cache, t.Logger)
cmds := commandstore.NewStore(ds, cache, t.Logger)
// Start a gRPC server for handling PipedAPI requests.
{
var (
verifier = pipedtokenverifier.NewVerifier(ctx, cfg, ds)
service = api.NewPipedAPI(ds, sls, alss, cmds, t.Logger)
opts = []rpc.Option{
rpc.WithPort(s.pipedAPIPort),
rpc.WithGracePeriod(s.gracePeriod),
rpc.WithLogger(t.Logger),
rpc.WithLogUnaryInterceptor(t.Logger),
rpc.WithPipedTokenAuthUnaryInterceptor(verifier, t.Logger),
rpc.WithRequestValidationUnaryInterceptor(),
}
)
if s.tls {
opts = append(opts, rpc.WithTLS(s.certFile, s.keyFile))
}
if s.enableGRPCReflection {
opts = append(opts, rpc.WithGRPCReflection())
}
pipedAPIServer = rpc.NewServer(service, opts...)
group.Go(func() error {
return pipedAPIServer.Run(ctx)
})
}
encryptDecrypter, err := crypto.NewAESEncryptDecrypter(s.encryptionKeyFile)
if err != nil {
t.Logger.Error("failed to create a new AES EncryptDecrypter", zap.Error(err))
return err
}
// Start a gRPC server for handling WebAPI requests.
{
verifier, err := jwt.NewVerifier(defaultSigningMethod, s.encryptionKeyFile)
if err != nil {
t.Logger.Error("failed to create a new JWT verifier", zap.Error(err))
return err
}
var service rpc.Service
if s.useFakeResponse {
service = api.NewFakeWebAPI()
} else {
service = api.NewWebAPI(ds, sls, alss, cmds, cfg.ProjectMap(), encryptDecrypter, t.Logger)
}
opts := []rpc.Option{
rpc.WithPort(s.webAPIPort),
rpc.WithGracePeriod(s.gracePeriod),
rpc.WithLogger(t.Logger),
rpc.WithJWTAuthUnaryInterceptor(verifier, webservice.NewRBACAuthorizer(), t.Logger),
rpc.WithRequestValidationUnaryInterceptor(),
}
if s.tls {
opts = append(opts, rpc.WithTLS(s.certFile, s.keyFile))
}
if s.enableGRPCReflection {
opts = append(opts, rpc.WithGRPCReflection())
}
webAPIServer = rpc.NewServer(service, opts...)
group.Go(func() error {
return webAPIServer.Run(ctx)
})
}
// Start an http server for handling incoming http requests
// such as auth callbacks, webhook events and
// serving static assets for web.
{
signer, err := jwt.NewSigner(defaultSigningMethod, s.encryptionKeyFile)
if err != nil {
t.Logger.Error("failed to create a new signer", zap.Error(err))
return err
}
mux := http.NewServeMux()
httpServer := &http.Server{
Addr: fmt.Sprintf(":%d", s.httpPort),
Handler: mux,
}
handlers := []httpHandler{
authhandler.NewHandler(
signer,
encryptDecrypter,
cfg.Address,
cfg.StateKey,
cfg.ProjectMap(),
cfg.SharedSSOConfigMap(),
datastore.NewProjectStore(ds),
!s.insecureCookie,
t.Logger,
),
}
for _, h := range handlers {
h.Register(mux.HandleFunc)
}
fs := http.FileServer(http.Dir(filepath.Join(s.staticDir, "assets")))
assetsHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", "public, max-age=31536000")
http.StripPrefix("/assets/", fs).ServeHTTP(w, r)
})
mux.Handle("/assets/", gziphandler.GzipHandler(assetsHandler))
mux.HandleFunc("/favicon.ico", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, filepath.Join(s.staticDir, "favicon.ico"))
})
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, filepath.Join(s.staticDir, "/index.html"))
})
group.Go(func() error {
return runHTTPServer(ctx, httpServer, s.gracePeriod, t.Logger)
})
}
// Start running admin server.
{
var (
ver = []byte(version.Get().Version)
admin = admin.NewAdmin(s.adminPort, s.gracePeriod, t.Logger)
)
admin.HandleFunc("/version", func(w http.ResponseWriter, r *http.Request) {
w.Write(ver)
})
admin.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("ok"))
})
admin.Handle("/metrics", t.PrometheusMetricsHandler())
group.Go(func() error {
return admin.Run(ctx)
})
}
// Wait until all components have finished.
// A terminating signal or a finish of any components
// could trigger the finish of server.
// This ensures that all components are good or no one.
if err := group.Wait(); err != nil {
t.Logger.Error("failed while running", zap.Error(err))
return err
}
return nil
}
func runHTTPServer(ctx context.Context, httpServer *http.Server, gracePeriod time.Duration, logger *zap.Logger) error {
doneCh := make(chan error, 1)
ctx, cancel := context.WithCancel(ctx)
go func() {
defer cancel()
logger.Info(fmt.Sprintf("start running http server on %s", httpServer.Addr))
if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			logger.Error("failed to listen and serve http server", zap.Error(err))
doneCh <- err
}
doneCh <- nil
}()
<-ctx.Done()
ctx, _ = context.WithTimeout(context.Background(), gracePeriod)
logger.Info("stopping http server")
if err := httpServer.Shutdown(ctx); err != nil {
logger.Error("failed to shutdown http server", zap.Error(err))
}
return <-doneCh
}
func (s *server) loadConfig() (*config.ControlPlaneSpec, error) {
cfg, err := config.LoadFromYAML(s.configFile)
if err != nil {
return nil, err
}
if cfg.Kind != config.KindControlPlane {
return nil, fmt.Errorf("wrong configuration kind for control-plane: %v", cfg.Kind)
}
return cfg.ControlPlaneSpec, nil
}
func (s *server) createDatastore(ctx context.Context, cfg *config.ControlPlaneSpec, logger *zap.Logger) (datastore.DataStore, error) {
switch cfg.Datastore.Type {
case model.DataStoreFirestore:
fsConfig := cfg.Datastore.FirestoreConfig
options := []firestore.Option{
firestore.WithCredentialsFile(fsConfig.CredentialsFile),
firestore.WithLogger(logger),
}
return firestore.NewFireStore(ctx, fsConfig.Project, fsConfig.Namespace, fsConfig.Environment, options...)
case model.DataStoreDynamoDB:
return nil, errors.New("dynamodb is unimplemented yet")
case model.DataStoreMongoDB:
mdConfig := cfg.Datastore.MongoDBConfig
options := []mongodb.Option{
mongodb.WithLogger(logger),
}
return mongodb.NewMongoDB(ctx, mdConfig.URL, mdConfig.Database, options...)
default:
return nil, fmt.Errorf("unknown datastore type %q", cfg.Datastore.Type)
}
}
func (s *server) createFilestore(ctx context.Context, cfg *config.ControlPlaneSpec, logger *zap.Logger) (filestore.Store, error) {
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
switch cfg.Filestore.Type {
case model.FileStoreGCS:
gcsCfg := cfg.Filestore.GCSConfig
options := []gcs.Option{
gcs.WithLogger(logger),
}
if gcsCfg.CredentialsFile != "" {
options = append(options, gcs.WithCredentialsFile(gcsCfg.CredentialsFile))
}
return gcs.NewStore(ctx, gcsCfg.Bucket, options...)
case model.FileStoreS3:
return nil, errors.New("s3 is unimplemented yet")
case model.FileStoreMINIO:
minioCfg := cfg.Filestore.MinioConfig
options := []minio.Option{
minio.WithLogger(logger),
}
s, err := minio.NewStore(minioCfg.Endpoint, minioCfg.Bucket, minioCfg.AccessKeyFile, minioCfg.SecretKeyFile, options...)
if err != nil {
return nil, fmt.Errorf("failed to generate minio store: %w", err)
}
if minioCfg.AutoCreateBucket {
if err := s.EnsureBucket(ctx); err != nil {
return nil, fmt.Errorf("failed to ensure bucket: %w", err)
}
}
return s, nil
default:
return nil, fmt.Errorf("unknown filestore type %q", cfg.Filestore.Type)
}
}
| 1 | 11,088 | We have several groups of caches so I think we should not use this shared configuration value for all of them. Instead of that, each cache group should have a separate configuration field, and the added `validation cache` should have its own field too. But this point, they can be fixed by `24h`. | pipe-cd-pipe | go |
@@ -26,12 +26,12 @@ class Replicate implements AdapterInterface, MetadataSupporter
/**
* @var AdapterInterface
*/
- protected $master;
+ protected $primary;
/**
* @var AdapterInterface
*/
- protected $slave;
+ protected $secondary;
/**
* @var LoggerInterface | 1 | <?php
declare(strict_types=1);
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Filesystem;
use Gaufrette\Adapter as AdapterInterface;
use Gaufrette\Adapter\MetadataSupporter;
use Gaufrette\Filesystem;
use Psr\Log\LoggerInterface;
/**
* @final since sonata-project/media-bundle 3.21.0
*/
class Replicate implements AdapterInterface, MetadataSupporter
{
/**
* @var AdapterInterface
*/
protected $master;
/**
* @var AdapterInterface
*/
protected $slave;
/**
* @var LoggerInterface
*/
protected $logger;
/**
* @param LoggerInterface $logger
*/
public function __construct(AdapterInterface $master, AdapterInterface $slave, ?LoggerInterface $logger = null)
{
$this->master = $master;
$this->slave = $slave;
$this->logger = $logger;
}
public function delete($key)
{
$ok = true;
try {
$this->slave->delete($key);
} catch (\Exception $e) {
if ($this->logger) {
$this->logger->critical(sprintf('Unable to delete %s, error: %s', $key, $e->getMessage()));
}
$ok = false;
}
try {
$this->master->delete($key);
} catch (\Exception $e) {
if ($this->logger) {
$this->logger->critical(sprintf('Unable to delete %s, error: %s', $key, $e->getMessage()));
}
$ok = false;
}
return $ok;
}
public function mtime($key)
{
return $this->master->mtime($key);
}
public function keys()
{
return $this->master->keys();
}
public function exists($key)
{
return $this->master->exists($key);
}
public function write($key, $content, ?array $metadata = null)
{
$ok = true;
$return = false;
try {
$return = $this->master->write($key, $content, $metadata);
} catch (\Exception $e) {
if ($this->logger) {
$this->logger->critical(sprintf('Unable to write %s, error: %s', $key, $e->getMessage()));
}
$ok = false;
}
try {
$return = $this->slave->write($key, $content, $metadata);
} catch (\Exception $e) {
if ($this->logger) {
$this->logger->critical(sprintf('Unable to write %s, error: %s', $key, $e->getMessage()));
}
$ok = false;
}
return $ok && $return;
}
public function read($key)
{
return $this->master->read($key);
}
public function rename($key, $new)
{
$ok = true;
try {
$this->master->rename($key, $new);
} catch (\Exception $e) {
if ($this->logger) {
$this->logger->critical(sprintf('Unable to rename %s, error: %s', $key, $e->getMessage()));
}
$ok = false;
}
try {
$this->slave->rename($key, $new);
} catch (\Exception $e) {
if ($this->logger) {
$this->logger->critical(sprintf('Unable to rename %s, error: %s', $key, $e->getMessage()));
}
$ok = false;
}
return $ok;
}
/**
* If one of the adapters can allow inserting metadata.
*
* @return bool true if supports metadata, false if not
*/
public function supportsMetadata()
{
return $this->master instanceof MetadataSupporter || $this->slave instanceof MetadataSupporter;
}
public function setMetadata($key, $metadata)
{
if ($this->master instanceof MetadataSupporter) {
$this->master->setMetadata($key, $metadata);
}
if ($this->slave instanceof MetadataSupporter) {
$this->slave->setMetadata($key, $metadata);
}
}
public function getMetadata($key)
{
if ($this->master instanceof MetadataSupporter) {
return $this->master->getMetadata($key);
} elseif ($this->slave instanceof MetadataSupporter) {
return $this->slave->getMetadata($key);
}
return [];
}
/**
* Gets the class names as an array for both adapters.
*
* @return string[]
*/
public function getAdapterClassNames()
{
return [
\get_class($this->master),
\get_class($this->slave),
];
}
public function createFile($key, Filesystem $filesystem)
{
return $this->master->createFile($key, $filesystem);
}
public function createFileStream($key, Filesystem $filesystem)
{
return $this->master->createFileStream($key, $filesystem);
}
public function listDirectory($directory = '')
{
return $this->master->listDirectory($directory);
}
public function isDirectory($key)
{
return $this->master->isDirectory($key);
}
}
| 1 | 11,361 | This is a BC break for extending classes, it can't be done on 3.x | sonata-project-SonataMediaBundle | php |
@@ -950,10 +950,13 @@ class Key(object):
if isinstance(md5, bytes):
md5 = md5.decode('utf-8')
- if self.etag != '"%s"' % md5:
- raise provider.storage_data_error(
- 'ETag from S3 did not match computed MD5. '
- '%s vs. %s' % (self.etag, self.md5))
+ # If you use customer-provided encryption keys, the ETag value that Amazon S3 returns in the response will not be the MD5 of the object.
+ server_side_encryption_customer_algorithm = response.getheader('x-amz-server-side-encryption-customer-algorithm', None)
+ if server_side_encryption_customer_algorithm is None:
+ if self.etag != '"%s"' % md5:
+ raise provider.storage_data_error(
+ 'ETag from S3 did not match computed MD5. '
+ '%s vs. %s' % (self.etag, self.md5))
return True
| 1 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Nexenta Systems Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import with_statement
import email.utils
import errno
import hashlib
import mimetypes
import os
import re
import base64
import binascii
import math
import boto.utils
from boto.compat import BytesIO, six, urllib
from boto.exception import BotoClientError
from boto.exception import StorageDataError
from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.s3.keyfile import KeyFile
from boto.s3.user import User
from boto import UserAgent
from boto.utils import compute_md5, compute_hash
from boto.utils import find_matching_headers
from boto.utils import merge_headers_by_name
try:
from hashlib import md5
except ImportError:
from md5 import md5
class Key(object):
"""
Represents a key (object) in an S3 bucket.
:ivar bucket: The parent :class:`boto.s3.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in S3.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | REDUCED_REDUNDANCY | GLACIER
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar version_id: The version ID of this object, if it is a versioned
object.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
"""
DefaultContentType = 'application/octet-stream'
RestoreBody = """<?xml version="1.0" encoding="UTF-8"?>
<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Days>%s</Days>
</RestoreRequest>"""
BufferSize = boto.config.getint('Boto', 'key_buffer_size', 8192)
# The object metadata fields a user can set, other than custom metadata
# fields (i.e., those beginning with a provider-specific prefix like
# x-amz-meta).
base_user_settable_fields = set(["cache-control", "content-disposition",
"content-encoding", "content-language",
"content-md5", "content-type"])
_underscore_base_user_settable_fields = set()
for f in base_user_settable_fields:
_underscore_base_user_settable_fields.add(f.replace('-', '_'))
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
self.metadata = {}
self.cache_control = None
self.content_type = self.DefaultContentType
self.content_encoding = None
self.content_disposition = None
self.content_language = None
self.filename = None
self.etag = None
self.is_latest = False
self.last_modified = None
self.owner = None
self.storage_class = 'STANDARD'
self.path = None
self.resp = None
self.mode = None
self.size = None
self.version_id = None
self.source_version_id = None
self.delete_marker = False
self.encrypted = None
# If the object is being restored, this attribute will be set to True.
# If the object is restored, it will be set to False. Otherwise this
# value will be None. If the restore is completed (ongoing_restore =
# False), the expiry_date will be populated with the expiry date of the
# restored object.
self.ongoing_restore = None
self.expiry_date = None
self.local_hashes = {}
def __repr__(self):
if self.bucket:
return '<Key: %s,%s>' % (self.bucket.name, self.name)
else:
return '<Key: None,%s>' % self.name
def __iter__(self):
return self
@property
def provider(self):
provider = None
if self.bucket and self.bucket.connection:
provider = self.bucket.connection.provider
return provider
def _get_key(self):
return self.name
def _set_key(self, value):
self.name = value
    key = property(_get_key, _set_key)
def _get_md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
return binascii.b2a_hex(self.local_hashes['md5'])
def _set_md5(self, value):
if value:
self.local_hashes['md5'] = binascii.a2b_hex(value)
elif 'md5' in self.local_hashes:
self.local_hashes.pop('md5', None)
    md5 = property(_get_md5, _set_md5)
def _get_base64md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
md5 = self.local_hashes['md5']
if not isinstance(md5, bytes):
md5 = md5.encode('utf-8')
return binascii.b2a_base64(md5).decode('utf-8').rstrip('\n')
def _set_base64md5(self, value):
if value:
if not isinstance(value, six.string_types):
value = value.decode('utf-8')
self.local_hashes['md5'] = binascii.a2b_base64(value)
elif 'md5' in self.local_hashes:
del self.local_hashes['md5']
    base64md5 = property(_get_base64md5, _set_base64md5)
def get_md5_from_hexdigest(self, md5_hexdigest):
"""
A utility function to create the 2-tuple (md5hexdigest, base64md5)
from just having a precalculated md5_hexdigest.
"""
digest = binascii.unhexlify(md5_hexdigest)
base64md5 = base64.encodestring(digest)
if base64md5[-1] == '\n':
base64md5 = base64md5[0:-1]
return (md5_hexdigest, base64md5)
def handle_encryption_headers(self, resp):
provider = self.bucket.connection.provider
if provider.server_side_encryption_header:
self.encrypted = resp.getheader(
provider.server_side_encryption_header, None)
else:
self.encrypted = None
def handle_version_headers(self, resp, force=False):
provider = self.bucket.connection.provider
# If the Key object already has a version_id attribute value, it
# means that it represents an explicit version and the user is
# doing a get_contents_*(version_id=<foo>) to retrieve another
# version of the Key. In that case, we don't really want to
# overwrite the version_id in this Key object. Comprende?
if self.version_id is None or force:
self.version_id = resp.getheader(provider.version_id, None)
self.source_version_id = resp.getheader(provider.copy_source_version_id,
None)
if resp.getheader(provider.delete_marker, 'false') == 'true':
self.delete_marker = True
else:
self.delete_marker = False
def handle_restore_headers(self, response):
provider = self.bucket.connection.provider
header = response.getheader(provider.restore_header)
if header is None:
return
parts = header.split(',', 1)
for part in parts:
key, val = [i.strip() for i in part.split('=')]
val = val.replace('"', '')
if key == 'ongoing-request':
self.ongoing_restore = True if val.lower() == 'true' else False
elif key == 'expiry-date':
self.expiry_date = val
def handle_addl_headers(self, headers):
"""
Used by Key subclasses to do additional, provider-specific
processing of response headers. No-op for this base class.
"""
pass
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string
(ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
if self.resp is None:
self.mode = 'r'
provider = self.bucket.connection.provider
self.resp = self.bucket.connection.make_request(
'GET', self.bucket.name, self.name, headers,
query_args=query_args,
override_num_retries=override_num_retries)
if self.resp.status < 199 or self.resp.status > 299:
body = self.resp.read()
raise provider.storage_response_error(self.resp.status,
self.resp.reason, body)
response_headers = self.resp.msg
self.metadata = boto.utils.get_aws_metadata(response_headers,
provider)
for name, value in response_headers.items():
# To get correct size for Range GETs, use Content-Range
# header if one was returned. If not, use Content-Length
# header.
if (name.lower() == 'content-length' and
'Content-Range' not in response_headers):
self.size = int(value)
elif name.lower() == 'content-range':
end_range = re.sub('.*/(.*)', '\\1', value)
self.size = int(end_range)
elif name.lower() == 'etag':
self.etag = value
elif name.lower() == 'content-type':
self.content_type = value
elif name.lower() == 'content-encoding':
self.content_encoding = value
elif name.lower() == 'content-language':
self.content_language = value
elif name.lower() == 'last-modified':
self.last_modified = value
elif name.lower() == 'cache-control':
self.cache_control = value
elif name.lower() == 'content-disposition':
self.content_disposition = value
self.handle_version_headers(self.resp)
self.handle_encryption_headers(self.resp)
self.handle_restore_headers(self.resp)
self.handle_addl_headers(self.resp.getheaders())
def open_write(self, headers=None, override_num_retries=None):
"""
Open this key for writing.
Not yet implemented
:type headers: dict
:param headers: Headers to pass in the write request
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying PUT.
"""
raise BotoClientError('Not Implemented')
def open(self, mode='r', headers=None, query_args=None,
override_num_retries=None):
if mode == 'r':
self.mode = 'r'
self.open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries)
elif mode == 'w':
self.mode = 'w'
self.open_write(headers=headers,
override_num_retries=override_num_retries)
else:
raise BotoClientError('Invalid mode: %s' % mode)
closed = False
def close(self, fast=False):
"""
Close this key.
:type fast: bool
:param fast: True if you want the connection to be closed without first
reading the content. This should only be used in cases where subsequent
calls don't need to return the content from the open HTTP connection.
Note: As explained at
http://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.getresponse,
callers must read the whole response before sending a new request to the
server. Calling Key.close(fast=True) and making a subsequent request to
the server will work because boto will get an httplib exception and
close/reopen the connection.
"""
if self.resp and not fast:
self.resp.read()
self.resp = None
self.mode = None
self.closed = True
def next(self):
"""
By providing a next method, the key object supports use as an iterator.
For example, you can now say:
for bytes in key:
write bytes to a file or whatever
All of the HTTP connection stuff is handled for you.
"""
self.open_read()
data = self.resp.read(self.BufferSize)
if not data:
self.close()
raise StopIteration
return data
# Python 3 iterator support
__next__ = next
def read(self, size=0):
self.open_read()
if size == 0:
data = self.resp.read()
else:
data = self.resp.read(size)
if not data:
self.close()
return data
def change_storage_class(self, new_storage_class, dst_bucket=None,
validate_dst_bucket=True):
"""
Change the storage class of an existing key.
Depending on whether a different destination bucket is supplied
or not, this will either move the item within the bucket, preserving
        all metadata and ACL info but changing the storage class, or it
will copy the item to the provided destination bucket, also
preserving metadata and ACL info.
:type new_storage_class: string
:param new_storage_class: The new storage class for the Key.
Possible values are:
* STANDARD
* REDUCED_REDUNDANCY
:type dst_bucket: string
:param dst_bucket: The name of a destination bucket. If not
provided the current bucket of the key will be used.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
"""
if new_storage_class == 'STANDARD':
return self.copy(self.bucket.name, self.name,
reduced_redundancy=False, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
elif new_storage_class == 'REDUCED_REDUNDANCY':
return self.copy(self.bucket.name, self.name,
reduced_redundancy=True, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
else:
raise BotoClientError('Invalid storage class: %s' %
new_storage_class)
def copy(self, dst_bucket, dst_key, metadata=None,
reduced_redundancy=False, preserve_acl=False,
encrypt_key=False, validate_dst_bucket=True):
"""
Copy this Key to another bucket.
:type dst_bucket: string
:param dst_bucket: The name of the destination bucket
:type dst_key: string
:param dst_key: The name of the destination key
:type metadata: dict
:param metadata: Metadata to be associated with new key. If
metadata is supplied, it will replace the metadata of the
source key being copied. If no metadata is supplied, the
source key's metadata will be copied to the new key.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will force the
storage class of the new Key to be REDUCED_REDUNDANCY
regardless of the storage class of the key being copied.
The Reduced Redundancy Storage (RRS) feature of S3,
provides lower redundancy at lower storage cost.
:type preserve_acl: bool
:param preserve_acl: If True, the ACL from the source key will
be copied to the destination key. If False, the
destination key will have the default ACL. Note that
preserving the ACL in the new key object will require two
additional API calls to S3, one to retrieve the current
ACL and one to set that ACL on the new object. If you
don't care about the ACL, a value of False will be
significantly more efficient.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
"""
dst_bucket = self.bucket.connection.lookup(dst_bucket,
validate_dst_bucket)
if reduced_redundancy:
storage_class = 'REDUCED_REDUNDANCY'
else:
storage_class = self.storage_class
return dst_bucket.copy_key(dst_key, self.bucket.name,
self.name, metadata,
storage_class=storage_class,
preserve_acl=preserve_acl,
encrypt_key=encrypt_key)
def startElement(self, name, attrs, connection):
if name == 'Owner':
self.owner = User(self)
return self.owner
else:
return None
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
else:
setattr(self, name, value)
def exists(self, headers=None):
"""
Returns True if the key exists
:rtype: bool
:return: Whether the key exists on S3
"""
return bool(self.bucket.lookup(self.name, headers=headers))
def delete(self, headers=None):
"""
Delete this key from S3
"""
return self.bucket.delete_key(self.name, version_id=self.version_id,
headers=headers)
def get_metadata(self, name):
return self.metadata.get(name)
def set_metadata(self, name, value):
# Ensure that metadata that is vital to signing is in the correct
# case. Applies to ``Content-Type`` & ``Content-MD5``.
if name.lower() == 'content-type':
self.metadata['Content-Type'] = value
elif name.lower() == 'content-md5':
self.metadata['Content-MD5'] = value
else:
self.metadata[name] = value
def update_metadata(self, d):
self.metadata.update(d)
# convenience methods for setting/getting ACL
def set_acl(self, acl_str, headers=None):
if self.bucket is not None:
self.bucket.set_acl(acl_str, self.name, headers=headers)
def get_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers)
def get_xml_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers)
def set_xml_acl(self, acl_str, headers=None):
if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)
def set_canned_acl(self, acl_str, headers=None):
return self.bucket.set_canned_acl(acl_str, self.name, headers)
def get_redirect(self):
"""Return the redirect location configured for this key.
If no redirect is configured (via set_redirect), then None
will be returned.
"""
response = self.bucket.connection.make_request(
'HEAD', self.bucket.name, self.name)
if response.status == 200:
return response.getheader('x-amz-website-redirect-location')
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def set_redirect(self, redirect_location, headers=None):
"""Configure this key to redirect to another location.
When the bucket associated with this key is accessed from the website
endpoint, a 301 redirect will be issued to the specified
`redirect_location`.
:type redirect_location: string
:param redirect_location: The location to redirect.
"""
if headers is None:
headers = {}
else:
headers = headers.copy()
headers['x-amz-website-redirect-location'] = redirect_location
response = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers)
if response.status == 200:
return True
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def make_public(self, headers=None):
return self.bucket.set_canned_acl('public-read', self.name, headers)
def generate_url(self, expires_in, method='GET', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None,
policy=None, reduced_redundancy=False, encrypt_key=False):
"""
Generate a URL to access this key.
:type expires_in: int
:param expires_in: How long the url is valid for, in seconds
:type method: string
:param method: The method to use for retrieving the file
(default is GET)
:type headers: dict
:param headers: Any headers to pass along in the request
:type query_auth: bool
:param query_auth:
:type force_http: bool
:param force_http: If True, http will be used instead of https.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type expires_in_absolute: bool
:param expires_in_absolute:
:type version_id: string
:param version_id: The version_id of the object to GET. If specified
this overrides any value in the key.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:rtype: string
:return: The URL to access the key
"""
provider = self.bucket.connection.provider
version_id = version_id or self.version_id
if headers is None:
headers = {}
else:
headers = headers.copy()
# add headers accordingly (usually PUT case)
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
return self.bucket.connection.generate_url(expires_in, method,
self.bucket.name, self.name,
headers, query_auth,
force_http,
response_headers,
expires_in_absolute,
version_id)
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None):
"""
Upload a file to a key into a bucket on S3.
:type fp: file
:param fp: The file pointer to upload. The file pointer must
            point at the offset from which you wish to upload.
ie. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
bytes parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type query_args: string
:param query_args: (optional) Arguments to pass in the query string.
:type chunked_transfer: boolean
:param chunked_transfer: (optional) If true, we use chunked
Transfer-Encoding.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Less bytes may be available.
"""
self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
def _send_file_internal(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None,
hash_algs=None):
provider = self.bucket.connection.provider
try:
spos = fp.tell()
except IOError:
spos = None
self.read_from_stream = False
# If hash_algs is unset and the MD5 hasn't already been computed,
# default to an MD5 hash_alg to hash the data on-the-fly.
if hash_algs is None and not self.md5:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
def sender(http_conn, method, path, data, headers):
# This function is called repeatedly for temporary retries
# so we must be sure the file pointer is pointing at the
# start of the data.
if spos is not None and spos != fp.tell():
fp.seek(spos)
elif spos is None and self.read_from_stream:
# if seek is not supported, and we've read from this
# stream already, then we need to abort retries to
# avoid setting bad data.
raise provider.storage_data_error(
'Cannot retry failed request. fp does not support seeking.')
# If the caller explicitly specified host header, tell putrequest
# not to add a second host header. Similarly for accept-encoding.
skips = {}
if boto.utils.find_matching_headers('host', headers):
skips['skip_host'] = 1
if boto.utils.find_matching_headers('accept-encoding', headers):
skips['skip_accept_encoding'] = 1
http_conn.putrequest(method, path, **skips)
for key in headers:
http_conn.putheader(key, headers[key])
http_conn.endheaders()
save_debug = self.bucket.connection.debug
self.bucket.connection.debug = 0
# If the debuglevel < 4 we don't want to show connection
# payload, so turn off HTTP connection-level debug output (to
# be restored below).
# Use the getattr approach to allow this to work in AppEngine.
if getattr(http_conn, 'debuglevel', 0) < 4:
http_conn.set_debuglevel(0)
data_len = 0
if cb:
if size:
cb_size = size
elif self.size:
cb_size = self.size
else:
cb_size = 0
if chunked_transfer and cb_size == 0:
# For chunked Transfer, we call the cb for every 1MB
# of data transferred, except when we know size.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(
math.ceil(cb_size / self.BufferSize / (num_cb - 1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
bytes_togo = size
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
if spos is None:
# read at least something from a non-seekable fp.
self.read_from_stream = True
while chunk:
chunk_len = len(chunk)
data_len += chunk_len
if chunked_transfer:
http_conn.send('%x;\r\n' % chunk_len)
http_conn.send(chunk)
http_conn.send('\r\n')
else:
http_conn.send(chunk)
for alg in digesters:
digesters[alg].update(chunk)
if bytes_togo:
bytes_togo -= chunk_len
if bytes_togo <= 0:
break
if cb:
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
self.size = data_len
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if chunked_transfer:
http_conn.send('0\r\n')
# http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
http_conn.send('\r\n')
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
http_conn.set_debuglevel(save_debug)
self.bucket.connection.debug = save_debug
response = http_conn.getresponse()
body = response.read()
if not self.should_retry(response, chunked_transfer):
raise provider.storage_response_error(
response.status, response.reason, body)
return response
if not headers:
headers = {}
else:
headers = headers.copy()
# Overwrite user-supplied user-agent.
for header in find_matching_headers('User-Agent', headers):
del headers[header]
headers['User-Agent'] = UserAgent
if self.storage_class != 'STANDARD':
headers[provider.storage_class_header] = self.storage_class
if find_matching_headers('Content-Encoding', headers):
self.content_encoding = merge_headers_by_name(
'Content-Encoding', headers)
if find_matching_headers('Content-Language', headers):
self.content_language = merge_headers_by_name(
'Content-Language', headers)
content_type_headers = find_matching_headers('Content-Type', headers)
if content_type_headers:
# Some use cases need to suppress sending of the Content-Type
# header and depend on the receiving server to set the content
# type. This can be achieved by setting headers['Content-Type']
# to None when calling this method.
if (len(content_type_headers) == 1 and
headers[content_type_headers[0]] is None):
# Delete null Content-Type value to skip sending that header.
del headers[content_type_headers[0]]
else:
self.content_type = merge_headers_by_name(
'Content-Type', headers)
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
if self.content_type is None:
self.content_type = self.DefaultContentType
headers['Content-Type'] = self.content_type
else:
headers['Content-Type'] = self.content_type
if self.base64md5:
headers['Content-MD5'] = self.base64md5
if chunked_transfer:
headers['Transfer-Encoding'] = 'chunked'
#if not self.base64md5:
# headers['Trailer'] = "Content-MD5"
else:
headers['Content-Length'] = str(self.size)
# This is terrible. We need a SHA256 of the body for SigV4, but to do
# the chunked ``sender`` behavior above, the ``fp`` isn't available to
        # the auth mechanism (because closures). Detect if it's SigV4 & embellish
# while we can before the auth calculations occur.
if 'hmac-v4-s3' in self.bucket.connection._required_auth_capability():
headers['_sha256'] = compute_hash(fp, hash_algorithm=hashlib.sha256)[0]
headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
resp = self.bucket.connection.make_request(
'PUT',
self.bucket.name,
self.name,
headers,
sender=sender,
query_args=query_args
)
self.handle_version_headers(resp, force=True)
self.handle_addl_headers(resp.getheaders())
def should_retry(self, response, chunked_transfer=False):
provider = self.bucket.connection.provider
if not chunked_transfer:
if response.status in [500, 503]:
# 500 & 503 can be plain retries.
return True
if response.getheader('location'):
# If there's a redirect, plain retry.
return True
if 200 <= response.status <= 299:
self.etag = response.getheader('etag')
md5 = self.md5
if isinstance(md5, bytes):
md5 = md5.decode('utf-8')
if self.etag != '"%s"' % md5:
raise provider.storage_data_error(
'ETag from S3 did not match computed MD5. '
'%s vs. %s' % (self.etag, self.md5))
return True
if response.status == 400:
# The 400 must be trapped so the retry handler can check to
# see if it was a timeout.
# If ``RequestTimeout`` is present, we'll retry. Otherwise, bomb
# out.
body = response.read()
err = provider.storage_response_error(
response.status,
response.reason,
body
)
if err.error_code in ['RequestTimeout']:
raise PleaseRetryException(
"Saw %s, retrying" % err.error_code,
response=response
)
return False
def compute_md5(self, fp, size=None):
"""
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file
pointer will be reset to the same position before the
method returns.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being split
in place into different parts. Fewer bytes may be available.
"""
hex_digest, b64_digest, data_size = compute_md5(fp, size=size)
# Returned values are MD5 hash, base64 encoded MD5 hash, and data size.
# The internal implementation of compute_md5() needs to return the
# data size but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code) so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = data_size
return (hex_digest, b64_digest)
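# Editorial sketch (not part of the original file): a minimal example of how
# compute_md5() is typically used, with hypothetical names. The file pointer is
# reset and self.size is populated, so the same fp can go straight to an upload:
#
#     with open('data.bin', 'rb') as fp:
#         hex_md5, b64_md5 = key.compute_md5(fp)
#         key.set_contents_from_file(fp, md5=(hex_md5, b64_md5))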
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
reduced_redundancy=False, query_args=None,
size=None):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the
Content-Size and Content-MD5 in the header. So for huge
uploads, the delay in calculating MD5 is avoided but with a
penalty of inability to verify the integrity of the uploaded
data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Fewer bytes may be available.
"""
provider = self.bucket.connection.provider
if not provider.supports_chunked_transfer():
raise BotoClientError('%s does not support chunked transfer'
% provider.get_provider_name())
# Name of the Object should be specified explicitly for Streams.
if not self.name or self.name == '':
raise BotoClientError('Cannot determine the destination '
'object name for the given stream')
if headers is None:
headers = {}
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if self.bucket is not None:
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True, size=size)
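# Editorial sketch (not part of the original file): a hedged usage example for
# the streaming path above; 'bucket' and 'pipe' are hypothetical. Because the
# total size is unknown, the data is sent with Transfer-Encoding: chunked and
# without a Content-MD5 header, as described in the docstring.
#
#     key = bucket.new_key('logs/live.log')
#     key.set_contents_from_stream(pipe, replace=True)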
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False, query_args=None,
encrypt_key=False, size=None, rewind=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file pointed to by 'fp' as the
contents. The data is read from 'fp' from its current position until
'size' bytes have been read or EOF.
:type fp: file
:param fp: the file whose contents to upload
:type headers: dict
:param headers: Additional HTTP headers that will be sent with
the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will
first check to see if an object exists in the bucket with
the same key. If it does, it won't overwrite it. The
default value is True which will overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Fewer bytes may be available.
:type rewind: bool
:param rewind: (optional) If True, the file pointer (fp) will
be rewound to the start before any bytes are read from
it. The default behaviour is False which reads from the
current position of the file pointer (fp).
:rtype: int
:return: The number of bytes written to the key.
"""
provider = self.bucket.connection.provider
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
if rewind:
# caller requests reading from beginning of fp.
fp.seek(0, os.SEEK_SET)
else:
# The following seek/tell/seek logic is intended
# to detect applications using the older interface to
# set_contents_from_file(), which automatically rewound the
# file each time the Key was reused. This changed with commit
# 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
# split into multiple parts and uploaded in parallel, and at
# the time of that commit this check was added because otherwise
# older programs would get a success status and upload an empty
# object. Unfortunately, it's very inefficient for fp's implemented
# by KeyFile (used, for example, by gsutil when copying between
# providers). So, we skip the check for the KeyFile case.
# TODO: At some point consider removing this seek/tell/seek
# logic, after enough time has passed that it's unlikely any
# programs remain that assume the older auto-rewind interface.
if not isinstance(fp, KeyFile):
spos = fp.tell()
fp.seek(0, os.SEEK_END)
if fp.tell() == spos:
fp.seek(0, os.SEEK_SET)
if fp.tell() != spos:
# Raise an exception as this is likely a programming
# error whereby there is data before the fp but nothing
# after it.
fp.seek(spos)
raise AttributeError('fp is at EOF. Use rewind option '
'or seek() to data start.')
# seek back to the correct position.
fp.seek(spos)
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
# TODO - What if the provider doesn't support reduced redundancy?
# What if different providers provide different classes?
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket is not None:
if not md5 and provider.supports_chunked_transfer():
# defer md5 calculation so it happens on the fly, since
# we don't know anything about the size yet.
chunked_transfer = True
self.size = None
else:
chunked_transfer = False
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
size = key.size - fp.tell()
self.size = size
# At present both GCS and S3 use MD5 for the etag for
# non-multipart-uploaded objects. If the etag is 32 hex
# chars use it as an MD5, to avoid having to read the file
# twice while transferring.
if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
etag = key.etag.strip('"')
md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
if not md5:
# compute_md5() also sets self.size to the actual
# number of bytes read while computing the md5.
md5 = self.compute_md5(fp, size)
# adjust size if required
size = self.size
elif size:
self.size = size
else:
# If md5 is provided, we still need the size, so
# calculate it based on the bytes to the end of the content.
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
# return number of bytes written.
return self.size
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto S3
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file
if it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object
will be encrypted on the server-side by S3 and will be
stored in an encrypted form while at rest in S3.
:rtype: int
:return: The number of bytes written to the key.
"""
with open(filename, 'rb') as fp:
return self.set_contents_from_file(fp, headers, replace, cb,
num_cb, policy, md5,
reduced_redundancy,
encrypt_key=encrypt_key)
def set_contents_from_string(self, string_data, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
"""
if not isinstance(string_data, bytes):
string_data = string_data.encode("utf-8")
fp = BytesIO(string_data)
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key)
fp.close()
return r
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None):
"""
Retrieves a file from an S3 Key
:type fp: file
:param fp: File pointer to put the data into
:type headers: dict
:param headers: headers to send when retrieving the files
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
torrent=torrent, version_id=version_id,
override_num_retries=override_num_retries,
response_headers=response_headers,
hash_algs=None,
query_args=None)
def _get_file_internal(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, hash_algs=None, query_args=None):
if headers is None:
headers = {}
save_debug = self.bucket.connection.debug
if self.bucket.connection.debug == 1:
self.bucket.connection.debug = 0
query_args = query_args or []
if torrent:
query_args.append('torrent')
if hash_algs is None and not torrent:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
# If a version_id is passed in, use that. If not, check to see
# if the Key object has an explicit version_id and, if so, use that.
# Otherwise, don't pass a version_id query param.
if version_id is None:
version_id = self.version_id
if version_id:
query_args.append('versionId=%s' % version_id)
if response_headers:
for key in response_headers:
query_args.append('%s=%s' % (
key, urllib.parse.quote(response_headers[key])))
query_args = '&'.join(query_args)
self.open('r', headers, query_args=query_args,
override_num_retries=override_num_retries)
data_len = 0
if cb:
if self.size is None:
cb_size = 0
else:
cb_size = self.size
if self.size is None and num_cb != -1:
# If size is not available due to chunked transfer for example,
# we'll call the cb for every 1MB of data transferred.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
try:
for bytes in self:
fp.write(bytes)
data_len += len(bytes)
for alg in digesters:
digesters[alg].update(bytes)
if cb:
if cb_size > 0 and data_len >= cb_size:
break
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
except IOError as e:
if e.errno == errno.ENOSPC:
raise StorageDataError('Out of space for destination file '
'%s' % fp.name)
raise
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if self.size is None and not torrent and "Range" not in headers:
self.size = data_len
self.close()
self.bucket.connection.debug = save_debug
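# Editorial note (not part of the original file): a worked example of the
# callback granularity computed above, assuming the class default BufferSize
# of 8192 bytes (verify against your boto version):
#   * known size, 10 MiB object, num_cb=10:
#       cb_count = ceil(10485760 / 8192 / 9) = 143 chunks, so cb() fires
#       roughly every 1.17 MB (about 9 progress calls), plus the initial
#       call at 0 bytes and the final call on completion.
#   * unknown size (chunked transfer): cb_count = (1024 * 1024) / 8192 = 128
#       chunks, i.e. one progress call per MiB, matching the comment above.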
def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
"""
Get a torrent file (see to get_file)
:type fp: file
:param fp: The file pointer of where to put the torrent
:type headers: dict
:param headers: Headers to be passed
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
"""
return self.get_file(fp, headers, cb, num_cb, torrent=True)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Write the contents of the object to the file pointed
to by 'fp'.
:type fp: file-like object
:param fp: the file pointer to which the object's contents will be written
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
:type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
def get_contents_to_filename(self, filename, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Store contents of the object to a file named by 'filename'.
See get_contents_to_file method for details about the
parameters.
:type filename: string
:param filename: The filename of where to put the file contents
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
try:
with open(filename, 'wb') as fp:
self.get_contents_to_file(fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id,
res_download_handler=res_download_handler,
response_headers=response_headers)
except Exception:
os.remove(filename)
raise
# if last_modified date was sent from s3, try to set file's timestamp
if self.last_modified is not None:
try:
modified_tuple = email.utils.parsedate_tz(self.last_modified)
modified_stamp = int(email.utils.mktime_tz(modified_tuple))
os.utime(fp.name, (modified_stamp, modified_stamp))
except Exception:
pass
def get_contents_as_string(self, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
response_headers=None, encoding=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Return the contents of the object as a string.
See get_contents_to_file method for details about the
parameters.
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
:type encoding: str
:param encoding: The text encoding to use, such as ``utf-8``
or ``iso-8859-1``. If set, then a string will be returned.
Defaults to ``None`` and returns bytes.
:rtype: bytes or str
:returns: The contents of the file as bytes or a string
"""
fp = BytesIO()
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
value = fp.getvalue()
if encoding is not None:
value = value.decode(encoding)
return value
def add_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email grant
to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type email_address: string
:param email_address: The email address associated with the AWS
account you are granting the permission to.
:type recursive: boolean
:param recursive: A boolean value that controls whether the
command will apply the grant to all keys within the bucket
or not. The default value is False. By passing a True
value, the call will iterate through all keys in the
bucket and apply the same grant to each key. CAUTION: If
you have a lot of keys, this could take a long time!
"""
policy = self.get_acl(headers=headers)
policy.acl.add_email_grant(permission, email_address)
self.set_acl(policy, headers=headers)
def add_user_grant(self, permission, user_id, headers=None,
display_name=None):
"""
Convenience method that provides a quick way to add a canonical
user grant to a key. This method retrieves the current ACL,
creates a new grant based on the parameters passed in, adds that
grant to the ACL and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type user_id: string
:param user_id: The canonical user id associated with the AWS
account you are granting the permission to.
:type display_name: string
:param display_name: An optional string containing the user's
Display Name. Only required on Walrus.
"""
policy = self.get_acl(headers=headers)
policy.acl.add_user_grant(permission, user_id,
display_name=display_name)
self.set_acl(policy, headers=headers)
def _normalize_metadata(self, metadata):
if type(metadata) == set:
norm_metadata = set()
for k in metadata:
norm_metadata.add(k.lower())
else:
norm_metadata = {}
for k in metadata:
norm_metadata[k.lower()] = metadata[k]
return norm_metadata
def _get_remote_metadata(self, headers=None):
"""
Extracts metadata from existing URI into a dict, so we can
overwrite/delete from it to form the new set of metadata to apply to a
key.
"""
metadata = {}
for underscore_name in self._underscore_base_user_settable_fields:
if hasattr(self, underscore_name):
value = getattr(self, underscore_name)
if value:
# Generate HTTP field name corresponding to "_" named field.
field_name = underscore_name.replace('_', '-')
metadata[field_name.lower()] = value
# self.metadata contains custom metadata, which are all user-settable.
prefix = self.provider.metadata_prefix
for underscore_name in self.metadata:
field_name = underscore_name.replace('_', '-')
metadata['%s%s' % (prefix, field_name.lower())] = (
self.metadata[underscore_name])
return metadata
def set_remote_metadata(self, metadata_plus, metadata_minus, preserve_acl,
headers=None):
metadata_plus = self._normalize_metadata(metadata_plus)
metadata_minus = self._normalize_metadata(metadata_minus)
metadata = self._get_remote_metadata()
metadata.update(metadata_plus)
for h in metadata_minus:
if h in metadata:
del metadata[h]
src_bucket = self.bucket
# Boto prepends the meta prefix when adding headers, so strip prefix in
# metadata before sending back in to copy_key() call.
rewritten_metadata = {}
for h in metadata:
if (h.startswith('x-goog-meta-') or h.startswith('x-amz-meta-')):
rewritten_h = (h.replace('x-goog-meta-', '')
.replace('x-amz-meta-', ''))
else:
rewritten_h = h
rewritten_metadata[rewritten_h] = metadata[h]
metadata = rewritten_metadata
src_bucket.copy_key(self.name, self.bucket.name, self.name,
metadata=metadata, preserve_acl=preserve_acl,
headers=headers)
def restore(self, days, headers=None):
"""Restore an object from an archive.
:type days: int
:param days: The lifetime of the restored object (must
be at least 1 day). If the object is already restored
then this parameter can be used to readjust the lifetime
of the restored object. In this case, the days
param is with respect to the initial time of the request.
If the object has not been restored, this param is with
respect to the completion time of the request.
"""
response = self.bucket.connection.make_request(
'POST', self.bucket.name, self.name,
data=self.RestoreBody % days,
headers=headers, query_args='restore')
if response.status not in (200, 202):
provider = self.bucket.connection.provider
raise provider.storage_response_error(response.status,
response.reason,
response.read())
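# Editorial sketch (not part of the original file): a minimal example of the
# restore call above, assuming 'key' refers to an archived object:
#
#     key.restore(days=5)     # keep the restored copy available for 5 days
#     key.restore(days=10)    # if already restored, readjusts the lifetime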
| 1 | 10,222 | Line length (PEP8, should be 80 chars max) | boto-boto | py |
@@ -42,6 +42,8 @@ public abstract class MessageBuilder {
private final Context context;
private final MessageIdGenerator messageIdGenerator;
private final BoundaryGenerator boundaryGenerator;
+ private static final String highPriorityTag = "X-Priority";
+ private static final String xPriorityContent ="priority_high";
private String subject; | 1 | package com.fsck.k9.message;
import java.util.Date;
import java.util.List;
import java.util.Locale;
import android.app.Activity;
import android.app.PendingIntent;
import android.content.Context;
import android.content.Intent;
import android.os.AsyncTask;
import timber.log.Timber;
import com.fsck.k9.Account.QuoteStyle;
import com.fsck.k9.Identity;
import com.fsck.k9.K9;
import com.fsck.k9.R;
import com.fsck.k9.activity.MessageReference;
import com.fsck.k9.activity.misc.Attachment;
import com.fsck.k9.mail.Address;
import com.fsck.k9.mail.Body;
import com.fsck.k9.mail.BoundaryGenerator;
import com.fsck.k9.mail.Flag;
import com.fsck.k9.mail.Message.RecipientType;
import com.fsck.k9.mail.MessagingException;
import com.fsck.k9.mail.internet.MessageIdGenerator;
import com.fsck.k9.mail.internet.MimeBodyPart;
import com.fsck.k9.mail.internet.MimeHeader;
import com.fsck.k9.mail.internet.MimeMessage;
import com.fsck.k9.mail.internet.MimeMessageHelper;
import com.fsck.k9.mail.internet.MimeMultipart;
import com.fsck.k9.mail.internet.MimeUtility;
import com.fsck.k9.mail.internet.TextBody;
import com.fsck.k9.mailstore.TempFileBody;
import com.fsck.k9.message.quote.InsertableHtmlContent;
import org.apache.james.mime4j.codec.EncoderUtil;
import org.apache.james.mime4j.util.MimeUtil;
public abstract class MessageBuilder {
private final Context context;
private final MessageIdGenerator messageIdGenerator;
private final BoundaryGenerator boundaryGenerator;
private String subject;
private Date sentDate;
private boolean hideTimeZone;
private Address[] to;
private Address[] cc;
private Address[] bcc;
private String inReplyTo;
private String references;
private boolean requestReadReceipt;
private Identity identity;
private SimpleMessageFormat messageFormat;
private String text;
private List<Attachment> attachments;
private String signature;
private QuoteStyle quoteStyle;
private QuotedTextMode quotedTextMode;
private String quotedText;
private InsertableHtmlContent quotedHtmlContent;
private boolean isReplyAfterQuote;
private boolean isSignatureBeforeQuotedText;
private boolean identityChanged;
private boolean signatureChanged;
private int cursorPosition;
private MessageReference messageReference;
private boolean isDraft;
private boolean isPgpInlineEnabled;
protected MessageBuilder(Context context, MessageIdGenerator messageIdGenerator, BoundaryGenerator boundaryGenerator) {
this.context = context;
this.messageIdGenerator = messageIdGenerator;
this.boundaryGenerator = boundaryGenerator;
}
/**
* Build the message to be sent (or saved). If there is another message quoted in this one, it will be baked
* into the message here.
*/
protected MimeMessage build() throws MessagingException {
//FIXME: check arguments
MimeMessage message = new MimeMessage();
buildHeader(message);
buildBody(message);
return message;
}
private void buildHeader(MimeMessage message) throws MessagingException {
message.addSentDate(sentDate, hideTimeZone);
Address from = new Address(identity.getEmail(), identity.getName());
message.setFrom(from);
message.setRecipients(RecipientType.TO, to);
message.setRecipients(RecipientType.CC, cc);
message.setRecipients(RecipientType.BCC, bcc);
message.setSubject(subject);
if (requestReadReceipt) {
message.setHeader("Disposition-Notification-To", from.toEncodedString());
message.setHeader("X-Confirm-Reading-To", from.toEncodedString());
message.setHeader("Return-Receipt-To", from.toEncodedString());
}
if (!K9.hideUserAgent()) {
message.setHeader("User-Agent", context.getString(R.string.message_header_mua));
}
final String replyTo = identity.getReplyTo();
if (replyTo != null) {
message.setReplyTo(new Address[] { new Address(replyTo) });
}
if (inReplyTo != null) {
message.setInReplyTo(inReplyTo);
}
if (references != null) {
message.setReferences(references);
}
String messageId = messageIdGenerator.generateMessageId(message);
message.setMessageId(messageId);
if (isDraft && isPgpInlineEnabled) {
message.setFlag(Flag.X_DRAFT_OPENPGP_INLINE, true);
}
}
protected MimeMultipart createMimeMultipart() {
String boundary = boundaryGenerator.generateBoundary();
return new MimeMultipart(boundary);
}
private void buildBody(MimeMessage message) throws MessagingException {
// Build the body.
// TODO FIXME - body can be either an HTML or Text part, depending on whether we're in
// HTML mode or not. Should probably fix this so we don't mix up html and text parts.
TextBody body = buildText(isDraft);
// text/plain part when messageFormat == MessageFormat.HTML
TextBody bodyPlain = null;
final boolean hasAttachments = !attachments.isEmpty();
if (messageFormat == SimpleMessageFormat.HTML) {
// HTML message (with alternative text part)
// This is the compiled MIME part for an HTML message.
MimeMultipart composedMimeMessage = createMimeMultipart();
composedMimeMessage.setSubType("alternative");
// Let the receiver select either the text or the HTML part.
bodyPlain = buildText(isDraft, SimpleMessageFormat.TEXT);
composedMimeMessage.addBodyPart(new MimeBodyPart(bodyPlain, "text/plain"));
composedMimeMessage.addBodyPart(new MimeBodyPart(body, "text/html"));
if (hasAttachments) {
// If we're HTML and have attachments, we have a MimeMultipart container to hold the
// whole message (mp here), of which one part is a MimeMultipart container
// (composedMimeMessage) with the user's composed messages, and subsequent parts for
// the attachments.
MimeMultipart mp = createMimeMultipart();
mp.addBodyPart(new MimeBodyPart(composedMimeMessage));
addAttachmentsToMessage(mp);
MimeMessageHelper.setBody(message, mp);
} else {
// If no attachments, our multipart/alternative part is the only one we need.
MimeMessageHelper.setBody(message, composedMimeMessage);
}
} else if (messageFormat == SimpleMessageFormat.TEXT) {
// Text-only message.
if (hasAttachments) {
MimeMultipart mp = createMimeMultipart();
mp.addBodyPart(new MimeBodyPart(body, "text/plain"));
addAttachmentsToMessage(mp);
MimeMessageHelper.setBody(message, mp);
} else {
// No attachments to include, just stick the text body in the message and call it good.
MimeMessageHelper.setBody(message, body);
}
}
// If this is a draft, add metadata for thawing.
if (isDraft) {
// Add the identity to the message.
message.addHeader(K9.IDENTITY_HEADER, buildIdentityHeader(body, bodyPlain));
}
}
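// Editorial sketch (not part of the original file): the MIME trees the branches
// above produce, to make the container nesting easier to follow (the outer
// container's subtype is assumed to default to "mixed"):
//
//   HTML + attachments:            HTML, no attachments:
//     multipart/mixed                multipart/alternative
//       multipart/alternative          text/plain
//         text/plain                   text/html
//         text/html
//       attachment part(s)
//
//   TEXT + attachments:            TEXT, no attachments:
//     multipart/mixed                text/plain (body only)
//       text/plain
//       attachment part(s)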
private String buildIdentityHeader(TextBody body, TextBody bodyPlain) {
return new IdentityHeaderBuilder()
.setCursorPosition(cursorPosition)
.setIdentity(identity)
.setIdentityChanged(identityChanged)
.setMessageFormat(messageFormat)
.setMessageReference(messageReference)
.setQuotedHtmlContent(quotedHtmlContent)
.setQuoteStyle(quoteStyle)
.setQuoteTextMode(quotedTextMode)
.setSignature(signature)
.setSignatureChanged(signatureChanged)
.setBody(body)
.setBodyPlain(bodyPlain)
.build();
}
/**
* Add attachments as parts into a MimeMultipart container.
* @param mp MimeMultipart container in which to insert parts.
* @throws MessagingException
*/
private void addAttachmentsToMessage(final MimeMultipart mp) throws MessagingException {
for (Attachment attachment : attachments) {
if (attachment.state != Attachment.LoadingState.COMPLETE) {
continue;
}
String contentType = attachment.contentType;
if (MimeUtil.isMessage(contentType)) {
contentType = "application/octet-stream";
// TODO reencode message body to 7 bit
// body = new TempFileMessageBody(attachment.filename);
}
Body body = new TempFileBody(attachment.filename);
MimeBodyPart bp = new MimeBodyPart(body);
/*
* Correctly encode the filename here. Otherwise the whole
* header value (all parameters at once) will be encoded by
* MimeHeader.writeTo().
*/
bp.addHeader(MimeHeader.HEADER_CONTENT_TYPE, String.format("%s;\r\n name=\"%s\"",
contentType,
EncoderUtil.encodeIfNecessary(attachment.name,
EncoderUtil.Usage.WORD_ENTITY, 7)));
bp.setEncoding(MimeUtility.getEncodingforType(contentType));
/*
* TODO: Oh the joys of MIME...
*
* From RFC 2183 (The Content-Disposition Header Field):
* "Parameter values longer than 78 characters, or which
* contain non-ASCII characters, MUST be encoded as specified
* in [RFC 2184]."
*
* Example:
*
* Content-Type: application/x-stuff
* title*1*=us-ascii'en'This%20is%20even%20more%20
* title*2*=%2A%2A%2Afun%2A%2A%2A%20
* title*3="isn't it!"
*/
bp.addHeader(MimeHeader.HEADER_CONTENT_DISPOSITION, String.format(Locale.US,
"attachment;\r\n filename=\"%s\";\r\n size=%d",
attachment.name, attachment.size));
mp.addBodyPart(bp);
}
}
/**
* Build the Body that will contain the text of the message. We'll decide where to
* include it later. Draft messages are treated somewhat differently in that signatures are not
* appended and HTML separators between composed text and quoted text are not added.
* @param isDraft If we should build a message that will be saved as a draft (as opposed to sent).
*/
private TextBody buildText(boolean isDraft) {
return buildText(isDraft, messageFormat);
}
/**
* Build the {@link Body} that will contain the text of the message.
*
* <p>
* Draft messages are treated somewhat differently in that signatures are not appended and HTML
* separators between composed text and quoted text are not added.
* </p>
*
* @param isDraft
* If {@code true} we build a message that will be saved as a draft (as opposed to
* sent).
* @param simpleMessageFormat
* Specifies what type of message to build ({@code text/plain} vs. {@code text/html}).
*
* @return {@link TextBody} instance that contains the entered text and possibly the quoted
* original message.
*/
private TextBody buildText(boolean isDraft, SimpleMessageFormat simpleMessageFormat) {
String messageText = text;
TextBodyBuilder textBodyBuilder = new TextBodyBuilder(messageText);
/*
* Find out if we need to include the original message as quoted text.
*
* We include the quoted text in the body if the user didn't choose to
* hide it. We always include the quoted text when we're saving a draft.
* That's so the user is able to "un-hide" the quoted text if (s)he
* opens a saved draft.
*/
boolean includeQuotedText = (isDraft || quotedTextMode == QuotedTextMode.SHOW);
boolean isReplyAfterQuote = (quoteStyle == QuoteStyle.PREFIX && this.isReplyAfterQuote);
textBodyBuilder.setIncludeQuotedText(false);
if (includeQuotedText) {
if (simpleMessageFormat == SimpleMessageFormat.HTML && quotedHtmlContent != null) {
textBodyBuilder.setIncludeQuotedText(true);
textBodyBuilder.setQuotedTextHtml(quotedHtmlContent);
textBodyBuilder.setReplyAfterQuote(isReplyAfterQuote);
}
if (simpleMessageFormat == SimpleMessageFormat.TEXT && quotedText.length() > 0) {
textBodyBuilder.setIncludeQuotedText(true);
textBodyBuilder.setQuotedText(quotedText);
textBodyBuilder.setReplyAfterQuote(isReplyAfterQuote);
}
}
textBodyBuilder.setInsertSeparator(!isDraft);
boolean useSignature = (!isDraft && identity.getSignatureUse());
if (useSignature) {
textBodyBuilder.setAppendSignature(true);
textBodyBuilder.setSignature(signature);
textBodyBuilder.setSignatureBeforeQuotedText(isSignatureBeforeQuotedText);
} else {
textBodyBuilder.setAppendSignature(false);
}
TextBody body;
if (simpleMessageFormat == SimpleMessageFormat.HTML) {
body = textBodyBuilder.buildTextHtml();
} else {
body = textBodyBuilder.buildTextPlain();
}
return body;
}
public MessageBuilder setSubject(String subject) {
this.subject = subject;
return this;
}
public MessageBuilder setSentDate(Date sentDate) {
this.sentDate = sentDate;
return this;
}
public MessageBuilder setHideTimeZone(boolean hideTimeZone) {
this.hideTimeZone = hideTimeZone;
return this;
}
public MessageBuilder setTo(List<Address> to) {
this.to = to.toArray(new Address[to.size()]);
return this;
}
public MessageBuilder setCc(List<Address> cc) {
this.cc = cc.toArray(new Address[cc.size()]);
return this;
}
public MessageBuilder setBcc(List<Address> bcc) {
this.bcc = bcc.toArray(new Address[bcc.size()]);
return this;
}
public MessageBuilder setInReplyTo(String inReplyTo) {
this.inReplyTo = inReplyTo;
return this;
}
public MessageBuilder setReferences(String references) {
this.references = references;
return this;
}
public MessageBuilder setRequestReadReceipt(boolean requestReadReceipt) {
this.requestReadReceipt = requestReadReceipt;
return this;
}
public MessageBuilder setIdentity(Identity identity) {
this.identity = identity;
return this;
}
public MessageBuilder setMessageFormat(SimpleMessageFormat messageFormat) {
this.messageFormat = messageFormat;
return this;
}
public MessageBuilder setText(String text) {
this.text = text;
return this;
}
public MessageBuilder setAttachments(List<Attachment> attachments) {
this.attachments = attachments;
return this;
}
public MessageBuilder setSignature(String signature) {
this.signature = signature;
return this;
}
public MessageBuilder setQuoteStyle(QuoteStyle quoteStyle) {
this.quoteStyle = quoteStyle;
return this;
}
public MessageBuilder setQuotedTextMode(QuotedTextMode quotedTextMode) {
this.quotedTextMode = quotedTextMode;
return this;
}
public MessageBuilder setQuotedText(String quotedText) {
this.quotedText = quotedText;
return this;
}
public MessageBuilder setQuotedHtmlContent(InsertableHtmlContent quotedHtmlContent) {
this.quotedHtmlContent = quotedHtmlContent;
return this;
}
public MessageBuilder setReplyAfterQuote(boolean isReplyAfterQuote) {
this.isReplyAfterQuote = isReplyAfterQuote;
return this;
}
public MessageBuilder setSignatureBeforeQuotedText(boolean isSignatureBeforeQuotedText) {
this.isSignatureBeforeQuotedText = isSignatureBeforeQuotedText;
return this;
}
public MessageBuilder setIdentityChanged(boolean identityChanged) {
this.identityChanged = identityChanged;
return this;
}
public MessageBuilder setSignatureChanged(boolean signatureChanged) {
this.signatureChanged = signatureChanged;
return this;
}
public MessageBuilder setCursorPosition(int cursorPosition) {
this.cursorPosition = cursorPosition;
return this;
}
public MessageBuilder setMessageReference(MessageReference messageReference) {
this.messageReference = messageReference;
return this;
}
public MessageBuilder setDraft(boolean isDraft) {
this.isDraft = isDraft;
return this;
}
public MessageBuilder setIsPgpInlineEnabled(boolean isPgpInlineEnabled) {
this.isPgpInlineEnabled = isPgpInlineEnabled;
return this;
}
public boolean isDraft() {
return isDraft;
}
private Callback asyncCallback;
private final Object callbackLock = new Object();
// Postponed results, to be delivered upon reattachment of callback. There should only ever be one of these!
private MimeMessage queuedMimeMessage;
private MessagingException queuedException;
private PendingIntent queuedPendingIntent;
private int queuedRequestCode;
/** This method builds the message asynchronously, calling *exactly one* of the methods
* on the callback on the UI thread after it finishes. The callback may thread-safely
* be detached and reattached intermittently. */
final public void buildAsync(Callback callback) {
synchronized (callbackLock) {
asyncCallback = callback;
queuedMimeMessage = null;
queuedException = null;
queuedPendingIntent = null;
}
new AsyncTask<Void,Void,Void>() {
@Override
protected Void doInBackground(Void... params) {
buildMessageInternal();
return null;
}
@Override
protected void onPostExecute(Void aVoid) {
deliverResult();
}
}.execute();
}
final public void onActivityResult(final int requestCode, int resultCode, final Intent data, Callback callback) {
synchronized (callbackLock) {
asyncCallback = callback;
queuedMimeMessage = null;
queuedException = null;
queuedPendingIntent = null;
}
if (resultCode != Activity.RESULT_OK) {
asyncCallback.onMessageBuildCancel();
return;
}
new AsyncTask<Void,Void,Void>() {
@Override
protected Void doInBackground(Void... params) {
buildMessageOnActivityResult(requestCode, data);
return null;
}
@Override
protected void onPostExecute(Void aVoid) {
deliverResult();
}
}.execute();
}
/** This method is called in a worker thread, and should build the actual message. To deliver
* its computation result, it must call *exactly one* of the queueMessageBuild* methods before
* it finishes. */
abstract protected void buildMessageInternal();
abstract protected void buildMessageOnActivityResult(int requestCode, Intent data);
/** This method may be used to temporarily detach the callback. If a result is delivered
* while the callback is detached, it will be delivered upon reattachment. */
final public void detachCallback() {
synchronized (callbackLock) {
asyncCallback = null;
}
}
/** This method attaches a new callback, and must only be called after a previous one was
* detached. If the computation finished while the callback was detached, it will be
* delivered immediately upon reattachment. */
final public void reattachCallback(Callback callback) {
synchronized (callbackLock) {
if (asyncCallback != null) {
throw new IllegalStateException("need to detach callback before new one can be attached!");
}
asyncCallback = callback;
deliverResult();
}
}
final protected void queueMessageBuildSuccess(MimeMessage message) {
synchronized (callbackLock) {
queuedMimeMessage = message;
}
}
final protected void queueMessageBuildException(MessagingException exception) {
synchronized (callbackLock) {
queuedException = exception;
}
}
final protected void queueMessageBuildPendingIntent(PendingIntent pendingIntent, int requestCode) {
synchronized (callbackLock) {
queuedPendingIntent = pendingIntent;
queuedRequestCode = requestCode;
}
}
final protected void deliverResult() {
synchronized (callbackLock) {
if (asyncCallback == null) {
Timber.d("Keeping message builder result in queue for later delivery");
return;
}
if (queuedMimeMessage != null) {
asyncCallback.onMessageBuildSuccess(queuedMimeMessage, isDraft);
queuedMimeMessage = null;
} else if (queuedException != null) {
asyncCallback.onMessageBuildException(queuedException);
queuedException = null;
} else if (queuedPendingIntent != null) {
asyncCallback.onMessageBuildReturnPendingIntent(queuedPendingIntent, queuedRequestCode);
queuedPendingIntent = null;
}
asyncCallback = null;
}
}
public interface Callback {
void onMessageBuildSuccess(MimeMessage message, boolean isDraft);
void onMessageBuildCancel();
void onMessageBuildException(MessagingException exception);
void onMessageBuildReturnPendingIntent(PendingIntent pendingIntent, int requestCode);
}
}
| 1 | 15,540 | this is duplicate now. I suggest moving it into `MimeHeader`, we already have a bunch of other headers there | k9mail-k-9 | java |
@@ -551,11 +551,15 @@ func checkPassword(password string) (string, error) {
if !utf8.ValidString(password) {
return "", errors.New("password contains invalid utf8 characters")
}
- // Remove leading+trailing whitespace
- password = strings.TrimSpace(password)
+ // Check for leading/trailing whitespace
+ trimmedPassword := strings.TrimSpace(password)
+ // Warn user if password has leading+trailing whitespace
+ if len(password) != len(trimmedPassword) {
+ fmt.Fprintln(os.Stderr, "Your password contains leading/trailing whitespace - in previous versions of rclone this was stripped")
+ }
// Normalize to reduce weird variations.
password = norm.NFKC.String(password)
- if len(password) == 0 {
+ if len(password) == 0 || len(trimmedPassword) == 0 {
return "", errors.New("no characters in password")
}
return password, nil | 1 | // Read, write and edit the config file
package fs
import (
"bufio"
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"os"
"os/user"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
"unicode/utf8"
"github.com/Unknwon/goconfig"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/text/unicode/norm"
)
const (
configFileName = "rclone.conf"
hiddenConfigFileName = "." + configFileName
// ConfigToken is the key used to store the token under
ConfigToken = "token"
// ConfigClientID is the config key used to store the client id
ConfigClientID = "client_id"
// ConfigClientSecret is the config key used to store the client secret
ConfigClientSecret = "client_secret"
// ConfigAuthURL is the config key used to store the auth server endpoint
ConfigAuthURL = "auth_url"
// ConfigTokenURL is the config key used to store the token server endpoint
ConfigTokenURL = "token_url"
// ConfigAutomatic indicates that we want non-interactive configuration
ConfigAutomatic = "config_automatic"
)
// Global
var (
// configData is the config file data structure
configData *goconfig.ConfigFile
// ConfigPath points to the config file
ConfigPath = makeConfigPath()
// Config is the global config
Config = &ConfigInfo{}
// Flags
verbose = CountP("verbose", "v", "Print lots more stuff (repeat for more)")
quiet = BoolP("quiet", "q", false, "Print as little stuff as possible")
modifyWindow = DurationP("modify-window", "", time.Nanosecond, "Max time diff to be considered the same")
checkers = IntP("checkers", "", 8, "Number of checkers to run in parallel.")
transfers = IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
configFile = StringP("config", "", ConfigPath, "Config file.")
checkSum = BoolP("checksum", "c", false, "Skip based on checksum & size, not mod-time & size")
sizeOnly = BoolP("size-only", "", false, "Skip based on size only, not mod-time or checksum")
ignoreTimes = BoolP("ignore-times", "I", false, "Don't skip files that match size and time - transfer all files")
ignoreExisting = BoolP("ignore-existing", "", false, "Skip all files that exist on destination")
dryRun = BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
connectTimeout = DurationP("contimeout", "", 60*time.Second, "Connect timeout")
timeout = DurationP("timeout", "", 5*60*time.Second, "IO idle timeout")
dumpHeaders = BoolP("dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
dumpBodies = BoolP("dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
dumpAuth = BoolP("dump-auth", "", false, "Dump HTTP headers with auth info")
skipVerify = BoolP("no-check-certificate", "", false, "Do not verify the server SSL certificate. Insecure.")
AskPassword = BoolP("ask-password", "", true, "Allow prompt for password for encrypted configuration.")
deleteBefore = BoolP("delete-before", "", false, "When synchronizing, delete files on destination before transferring")
deleteDuring = BoolP("delete-during", "", false, "When synchronizing, delete files during transfer (default)")
deleteAfter = BoolP("delete-after", "", false, "When synchronizing, delete files on destination after transferring")
trackRenames = BoolP("track-renames", "", false, "When synchronizing, track file renames and do a server side move if possible")
lowLevelRetries = IntP("low-level-retries", "", 10, "Number of low level retries to do.")
updateOlder = BoolP("update", "u", false, "Skip files that are newer on the destination.")
noGzip = BoolP("no-gzip-encoding", "", false, "Don't set Accept-Encoding: gzip.")
maxDepth = IntP("max-depth", "", -1, "If set limits the recursion depth to this.")
ignoreSize = BoolP("ignore-size", "", false, "Ignore size when skipping use mod-time or checksum.")
ignoreChecksum = BoolP("ignore-checksum", "", false, "Skip post copy check of checksums.")
noTraverse = BoolP("no-traverse", "", false, "Don't traverse destination file system on copy.")
noUpdateModTime = BoolP("no-update-modtime", "", false, "Don't update destination mod-time if files identical.")
backupDir = StringP("backup-dir", "", "", "Make backups into hierarchy based in DIR.")
suffix = StringP("suffix", "", "", "Suffix for use with --backup-dir.")
useListR = BoolP("fast-list", "", false, "Use recursive list if available. Uses more memory but fewer transactions.")
tpsLimit = Float64P("tpslimit", "", 0, "Limit HTTP transactions per second to this.")
tpsLimitBurst = IntP("tpslimit-burst", "", 1, "Max burst of transactions for --tpslimit.")
bindAddr = StringP("bind", "", "", "Local address to bind to for outgoing connections, IPv4, IPv6 or name.")
disableFeatures = StringP("disable", "", "", "Disable a comma separated list of features. Use help to see a list.")
userAgent = StringP("user-agent", "", "rclone/"+Version, "Set the user-agent to a specified string. The default is rclone/ version")
immutable = BoolP("immutable", "", false, "Do not modify files. Fail if existing files have been modified.")
streamingUploadCutoff = SizeSuffix(100 * 1024)
logLevel = LogLevelNotice
statsLogLevel = LogLevelInfo
bwLimit BwTimetable
bufferSize SizeSuffix = 16 << 20
// Key to use for password en/decryption.
// When nil, no encryption will be used for saving.
configKey []byte
)
func init() {
VarP(&logLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR")
VarP(&statsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR")
VarP(&bwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.")
VarP(&bufferSize, "buffer-size", "", "Buffer size when copying files.")
VarP(&streamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.")
}
// crypt internals
var (
cryptKey = []byte{
0x9c, 0x93, 0x5b, 0x48, 0x73, 0x0a, 0x55, 0x4d,
0x6b, 0xfd, 0x7c, 0x63, 0xc8, 0x86, 0xa9, 0x2b,
0xd3, 0x90, 0x19, 0x8e, 0xb8, 0x12, 0x8a, 0xfb,
0xf4, 0xde, 0x16, 0x2b, 0x8b, 0x95, 0xf6, 0x38,
}
cryptBlock cipher.Block
cryptRand = rand.Reader
)
// crypt transforms in to out using iv under AES-CTR.
//
// in and out may be the same buffer.
//
// Note encryption and decryption are the same operation
func crypt(out, in, iv []byte) error {
if cryptBlock == nil {
var err error
cryptBlock, err = aes.NewCipher(cryptKey)
if err != nil {
return err
}
}
stream := cipher.NewCTR(cryptBlock, iv)
stream.XORKeyStream(out, in)
return nil
}
// Obscure a value
//
// This is done by encrypting with AES-CTR
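// An obscured value is base64.RawURLEncoding(iv || AES-CTR(plaintext)); Reveal
// below inverts the transform. Illustrative round trip:
//
//   obscured, _ := Obscure("hunter2")
//   plain, _ := Reveal(obscured) // plain == "hunter2"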
func Obscure(x string) (string, error) {
plaintext := []byte(x)
ciphertext := make([]byte, aes.BlockSize+len(plaintext))
iv := ciphertext[:aes.BlockSize]
if _, err := io.ReadFull(cryptRand, iv); err != nil {
return "", errors.Wrap(err, "failed to read iv")
}
if err := crypt(ciphertext[aes.BlockSize:], plaintext, iv); err != nil {
return "", errors.Wrap(err, "encrypt failed")
}
return base64.RawURLEncoding.EncodeToString(ciphertext), nil
}
// MustObscure obscures a value, exiting with a fatal error if it failed
func MustObscure(x string) string {
out, err := Obscure(x)
if err != nil {
log.Fatalf("Obscure failed: %v", err)
}
return out
}
// Reveal an obscured value
func Reveal(x string) (string, error) {
ciphertext, err := base64.RawURLEncoding.DecodeString(x)
if err != nil {
return "", errors.Wrap(err, "base64 decode failed when revealing password - is it obscured?")
}
if len(ciphertext) < aes.BlockSize {
return "", errors.New("input too short when revealing password - is it obscured?")
}
buf := ciphertext[aes.BlockSize:]
iv := ciphertext[:aes.BlockSize]
if err := crypt(buf, buf, iv); err != nil {
return "", errors.Wrap(err, "decrypt failed when revealing password - is it obscured?")
}
return string(buf), nil
}
// MustReveal reveals an obscured value, exiting with a fatal error if it failed
func MustReveal(x string) string {
out, err := Reveal(x)
if err != nil {
log.Fatalf("Reveal failed: %v", err)
}
return out
}
// ConfigInfo is filesystem config options
type ConfigInfo struct {
LogLevel LogLevel
StatsLogLevel LogLevel
DryRun bool
CheckSum bool
SizeOnly bool
IgnoreTimes bool
IgnoreExisting bool
ModifyWindow time.Duration
Checkers int
Transfers int
ConnectTimeout time.Duration // Connect timeout
Timeout time.Duration // Data channel timeout
DumpHeaders bool
DumpBodies bool
DumpAuth bool
Filter *Filter
InsecureSkipVerify bool // Skip server certificate verification
DeleteMode DeleteMode
TrackRenames bool // Track file renames.
LowLevelRetries int
UpdateOlder bool // Skip files that are newer on the destination
NoGzip bool // Disable compression
MaxDepth int
IgnoreSize bool
IgnoreChecksum bool
NoTraverse bool
NoUpdateModTime bool
DataRateUnit string
BackupDir string
Suffix string
UseListR bool
BufferSize SizeSuffix
TPSLimit float64
TPSLimitBurst int
BindAddr net.IP
DisableFeatures []string
Immutable bool
StreamingUploadCutoff SizeSuffix
}
// Return the path to the configuration file
func makeConfigPath() string {
// Find user's home directory
usr, err := user.Current()
var homedir string
if err == nil {
homedir = usr.HomeDir
} else {
// Fall back to reading $HOME - work around user.Current() not
// working for cross compiled binaries on OSX.
// https://github.com/golang/go/issues/6376
homedir = os.Getenv("HOME")
}
// Possibly find the user's XDG config paths
// See XDG Base Directory specification
// https://specifications.freedesktop.org/basedir-spec/latest/
xdgdir := os.Getenv("XDG_CONFIG_HOME")
var xdgcfgdir string
if xdgdir != "" {
xdgcfgdir = filepath.Join(xdgdir, "rclone")
} else if homedir != "" {
xdgdir = filepath.Join(homedir, ".config")
xdgcfgdir = filepath.Join(xdgdir, "rclone")
}
// Use $XDG_CONFIG_HOME/rclone/rclone.conf if already existing
var xdgconf string
if xdgcfgdir != "" {
xdgconf = filepath.Join(xdgcfgdir, configFileName)
_, err := os.Stat(xdgconf)
if err == nil {
return xdgconf
}
}
// Use $HOME/.rclone.conf if already existing
var homeconf string
if homedir != "" {
homeconf = filepath.Join(homedir, hiddenConfigFileName)
_, err := os.Stat(homeconf)
if err == nil {
return homeconf
}
}
// Try to create $XDG_CONFIG_HOME/rclone/rclone.conf
if xdgconf != "" {
// xdgconf != "" implies xdgcfgdir != ""
err := os.MkdirAll(xdgcfgdir, os.ModePerm)
if err == nil {
return xdgconf
}
}
// Try to create $HOME/.rclone.conf
if homeconf != "" {
return homeconf
}
// Default to ./.rclone.conf (current working directory)
Errorf(nil, "Couldn't find home directory or read HOME or XDG_CONFIG_HOME environment variables.")
Errorf(nil, "Defaulting to storing config in current directory.")
Errorf(nil, "Use -config flag to workaround.")
Errorf(nil, "Error was: %v", err)
return hiddenConfigFileName
}
// DeleteMode describes the possible delete modes in the config
type DeleteMode byte
// DeleteMode constants
const (
DeleteModeOff DeleteMode = iota
DeleteModeBefore
DeleteModeDuring
DeleteModeAfter
DeleteModeOnly
DeleteModeDefault = DeleteModeAfter
)
// LoadConfig loads the config file
func LoadConfig() {
// Read some flags if set
//
// FIXME read these from the config file too
Config.LogLevel = LogLevelNotice
if *verbose >= 2 {
Config.LogLevel = LogLevelDebug
} else if *verbose >= 1 {
Config.LogLevel = LogLevelInfo
}
if *quiet {
if *verbose > 0 {
log.Fatalf("Can't set -v and -q")
}
Config.LogLevel = LogLevelError
}
logLevelFlag := pflag.Lookup("log-level")
if logLevelFlag != nil && logLevelFlag.Changed {
if *verbose > 0 {
log.Fatalf("Can't set -v and --log-level")
}
if *quiet {
log.Fatalf("Can't set -q and --log-level")
}
Config.LogLevel = logLevel
}
Config.StatsLogLevel = statsLogLevel
Config.ModifyWindow = *modifyWindow
Config.Checkers = *checkers
Config.Transfers = *transfers
Config.DryRun = *dryRun
Config.Timeout = *timeout
Config.ConnectTimeout = *connectTimeout
Config.CheckSum = *checkSum
Config.SizeOnly = *sizeOnly
Config.IgnoreTimes = *ignoreTimes
Config.IgnoreExisting = *ignoreExisting
Config.DumpHeaders = *dumpHeaders
Config.DumpBodies = *dumpBodies
Config.DumpAuth = *dumpAuth
Config.InsecureSkipVerify = *skipVerify
Config.LowLevelRetries = *lowLevelRetries
Config.UpdateOlder = *updateOlder
Config.NoGzip = *noGzip
Config.MaxDepth = *maxDepth
Config.IgnoreSize = *ignoreSize
Config.IgnoreChecksum = *ignoreChecksum
Config.NoTraverse = *noTraverse
Config.NoUpdateModTime = *noUpdateModTime
Config.BackupDir = *backupDir
Config.Suffix = *suffix
Config.UseListR = *useListR
Config.TPSLimit = *tpsLimit
Config.TPSLimitBurst = *tpsLimitBurst
Config.Immutable = *immutable
Config.BufferSize = bufferSize
Config.StreamingUploadCutoff = streamingUploadCutoff
Config.TrackRenames = *trackRenames
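// Reconcile the mutually exclusive --delete-* flags into a single DeleteMode; specifying more than one is a fatal error.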
switch {
case *deleteBefore && (*deleteDuring || *deleteAfter),
*deleteDuring && *deleteAfter:
log.Fatalf(`Only one of --delete-before, --delete-during or --delete-after can be used.`)
case *deleteBefore:
Config.DeleteMode = DeleteModeBefore
case *deleteDuring:
Config.DeleteMode = DeleteModeDuring
case *deleteAfter:
Config.DeleteMode = DeleteModeAfter
default:
Config.DeleteMode = DeleteModeDefault
}
if Config.IgnoreSize && Config.SizeOnly {
log.Fatalf(`Can't use --size-only and --ignore-size together.`)
}
if Config.Suffix != "" && Config.BackupDir == "" {
log.Fatalf(`Can only use --suffix with --backup-dir.`)
}
if *bindAddr != "" {
addrs, err := net.LookupIP(*bindAddr)
if err != nil {
log.Fatalf("--bind: Failed to parse %q as IP address: %v", *bindAddr, err)
}
if len(addrs) != 1 {
log.Fatalf("--bind: Expecting 1 IP address for %q but got %d", *bindAddr, len(addrs))
}
Config.BindAddr = addrs[0]
}
if *disableFeatures != "" {
if *disableFeatures == "help" {
log.Fatalf("Possible backend features are: %s\n", strings.Join(new(Features).List(), ", "))
}
Config.DisableFeatures = strings.Split(*disableFeatures, ",")
}
// Load configuration file.
var err error
ConfigPath, err = filepath.Abs(*configFile)
if err != nil {
ConfigPath = *configFile
}
configData, err = loadConfigFile()
if err == errorConfigFileNotFound {
Logf(nil, "Config file %q not found - using defaults", ConfigPath)
configData, _ = goconfig.LoadFromReader(&bytes.Buffer{})
} else if err != nil {
log.Fatalf("Failed to load config file %q: %v", ConfigPath, err)
} else {
Debugf(nil, "Using config file from %q", ConfigPath)
}
// Load filters
Config.Filter, err = NewFilter()
if err != nil {
log.Fatalf("Failed to load filters: %v", err)
}
// Start the token bucket limiter
startTokenBucket()
// Start the bandwidth update ticker
startTokenTicker()
// Start the transactions per second limiter
startHTTPTokenBucket()
}
var errorConfigFileNotFound = errors.New("config file not found")
// loadConfigFile will load a config file, and
// automatically decrypt it.
func loadConfigFile() (*goconfig.ConfigFile, error) {
b, err := ioutil.ReadFile(ConfigPath)
if err != nil {
if os.IsNotExist(err) {
return nil, errorConfigFileNotFound
}
return nil, err
}
// Find first non-empty line
r := bufio.NewReader(bytes.NewBuffer(b))
for {
line, _, err := r.ReadLine()
if err != nil {
if err == io.EOF {
return goconfig.LoadFromReader(bytes.NewBuffer(b))
}
return nil, err
}
l := strings.TrimSpace(string(line))
if len(l) == 0 || strings.HasPrefix(l, ";") || strings.HasPrefix(l, "#") {
continue
}
// The first non-empty, non-comment line must be "RCLONE_ENCRYPT_V0:" for an encrypted config
if l == "RCLONE_ENCRYPT_V0:" {
break
}
if strings.HasPrefix(l, "RCLONE_ENCRYPT_V") {
return nil, errors.New("unsupported configuration encryption - update rclone for support")
}
return goconfig.LoadFromReader(bytes.NewBuffer(b))
}
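// After the "RCLONE_ENCRYPT_V0:" marker the file holds standard base64 of a
// 24-byte random nonce followed by a NaCl secretbox of the plain config text,
// keyed by the SHA-256 derived configKey (see setConfigPassword and SaveConfig).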
// Encrypted content is base64 encoded.
dec := base64.NewDecoder(base64.StdEncoding, r)
box, err := ioutil.ReadAll(dec)
if err != nil {
return nil, errors.Wrap(err, "failed to load base64 encoded data")
}
if len(box) < 24+secretbox.Overhead {
return nil, errors.New("Configuration data too short")
}
envpw := os.Getenv("RCLONE_CONFIG_PASS")
var out []byte
for {
if len(configKey) == 0 && envpw != "" {
err := setConfigPassword(envpw)
if err != nil {
fmt.Println("Using RCLONE_CONFIG_PASS returned:", err)
} else {
Debugf(nil, "Using RCLONE_CONFIG_PASS password.")
}
}
if len(configKey) == 0 {
if !*AskPassword {
return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password")
}
getConfigPassword("Enter configuration password:")
}
// Nonce is first 24 bytes of the ciphertext
var nonce [24]byte
copy(nonce[:], box[:24])
var key [32]byte
copy(key[:], configKey[:32])
// Attempt to decrypt
var ok bool
out, ok = secretbox.Open(nil, box[24:], &nonce, &key)
if ok {
break
}
// Retry
Errorf(nil, "Couldn't decrypt configuration, most likely wrong password.")
configKey = nil
envpw = ""
}
return goconfig.LoadFromReader(bytes.NewBuffer(out))
}
// checkPassword normalises and validates the password
func checkPassword(password string) (string, error) {
if !utf8.ValidString(password) {
return "", errors.New("password contains invalid utf8 characters")
}
// Remove leading+trailing whitespace
password = strings.TrimSpace(password)
// Normalize to reduce weird variations.
password = norm.NFKC.String(password)
if len(password) == 0 {
return "", errors.New("no characters in password")
}
return password, nil
}
// GetPassword asks the user for a password with the prompt given.
func GetPassword(prompt string) string {
fmt.Fprintln(os.Stderr, prompt)
for {
fmt.Fprint(os.Stderr, "password:")
password := ReadPassword()
password, err := checkPassword(password)
if err == nil {
return password
}
fmt.Fprintf(os.Stderr, "Bad password: %v\n", err)
}
}
// ChangePassword will query the user twice for the named password. If
// the same password is entered it is returned.
func ChangePassword(name string) string {
for {
a := GetPassword(fmt.Sprintf("Enter %s password:", name))
b := GetPassword(fmt.Sprintf("Confirm %s password:", name))
if a == b {
return a
}
fmt.Println("Passwords do not match!")
}
}
// getConfigPassword will query the user for a password the
// first time it is required.
func getConfigPassword(q string) {
if len(configKey) != 0 {
return
}
for {
password := GetPassword(q)
err := setConfigPassword(password)
if err == nil {
return
}
fmt.Fprintln(os.Stderr, "Error:", err)
}
}
// setConfigPassword will set the configKey to the hash of
// the password. If the length of the password is
// zero after trimming+normalization, an error is returned.
func setConfigPassword(password string) error {
password, err := checkPassword(password)
if err != nil {
return err
}
// Create SHA256 hash of the password
sha := sha256.New()
_, err = sha.Write([]byte("[" + password + "][rclone-config]"))
if err != nil {
return err
}
configKey = sha.Sum(nil)
return nil
}
// changeConfigPassword will query the user twice
// for a password. If the same password is entered
// twice the key is updated.
func changeConfigPassword() {
err := setConfigPassword(ChangePassword("NEW configuration"))
if err != nil {
fmt.Printf("Failed to set config password: %v\n", err)
return
}
}
// SaveConfig saves configuration file.
// if configKey has been set, the file will be encrypted.
func SaveConfig() {
dir, name := filepath.Split(ConfigPath)
f, err := ioutil.TempFile(dir, name)
if err != nil {
log.Fatalf("Failed to create temp file for new config: %v", err)
return
}
defer func() {
if err := os.Remove(f.Name()); err != nil && !os.IsNotExist(err) {
Errorf(nil, "Failed to remove temp config file: %v", err)
}
}()
var buf bytes.Buffer
err = goconfig.SaveConfigData(configData, &buf)
if err != nil {
log.Fatalf("Failed to save config file: %v", err)
}
if len(configKey) == 0 {
if _, err := buf.WriteTo(f); err != nil {
log.Fatalf("Failed to write temp config file: %v", err)
}
} else {
fmt.Fprintln(f, "# Encrypted rclone configuration File")
fmt.Fprintln(f, "")
fmt.Fprintln(f, "RCLONE_ENCRYPT_V0:")
// Generate new nonce and write it to the start of the ciphertext
var nonce [24]byte
n, _ := rand.Read(nonce[:])
if n != 24 {
log.Fatalf("nonce short read: %d", n)
}
enc := base64.NewEncoder(base64.StdEncoding, f)
_, err = enc.Write(nonce[:])
if err != nil {
log.Fatalf("Failed to write temp config file: %v", err)
}
var key [32]byte
copy(key[:], configKey[:32])
b := secretbox.Seal(nil, buf.Bytes(), &nonce, &key)
_, err = enc.Write(b)
if err != nil {
log.Fatalf("Failed to write temp config file: %v", err)
}
_ = enc.Close()
}
err = f.Close()
if err != nil {
log.Fatalf("Failed to close config file: %v", err)
}
var fileMode os.FileMode = 0600
info, err := os.Stat(ConfigPath)
if err != nil {
Debugf(nil, "Using default permissions for config file: %v", fileMode)
} else if info.Mode() != fileMode {
Debugf(nil, "Keeping previous permissions for config file: %v", info.Mode())
fileMode = info.Mode()
}
attemptCopyGroup(ConfigPath, f.Name())
err = os.Chmod(f.Name(), fileMode)
if err != nil {
Errorf(nil, "Failed to set permissions on config file: %v", err)
}
if err = os.Rename(ConfigPath, ConfigPath+".old"); err != nil && !os.IsNotExist(err) {
log.Fatalf("Failed to move previous config to backup location: %v", err)
}
if err = os.Rename(f.Name(), ConfigPath); err != nil {
log.Fatalf("Failed to move newly written config from %s to final location: %v", f.Name(), err)
}
if err := os.Remove(ConfigPath + ".old"); err != nil && !os.IsNotExist(err) {
Errorf(nil, "Failed to remove backup config file: %v", err)
}
}
// ConfigSetValueAndSave sets the key to the value and saves just that
// value in the config file. It loads the old config file in from
// disk first and overwrites the given value only.
func ConfigSetValueAndSave(name, key, value string) (err error) {
// Set the value in config in case we fail to reload it
configData.SetValue(name, key, value)
// Reload the config file
reloadedConfigFile, err := loadConfigFile()
if err == errorConfigFileNotFound {
// Config file not written yet so ignore reload
return nil
} else if err != nil {
return err
}
_, err = reloadedConfigFile.GetSection(name)
if err != nil {
// Section doesn't exist yet so ignore reload
return err
}
// Update the config file with the reloaded version
configData = reloadedConfigFile
// Set the value in the reloaded version
reloadedConfigFile.SetValue(name, key, value)
// Save it again
SaveConfig()
return nil
}
// ShowRemotes shows an overview of the config file
func ShowRemotes() {
remotes := configData.GetSectionList()
if len(remotes) == 0 {
return
}
sort.Strings(remotes)
fmt.Printf("%-20s %s\n", "Name", "Type")
fmt.Printf("%-20s %s\n", "====", "====")
for _, remote := range remotes {
fmt.Printf("%-20s %s\n", remote, ConfigFileGet(remote, "type"))
}
}
// ChooseRemote chooses a remote name
func ChooseRemote() string {
remotes := configData.GetSectionList()
sort.Strings(remotes)
return Choose("remote", remotes, nil, false)
}
// ReadLine reads some input
var ReadLine = func() string {
buf := bufio.NewReader(os.Stdin)
line, err := buf.ReadString('\n')
if err != nil {
log.Fatalf("Failed to read line: %v", err)
}
return strings.TrimSpace(line)
}
// Command - choose one
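// Each entry's first character is the key the user types to select it; the
// rest is the label shown, e.g. "yYes".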
func Command(commands []string) byte {
opts := []string{}
for _, text := range commands {
fmt.Printf("%c) %s\n", text[0], text[1:])
opts = append(opts, text[:1])
}
optString := strings.Join(opts, "")
optHelp := strings.Join(opts, "/")
for {
fmt.Printf("%s> ", optHelp)
result := strings.ToLower(ReadLine())
if len(result) != 1 {
continue
}
i := strings.Index(optString, string(result[0]))
if i >= 0 {
return result[0]
}
}
}
// Confirm asks the user for Yes or No and returns true or false
func Confirm() bool {
return Command([]string{"yYes", "nNo"}) == 'y'
}
// Choose one of the defaults or type a new string if newOk is set
func Choose(what string, defaults, help []string, newOk bool) string {
valueDescription := "an existing"
if newOk {
valueDescription = "your own"
}
fmt.Printf("Choose a number from below, or type in %s value\n", valueDescription)
for i, text := range defaults {
var lines []string
if help != nil {
parts := strings.Split(help[i], "\n")
lines = append(lines, parts...)
}
lines = append(lines, fmt.Sprintf("%q", text))
pos := i + 1
if len(lines) == 1 {
fmt.Printf("%2d > %s\n", pos, text)
} else {
mid := (len(lines) - 1) / 2
for i, line := range lines {
var sep rune
switch i {
case 0:
sep = '/'
case len(lines) - 1:
sep = '\\'
default:
sep = '|'
}
number := " "
if i == mid {
number = fmt.Sprintf("%2d", pos)
}
fmt.Printf("%s %c %s\n", number, sep, line)
}
}
}
for {
fmt.Printf("%s> ", what)
result := ReadLine()
i, err := strconv.Atoi(result)
if err != nil {
if newOk {
return result
}
for _, v := range defaults {
if result == v {
return result
}
}
continue
}
if i >= 1 && i <= len(defaults) {
return defaults[i-1]
}
}
}
// ChooseNumber asks the user to enter a number between min and max
// inclusive prompting them with what.
func ChooseNumber(what string, min, max int) int {
for {
fmt.Printf("%s> ", what)
result := ReadLine()
i, err := strconv.Atoi(result)
if err != nil {
fmt.Printf("Bad number: %v\n", err)
continue
}
if i < min || i > max {
fmt.Printf("Out of range - %d to %d inclusive\n", min, max)
continue
}
return i
}
}
// ShowRemote shows the contents of the remote
func ShowRemote(name string) {
fmt.Printf("--------------------\n")
fmt.Printf("[%s]\n", name)
fs := MustFindByName(name)
for _, key := range configData.GetKeyList(name) {
isPassword := false
for _, option := range fs.Options {
if option.Name == key && option.IsPassword {
isPassword = true
break
}
}
value := ConfigFileGet(name, key)
if isPassword && value != "" {
fmt.Printf("%s = *** ENCRYPTED ***\n", key)
} else {
fmt.Printf("%s = %s\n", key, value)
}
}
fmt.Printf("--------------------\n")
}
// OkRemote prints the contents of the remote and asks if it is OK
func OkRemote(name string) bool {
ShowRemote(name)
switch i := Command([]string{"yYes this is OK", "eEdit this remote", "dDelete this remote"}); i {
case 'y':
return true
case 'e':
return false
case 'd':
configData.DeleteSection(name)
return true
default:
Errorf(nil, "Bad choice %c", i)
}
return false
}
// MustFindByName finds the RegInfo for the remote name passed in or
// exits with a fatal error.
func MustFindByName(name string) *RegInfo {
fsType := ConfigFileGet(name, "type")
if fsType == "" {
log.Fatalf("Couldn't find type of fs for %q", name)
}
return MustFind(fsType)
}
// RemoteConfig runs the config helper for the remote if needed
func RemoteConfig(name string) {
fmt.Printf("Remote config\n")
f := MustFindByName(name)
if f.Config != nil {
f.Config(name)
}
}
// ChooseOption asks the user to choose an option
func ChooseOption(o *Option) string {
fmt.Println(o.Help)
if o.IsPassword {
actions := []string{"yYes type in my own password", "gGenerate random password"}
if o.Optional {
actions = append(actions, "nNo leave this optional password blank")
}
var password string
switch i := Command(actions); i {
case 'y':
password = ChangePassword("the")
case 'g':
for {
fmt.Printf("Password strength in bits.\n64 is just about memorable\n128 is secure\n1024 is the maximum\n")
bits := ChooseNumber("Bits", 64, 1024)
bytes := bits / 8
if bits%8 != 0 {
bytes++
}
var pw = make([]byte, bytes)
n, _ := rand.Read(pw)
if n != bytes {
log.Fatalf("password short read: %d", n)
}
password = base64.RawURLEncoding.EncodeToString(pw)
fmt.Printf("Your password is: %s\n", password)
fmt.Printf("Use this password?\n")
if Confirm() {
break
}
}
case 'n':
return ""
default:
Errorf(nil, "Bad choice %c", i)
}
return MustObscure(password)
}
if len(o.Examples) > 0 {
var values []string
var help []string
for _, example := range o.Examples {
values = append(values, example.Value)
help = append(help, example.Help)
}
return Choose(o.Name, values, help, true)
}
fmt.Printf("%s> ", o.Name)
return ReadLine()
}
// UpdateRemote adds the keyValues passed in to the remote of name.
// keyValues should be key, value pairs.
func UpdateRemote(name string, keyValues []string) error {
if len(keyValues)%2 != 0 {
return errors.New("found key without value")
}
// Set the config
for i := 0; i < len(keyValues); i += 2 {
configData.SetValue(name, keyValues[i], keyValues[i+1])
}
RemoteConfig(name)
ShowRemote(name)
SaveConfig()
return nil
}
// CreateRemote creates a new remote with name, provider and a list of
// parameters which are key, value pairs. If update is set then it
// adds the new keys rather than replacing all of them.
func CreateRemote(name string, provider string, keyValues []string) error {
// Delete the old config if it exists
configData.DeleteSection(name)
// Set the type
configData.SetValue(name, "type", provider)
// Show this is automatically configured
configData.SetValue(name, ConfigAutomatic, "yes")
// Set the remaining values
return UpdateRemote(name, keyValues)
}
// JSONListProviders prints all the providers and options in JSON format
func JSONListProviders() error {
b, err := json.MarshalIndent(fsRegistry, "", " ")
if err != nil {
return errors.Wrap(err, "failed to marshal examples")
}
_, err = os.Stdout.Write(b)
if err != nil {
return errors.Wrap(err, "failed to write providers list")
}
return nil
}
// fsOption returns an Option describing the possible remotes
func fsOption() *Option {
o := &Option{
Name: "Storage",
Help: "Type of storage to configure.",
}
for _, item := range fsRegistry {
example := OptionExample{
Value: item.Name,
Help: item.Description,
}
o.Examples = append(o.Examples, example)
}
o.Examples.Sort()
return o
}
// NewRemoteName asks the user for a name for a remote
func NewRemoteName() (name string) {
for {
fmt.Printf("name> ")
name = ReadLine()
parts := matcher.FindStringSubmatch(name + ":")
switch {
case name == "":
fmt.Printf("Can't use empty name.\n")
case isDriveLetter(name):
fmt.Printf("Can't use %q as it can be confused a drive letter.\n", name)
case parts == nil:
fmt.Printf("Can't use %q as it has invalid characters in it.\n", name)
default:
return name
}
}
}
// NewRemote makes a new remote from its name
func NewRemote(name string) {
newType := ChooseOption(fsOption())
configData.SetValue(name, "type", newType)
fs := MustFind(newType)
for _, option := range fs.Options {
configData.SetValue(name, option.Name, ChooseOption(&option))
}
RemoteConfig(name)
if OkRemote(name) {
SaveConfig()
return
}
EditRemote(fs, name)
}
// EditRemote gets the user to edit a remote
func EditRemote(fs *RegInfo, name string) {
ShowRemote(name)
fmt.Printf("Edit remote\n")
for {
for _, option := range fs.Options {
key := option.Name
value := ConfigFileGet(name, key)
fmt.Printf("Value %q = %q\n", key, value)
fmt.Printf("Edit? (y/n)>\n")
if Confirm() {
newValue := ChooseOption(&option)
configData.SetValue(name, key, newValue)
}
}
RemoteConfig(name)
if OkRemote(name) {
break
}
}
SaveConfig()
}
// DeleteRemote gets the user to delete a remote
func DeleteRemote(name string) {
configData.DeleteSection(name)
SaveConfig()
}
// copyRemote asks the user for a new remote name and copies name into
// it. Returns the new name.
func copyRemote(name string) string {
newName := NewRemoteName()
// Copy the keys
for _, key := range configData.GetKeyList(name) {
value := configData.MustValue(name, key, "")
configData.SetValue(newName, key, value)
}
return newName
}
// RenameRemote renames a config section
func RenameRemote(name string) {
fmt.Printf("Enter new name for %q remote.\n", name)
newName := copyRemote(name)
if name != newName {
configData.DeleteSection(name)
SaveConfig()
}
}
// CopyRemote copies a config section
func CopyRemote(name string) {
fmt.Printf("Enter name for copy of %q remote.\n", name)
copyRemote(name)
SaveConfig()
}
// ShowConfigLocation prints the location of the config file in use
func ShowConfigLocation() {
if _, err := os.Stat(ConfigPath); os.IsNotExist(err) {
fmt.Println("Configuration file doesn't exist, but rclone will use this path:")
} else {
fmt.Println("Configuration file is stored at:")
}
fmt.Printf("%s\n", ConfigPath)
}
// ShowConfig prints the (unencrypted) config options
func ShowConfig() {
var buf bytes.Buffer
if err := goconfig.SaveConfigData(configData, &buf); err != nil {
log.Fatalf("Failed to serialize config: %v", err)
}
str := buf.String()
if str == "" {
str = "; empty config\n"
}
fmt.Printf("%s", str)
}
// EditConfig edits the config file interactively
func EditConfig() {
for {
haveRemotes := len(configData.GetSectionList()) != 0
what := []string{"eEdit existing remote", "nNew remote", "dDelete remote", "rRename remote", "cCopy remote", "sSet configuration password", "qQuit config"}
if haveRemotes {
fmt.Printf("Current remotes:\n\n")
ShowRemotes()
fmt.Printf("\n")
} else {
fmt.Printf("No remotes found - make a new one\n")
// take 2nd item and last 2 items of menu list
what = append(what[1:2], what[len(what)-2:]...)
}
switch i := Command(what); i {
case 'e':
name := ChooseRemote()
fs := MustFindByName(name)
EditRemote(fs, name)
case 'n':
NewRemote(NewRemoteName())
case 'd':
name := ChooseRemote()
DeleteRemote(name)
case 'r':
RenameRemote(ChooseRemote())
case 'c':
CopyRemote(ChooseRemote())
case 's':
SetPassword()
case 'q':
return
}
}
}
// SetPassword will allow the user to modify the current
// configuration encryption settings.
func SetPassword() {
for {
if len(configKey) > 0 {
fmt.Println("Your configuration is encrypted.")
what := []string{"cChange Password", "uUnencrypt configuration", "qQuit to main menu"}
switch i := Command(what); i {
case 'c':
changeConfigPassword()
SaveConfig()
fmt.Println("Password changed")
continue
case 'u':
configKey = nil
SaveConfig()
continue
case 'q':
return
}
} else {
fmt.Println("Your configuration is not encrypted.")
fmt.Println("If you add a password, you will protect your login information to cloud services.")
what := []string{"aAdd Password", "qQuit to main menu"}
switch i := Command(what); i {
case 'a':
changeConfigPassword()
SaveConfig()
fmt.Println("Password set")
continue
case 'q':
return
}
}
}
}
// Authorize is for remote authorization of headless machines.
//
// It expects 1 or 3 arguments
//
// rclone authorize "fs name"
// rclone authorize "fs name" "client id" "client secret"
func Authorize(args []string) {
switch len(args) {
case 1, 3:
default:
log.Fatalf("Invalid number of arguments: %d", len(args))
}
newType := args[0]
fs := MustFind(newType)
if fs.Config == nil {
log.Fatalf("Can't authorize fs %q", newType)
}
// Name used for temporary fs
name := "**temp-fs**"
// Make sure we delete it
defer DeleteRemote(name)
// Indicate that we want fully automatic configuration.
configData.SetValue(name, ConfigAutomatic, "yes")
if len(args) == 3 {
configData.SetValue(name, ConfigClientID, args[1])
configData.SetValue(name, ConfigClientSecret, args[2])
}
fs.Config(name)
}
// configToEnv converts a config section and name, eg ("myremote",
// "ignore-size") into an environment name
// "RCLONE_CONFIG_MYREMOTE_IGNORE_SIZE"
func configToEnv(section, name string) string {
return "RCLONE_CONFIG_" + strings.ToUpper(strings.Replace(section+"_"+name, "-", "_", -1))
}
// ConfigFileGet gets the config key under section returning the
// default or empty string if not set.
//
// It looks up defaults in the environment if they are present
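// For example, RCLONE_CONFIG_MYREMOTE_TYPE overrides the "type" key of the [myremote] section.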
func ConfigFileGet(section, key string, defaultVal ...string) string {
envKey := configToEnv(section, key)
newValue, found := os.LookupEnv(envKey)
if found {
defaultVal = []string{newValue}
}
return configData.MustValue(section, key, defaultVal...)
}
// ConfigFileGetBool gets the config key under section returning the
// default or false if not set.
//
// It looks up defaults in the environment if they are present
func ConfigFileGetBool(section, key string, defaultVal ...bool) bool {
envKey := configToEnv(section, key)
newValue, found := os.LookupEnv(envKey)
if found {
newBool, err := strconv.ParseBool(newValue)
if err != nil {
Errorf(nil, "Couldn't parse %q into bool - ignoring: %v", envKey, err)
} else {
defaultVal = []bool{newBool}
}
}
return configData.MustBool(section, key, defaultVal...)
}
// ConfigFileGetInt gets the config key under section returning the
// default or 0 if not set.
//
// It looks up defaults in the environment if they are present
func ConfigFileGetInt(section, key string, defaultVal ...int) int {
envKey := configToEnv(section, key)
newValue, found := os.LookupEnv(envKey)
if found {
newInt, err := strconv.Atoi(newValue)
if err != nil {
Errorf(nil, "Couldn't parse %q into int - ignoring: %v", envKey, err)
} else {
defaultVal = []int{newInt}
}
}
return configData.MustInt(section, key, defaultVal...)
}
// ConfigFileSet sets the key in section to value. It doesn't save
// the config file.
func ConfigFileSet(section, key, value string) {
configData.SetValue(section, key, value)
}
// ConfigFileDeleteKey deletes the config key in the config file.
// It returns true if the key was deleted,
// or returns false if the section or key didn't exist.
func ConfigFileDeleteKey(section, key string) bool {
return configData.DeleteKey(section, key)
}
var matchEnv = regexp.MustCompile(`^RCLONE_CONFIG_(.*?)_TYPE=.*$`)
// ConfigFileSections returns the sections in the config file
// including any defined by environment variables.
func ConfigFileSections() []string {
sections := configData.GetSectionList()
for _, item := range os.Environ() {
matches := matchEnv.FindStringSubmatch(item)
if len(matches) == 2 {
sections = append(sections, strings.ToLower(matches[1]))
}
}
return sections
}
// ConfigDump dumps all the config as a JSON file
func ConfigDump() error {
dump := make(map[string]map[string]string)
for _, name := range configData.GetSectionList() {
params := make(map[string]string)
for _, key := range configData.GetKeyList(name) {
params[key] = ConfigFileGet(name, key)
}
dump[name] = params
}
b, err := json.MarshalIndent(dump, "", " ")
if err != nil {
return errors.Wrap(err, "failed to marshal config dump")
}
_, err = os.Stdout.Write(b)
if err != nil {
return errors.Wrap(err, "failed to write config dump")
}
return nil
}
| 1 | 6,416 | Can you expand the warning slightly to "Your password contains leading/trailing whitespace - in previous versions of rclone this was stripped"? Just so users know they might have to type their password without a leading space. | rclone-rclone | go |
@@ -596,7 +596,7 @@ namespace pwiz.Skyline
var message = TextUtil.SpaceSeparate(
Resources.SkylineWindow_FindIrtDatabase_The_database_file_specified_could_not_be_opened,
e.Message);
- MessageBox.Show(message);
+ MessageDlg.Show(this, message);
}
}
else | 1 | /*
* Original author: Brendan MacLean <brendanx .at. u.washington.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2009 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Net;
using System.Reflection;
using System.Text;
using System.Windows.Forms;
using System.Xml;
using System.Xml.Serialization;
using Ionic.Zip;
using Newtonsoft.Json.Linq;
using pwiz.Common.Collections;
using pwiz.Common.DataBinding;
using pwiz.Common.SystemUtil;
using pwiz.ProteomeDatabase.API;
using pwiz.Skyline.Alerts;
using pwiz.Skyline.Controls;
using pwiz.Skyline.Controls.Databinding;
using pwiz.Skyline.Controls.SeqNode;
using pwiz.Skyline.Controls.Startup;
using pwiz.Skyline.EditUI;
using pwiz.Skyline.FileUI;
using pwiz.Skyline.FileUI.PeptideSearch;
using pwiz.Skyline.Model;
using pwiz.Skyline.Model.AuditLog;
using pwiz.Skyline.Model.Databinding;
using pwiz.Skyline.Model.DocSettings;
using pwiz.Skyline.Model.DocSettings.Extensions;
using pwiz.Skyline.Model.ElementLocators.ExportAnnotations;
using pwiz.Skyline.Model.Esp;
using pwiz.Skyline.Model.IonMobility;
using pwiz.Skyline.Model.Irt;
using pwiz.Skyline.Model.Lib;
using pwiz.Skyline.Model.Lib.BlibData;
using pwiz.Skyline.Model.Lib.Midas;
using pwiz.Skyline.Model.Optimization;
using pwiz.Skyline.Model.Proteome;
using pwiz.Skyline.Model.Results;
using pwiz.Skyline.Properties;
using pwiz.Skyline.Util;
using pwiz.Skyline.Util.Extensions;
using DatabaseOpeningException = pwiz.Skyline.Model.Irt.DatabaseOpeningException;
namespace pwiz.Skyline
{
public partial class SkylineWindow
{
public static string GetViewFile(string fileName)
{
return fileName + @".view";
}
private void fileMenu_DropDownOpening(object sender, EventArgs e)
{
ToolStripMenuItem menu = fileToolStripMenuItem;
List<string> mruList = Settings.Default.MruList;
string curDir = Settings.Default.ActiveDirectory;
int start = menu.DropDownItems.IndexOf(mruBeforeToolStripSeparator) + 1;
while (!ReferenceEquals(menu.DropDownItems[start], mruAfterToolStripSeparator))
menu.DropDownItems.RemoveAt(start);
int len = Math.Min(mruList.Count, Settings.Default.MruLength);
for (int i = 0; i < len; i++)
{
MruChosenHandler handler = new MruChosenHandler(this, mruList[i]);
ToolStripMenuItem item = new ToolStripMenuItem(GetMruName(i, mruList[i], curDir), null,
handler.ToolStripMenuItemClick);
if (!item.Text.EndsWith(mruList[i]))
{
item.ToolTipText = mruList[i];
}
menu.DropDownItems.Insert(start + i, item);
}
mruAfterToolStripSeparator.Visible = (mruList.Count > 0);
}
private static string GetMruName(int index, string path, string curDir)
{
string name = path;
if (curDir == Path.GetDirectoryName(path))
name = Path.GetFileName(path);
// Make index 1-based
index++;
if (index < 9)
name = string.Format(@"&{0} {1}", index, name);
return name;
}
private class MruChosenHandler
{
private readonly SkylineWindow _skyline;
private readonly string _path;
public MruChosenHandler(SkylineWindow skyline, string path)
{
_skyline = skyline;
_path = path;
}
public void ToolStripMenuItemClick(object sender, EventArgs e)
{
if (!_skyline.CheckSaveDocument())
return;
_skyline.OpenFile(_path);
}
}
private void startPageMenuItem_Click(object sender, EventArgs e) { OpenStartPage(); }
private void newMenuItem_Click(object sender, EventArgs e) { NewDocument(); }
public void OpenStartPage()
{
if (!CheckSaveDocument())
return;
using (var startupForm = new StartPage())
{
if (startupForm.ShowDialog(this) == DialogResult.OK)
{
startupForm.Action(this);
}
}
}
public void NewDocument()
{
NewDocument(false);
}
public void NewDocument(bool forced)
{
if (!forced && !CheckSaveDocument())
return;
// Create a new document with the default settings.
SrmDocument document = ConnectDocument(this, new SrmDocument(Settings.Default.SrmSettingsList[0]), null) ??
new SrmDocument(SrmSettingsList.GetDefault());
if (document.Settings.DataSettings.AuditLogging)
{
var entry = AuditLogEntry.GetAuditLoggingStartExistingDocEntry(document, ModeUI);
document = entry?.AppendEntryToDocument(document) ?? document;
}
// Make sure settings lists contain correct values for
// this document.
document.Settings.UpdateLists(null);
// Switch over to the new document
SwitchDocument(document, null);
}
private void openContainingFolderMenuItem_Click(object sender, EventArgs e)
{
string args = string.Format(@"/select, ""{0}""", DocumentFilePath);
Process.Start(@"explorer.exe", args);
}
private void openMenuItem_Click(object sender, EventArgs e)
{
if (!CheckSaveDocument())
return;
using (OpenFileDialog dlg = new OpenFileDialog
{
InitialDirectory = Settings.Default.ActiveDirectory,
CheckPathExists = true,
SupportMultiDottedExtensions = true,
DefaultExt = SrmDocument.EXT,
Filter = TextUtil.FileDialogFiltersAll(SrmDocument.FILTER_DOC_AND_SKY_ZIP, SrmDocumentSharing.FILTER_SHARING, SkypFile.FILTER_SKYP)
})
{
if (dlg.ShowDialog(this) == DialogResult.OK)
{
Settings.Default.ActiveDirectory = Path.GetDirectoryName(dlg.FileName);
if (dlg.FileName.EndsWith(SrmDocumentSharing.EXT))
{
OpenSharedFile(dlg.FileName);
}
else if (dlg.FileName.EndsWith(SkypFile.EXT))
{
OpenSkypFile(dlg.FileName);
}
else
{
OpenFile(dlg.FileName);
}
}
}
}
public bool OpenSharedFile(string zipPath, FormEx parentWindow = null)
{
try
{
var sharing = new SrmDocumentSharing(zipPath);
using (var longWaitDlg = new LongWaitDlg
{
Text = Resources.SkylineWindow_OpenSharedFile_Extracting_Files,
})
{
longWaitDlg.PerformWork(parentWindow ?? this, 1000, sharing.Extract);
if (longWaitDlg.IsCanceled)
return false;
}
// Remember the directory containing the newly extracted file
// as the active directory for the next open command.
Settings.Default.ActiveDirectory = Path.GetDirectoryName(sharing.DocumentPath);
return OpenFile(sharing.DocumentPath, parentWindow);
}
catch (ZipException zipException)
{
MessageDlg.ShowWithException(parentWindow ?? this, string.Format(Resources.SkylineWindow_OpenSharedFile_The_zip_file__0__cannot_be_read,
zipPath), zipException);
return false;
}
catch (Exception e)
{
var message = TextUtil.LineSeparate(string.Format(
Resources.SkylineWindow_OpenSharedFile_Failure_extracting_Skyline_document_from_zip_file__0__,
zipPath), e.Message);
MessageDlg.ShowWithException(parentWindow ?? this, message, e);
return false;
}
}
public bool OpenSkypFile(string skypPath, FormEx parentWindow = null)
{
var skypSupport = new SkypSupport(this);
return skypSupport.Open(skypPath, Settings.Default.ServerList, parentWindow);
}
private AuditLogEntry AskForLogEntry(FormEx parentWindow)
{
AuditLogEntry result = null;
Invoke((Action)(() =>
{
using (var alert = new AlertDlg(
AuditLogStrings
.SkylineWindow_AskForLogEntry_The_audit_log_does_not_match_the_current_document__Would_you_like_to_add_a_log_entry_describing_the_changes_made_to_the_document_,
MessageBoxButtons.YesNo))
{
if (alert.ShowDialog(parentWindow ?? this) == DialogResult.Yes)
{
using (var docChangeEntryDlg = new DocumentChangeLogEntryDlg())
{
docChangeEntryDlg.ShowDialog(parentWindow ?? this);
result = docChangeEntryDlg.Entry;
return;
}
}
result = AuditLogEntry.CreateUndocumentedChangeEntry();
}
}));
return result;
}
/// <summary>
/// Used in testing to know whether a document changed event comes from opening a file.
/// </summary>
private bool IsOpeningFile { get; set; }
public bool OpenFile(string path, FormEx parentWindow = null)
{
// Remove any extraneous temporary chromatogram spill files.
// ReSharper disable LocalizableElement
var spillDirectory = Path.Combine(Path.GetDirectoryName(path) ?? "", "xic");
// ReSharper restore LocalizableElement
if (Directory.Exists(spillDirectory))
DirectoryEx.SafeDelete(spillDirectory);
Exception exception = null;
SrmDocument document = null;
// A fairly common support question is "why won't this Skyline file open?" when they are actually
// trying to open a .skyd file or somesuch. Probably an artifact of Windows hiding file extensions.
// Try to work around it by finding a plausible matching .sky file when asked to open a .sky? file.
if (!path.EndsWith(SrmDocument.EXT) && !SrmDocument.IsSkylineFile(path, out _)) // Tolerate rename, eg foo.ski
{
path = SrmDocument.FindSiblingSkylineFile(path);
}
try
{
using (var longWaitDlg = new LongWaitDlg(this)
{
Text = Resources.SkylineWindow_OpenFile_Loading___,
Message = Path.GetFileName(path),
ProgressValue = 0
})
{
longWaitDlg.PerformWork(parentWindow ?? this, 500, progressMonitor =>
{
string skylineDocumentHash;
using (var hashingStreamReader = new HashingStreamReaderWithProgress(path, progressMonitor))
{
// Wrap stream in XmlReader so that BaseUri is known
var reader = XmlReader.Create(hashingStreamReader,
new XmlReaderSettings() { IgnoreWhitespace = true },
path);
XmlSerializer ser = new XmlSerializer(typeof (SrmDocument));
document = (SrmDocument) ser.Deserialize(reader);
skylineDocumentHash = hashingStreamReader.Stream.Done();
}
try
{
document = document.ReadAuditLog(path, skylineDocumentHash, ()=>AskForLogEntry(parentWindow));
}
catch (Exception e)
{
throw new AuditLogException(
string.Format(AuditLogStrings.AuditLogException_Error_when_loading_document_audit_log__0, path), e);
}
});
if (longWaitDlg.IsCanceled)
document = null;
}
}
catch (Exception x)
{
var ex = x;
if (AuditLogException.IsAuditLogInvolved(x))
{
MessageDlg.ShowWithException(parentWindow ?? this,
AuditLogException.GetMultiLevelMessage(x),
x);
}
else
{
exception = x;
// Was that even a Skyline file?
if (!SrmDocument.IsSkylineFile(path, out var explained))
{
exception = new IOException(
explained); // Offer a more helpful explanation than that from the failed XML parser
}
}
}
if (exception == null)
{
if (document == null)
return false;
try
{
document = ConnectDocument(parentWindow ?? this, document, path);
if (document == null || !CheckResults(document, path, parentWindow))
return false;
// Make sure settings lists contain correct values for
// this document.
// ReSharper disable once PossibleNullReferenceException
document.Settings.UpdateLists(path);
}
catch (Exception x)
{
exception = x;
}
}
if (exception == null)
{
try
{
IsOpeningFile = true;
using (new SequenceTreeForm.LockDoc(_sequenceTreeForm))
{
// Switch over to the opened document
SwitchDocument(document, path);
}
// Locking the sequenceTree can throw off the node count status
UpdateNodeCountStatus();
}
catch (Exception x)
{
exception = x;
}
finally
{
IsOpeningFile = false;
}
}
if (exception != null)
{
new MessageBoxHelper(parentWindow ?? this).ShowXmlParsingError(
string.Format(Resources.SkylineWindow_OpenFile_Failure_opening__0__, path), path, exception);
return false;
}
if (SequenceTree != null && SequenceTree.Nodes.Count > 0 && !SequenceTree.RestoredFromPersistentString)
SequenceTree.SelectedNode = SequenceTree.Nodes[0];
// Once user has opened an existing document, stop reminding them to set a default UI mode
if (string.IsNullOrEmpty(Settings.Default.UIMode))
{
// ReSharper disable PossibleNullReferenceException
var mode = document.DocumentType == SrmDocument.DOCUMENT_TYPE.none
? SrmDocument.DOCUMENT_TYPE.proteomic
: document.DocumentType;
// ReSharper restore PossibleNullReferenceException
Settings.Default.UIMode = mode.ToString();
}
return true;
}
private SrmDocument ConnectDocument(IWin32Window parent, SrmDocument document, string path)
{
document = ConnectLibrarySpecs(parent, document, path);
if (document != null)
document = ConnectBackgroundProteome(parent, document, path);
if (document != null)
document = ConnectIrtDatabase(parent, document, path);
if (document != null)
document = ConnectOptimizationDatabase(parent, document, path);
if (document != null)
document = ConnectIonMobilityLibrary(parent, document, path);
return document;
}
private SrmDocument ConnectLibrarySpecs(IWin32Window parent, SrmDocument document, string documentPath)
{
string docLibFile = null;
if (!string.IsNullOrEmpty(documentPath) && document.Settings.PeptideSettings.Libraries.HasDocumentLibrary)
{
docLibFile = BiblioSpecLiteSpec.GetLibraryFileName(documentPath);
if (!File.Exists(docLibFile))
{
MessageDlg.Show(parent, string.Format(Resources.SkylineWindow_ConnectLibrarySpecs_Could_not_find_the_spectral_library__0__for_this_document__Without_the_library__no_spectrum_ID_information_will_be_available_, docLibFile));
}
}
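            // Resolve each library spec: prefer an entry from the user's spectral library list,
            // then a file next to the document, then the default library directory, and finally
            // prompt with MissingFileDlg.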
var settings = document.Settings.ConnectLibrarySpecs((library, librarySpec) =>
{
string name = library != null ? library.Name : librarySpec.Name;
LibrarySpec spec;
if (Settings.Default.SpectralLibraryList.TryGetValue(name, out spec))
{
if (File.Exists(spec.FilePath))
return spec;
}
if (documentPath == null)
return null;
string fileName = library != null ? library.FileNameHint : Path.GetFileName(librarySpec.FilePath);
if (fileName != null)
{
// First look for the file name in the document directory
string pathLibrary = PathEx.FindExistingRelativeFile(documentPath, fileName);
if (pathLibrary != null)
return CreateLibrarySpec(library, librarySpec, pathLibrary, true);
// In the user's default library directory
pathLibrary = Path.Combine(Settings.Default.LibraryDirectory ?? string.Empty, fileName);
if (File.Exists(pathLibrary))
return CreateLibrarySpec(library, librarySpec, pathLibrary, false);
}
using (var dlg = new MissingFileDlg
{
ItemName = name,
ItemType = Resources.SkylineWindow_ConnectLibrarySpecs_Spectral_Library,
Filter = library != null ? library.SpecFilter : librarySpec.Filter,
FileHint = fileName,
FileDlgInitialPath = Path.GetDirectoryName(documentPath),
Title = Resources.SkylineWindow_ConnectLibrarySpecs_Find_Spectral_Library
})
{
if (dlg.ShowDialog(parent) == DialogResult.OK)
{
Settings.Default.LibraryDirectory = Path.GetDirectoryName(dlg.FilePath);
return CreateLibrarySpec(library, librarySpec, dlg.FilePath, false);
}
}
return null;
}, docLibFile);
if (settings == null)
return null; // User cancelled
if (ReferenceEquals(settings, document.Settings))
return document;
// If the libraries were moved to disconnected state, then avoid updating
// the document tree for this change, or it will strip all the library
// information off the document nodes.
if (settings.PeptideSettings.Libraries.DisconnectedLibraries != null)
return document.ChangeSettingsNoDiff(settings);
return document.ChangeSettings(settings);
}
private static LibrarySpec CreateLibrarySpec(Library library, LibrarySpec librarySpec, string pathLibrary, bool local)
{
var newLibrarySpec = library != null
? library.CreateSpec(pathLibrary)
: librarySpec.ChangeFilePath(pathLibrary);
if (local)
newLibrarySpec = newLibrarySpec.ChangeDocumentLocal(true);
return newLibrarySpec;
}
private SrmDocument ConnectIrtDatabase(IWin32Window parent, SrmDocument document, string documentPath)
{
var settings = document.Settings.ConnectIrtDatabase(calc => FindIrtDatabase(parent, documentPath, calc));
if (settings == null)
return null;
if (ReferenceEquals(settings, document.Settings))
return document;
return document.ChangeSettings(settings);
}
private RCalcIrt FindIrtDatabase(IWin32Window parent, string documentPath, RCalcIrt irtCalc)
{
RetentionScoreCalculatorSpec result;
if (Settings.Default.RTScoreCalculatorList.TryGetValue(irtCalc.Name, out result))
{
var calc = result as RCalcIrt;
if (calc != null && File.Exists(calc.DatabasePath))
return calc;
}
if (documentPath == null)
return null;
// First look for the file name in the document directory
string filePath = PathEx.FindExistingRelativeFile(documentPath, irtCalc.DatabasePath);
if (filePath != null)
{
try
{
return irtCalc.ChangeDatabasePath(filePath);
}
catch (CalculatorException)
{
                //Todo: should this fail silently or raise another dialog box?
}
}
do
{
using (var dlg = new MissingFileDlg
{
ItemName = irtCalc.Name,
ItemType = Resources.SkylineWindow_FindIrtDatabase_iRT_Calculator,
Filter = TextUtil.FileDialogFilterAll(Resources.SkylineWindow_FindIrtDatabase_iRT_Database_Files, IrtDb.EXT),
FileHint = Path.GetFileName(irtCalc.DatabasePath),
FileDlgInitialPath = Path.GetDirectoryName(documentPath),
Title = Resources.SkylineWindow_FindIrtDatabase_Find_iRT_Calculator
})
{
if (dlg.ShowDialog(parent) == DialogResult.OK)
{
if (dlg.FilePath == null)
return RCalcIrt.NONE;
try
{
return irtCalc.ChangeDatabasePath(dlg.FilePath);
}
catch (DatabaseOpeningException e)
{
var message = TextUtil.SpaceSeparate(
Resources.SkylineWindow_FindIrtDatabase_The_database_file_specified_could_not_be_opened,
e.Message);
MessageBox.Show(message);
}
}
else
{
return null;
}
}
}
while (true);
}
private SrmDocument ConnectOptimizationDatabase(IWin32Window parent, SrmDocument document, string documentPath)
{
var settings = document.Settings.ConnectOptimizationDatabase(lib => FindOptimizationDatabase(parent, documentPath, lib));
if (settings == null)
return null;
if (ReferenceEquals(settings, document.Settings))
return document;
return document.ChangeSettings(settings);
}
private OptimizationLibrary FindOptimizationDatabase(IWin32Window parent, string documentPath, OptimizationLibrary optLib)
{
if (optLib.IsNone)
return optLib;
OptimizationLibrary lib;
if (Settings.Default.OptimizationLibraryList.TryGetValue(optLib.Name, out lib))
{
if (lib != null && File.Exists(lib.DatabasePath))
return lib;
}
if (documentPath == null)
return null;
// First look for the file name in the document directory
string filePath = PathEx.FindExistingRelativeFile(documentPath, optLib.DatabasePath);
if (filePath != null)
{
try
{
return optLib.ChangeDatabasePath(filePath);
}
// ReSharper disable once EmptyGeneralCatchClause
catch (Exception)
{
                //Todo: should this fail silently or raise another dialog box?
}
}
do
{
using (var dlg = new MissingFileDlg
{
ItemName = optLib.Name,
ItemType = Resources.SkylineWindow_FindOptimizationDatabase_Optimization_Library,
Filter = TextUtil.FileDialogFilterAll(Resources.SkylineWindow_FindOptimizationDatabase_Optimization_Library_Files, OptimizationDb.EXT),
FileHint = Path.GetFileName(optLib.DatabasePath),
FileDlgInitialPath = Path.GetDirectoryName(documentPath),
Title = Resources.SkylineWindow_FindOptimizationDatabase_Find_Optimization_Library
})
{
if (dlg.ShowDialog(parent) == DialogResult.OK)
{
if (dlg.FilePath == null)
return OptimizationLibrary.NONE;
try
{
return optLib.ChangeDatabasePath(dlg.FilePath);
}
catch (OptimizationsOpeningException e)
{
var message = TextUtil.SpaceSeparate(
Resources.SkylineWindow_FindOptimizationDatabase_The_database_file_specified_could_not_be_opened_,
e.Message);
MessageBox.Show(message);
}
}
else
{
return null;
}
}
}
while (true);
}
private SrmDocument ConnectIonMobilityLibrary(IWin32Window parent, SrmDocument document, string documentPath)
{
var settings = document.Settings.ConnectIonMobilityLibrary(imsdb => FindIonMobilityLibrary(parent, documentPath, imsdb));
if (settings == null)
return null;
if (ReferenceEquals(settings, document.Settings))
return document;
return document.ChangeSettings(settings);
}
private IonMobilityLibrary FindIonMobilityLibrary(IWin32Window parent, string documentPath, IonMobilityLibrary ionMobilityLibrary)
{
IonMobilityLibrary result;
if (Settings.Default.IonMobilityLibraryList.TryGetValue(ionMobilityLibrary.Name, out result))
{
if (result != null && File.Exists(result.FilePath))
return result;
}
if (documentPath == null)
return null;
// First look for the file name in the document directory
string filePath = PathEx.FindExistingRelativeFile(documentPath, ionMobilityLibrary.FilePath);
if (filePath != null)
{
try
{
return ionMobilityLibrary.ChangeDatabasePath(filePath);
}
// ReSharper disable once EmptyGeneralCatchClause
catch
{
                //Todo: should this fail silently or raise another dialog box?
}
}
do
{
using (var dlg = new MissingFileDlg
{
ItemName = ionMobilityLibrary.Name,
ItemType = Resources.SkylineWindow_FindIonMobilityLibrary_Ion_Mobility_Library,
Filter = TextUtil.FileDialogFilterAll(Resources.SkylineWindow_FindIonMobilityDatabase_ion_mobility_library_files, IonMobilityDb.EXT),
FileHint = Path.GetFileName(ionMobilityLibrary.FilePath),
FileDlgInitialPath = Path.GetDirectoryName(documentPath),
Title = Resources.SkylineWindow_FindIonMobilityLibrary_Find_Ion_Mobility_Library
})
{
if (dlg.ShowDialog(parent) == DialogResult.OK)
{
if (dlg.FilePath == null)
return IonMobilityLibrary.NONE;
try
{
return ionMobilityLibrary.ChangeDatabasePath(dlg.FilePath);
}
catch (DatabaseOpeningException e)
{
var message = TextUtil.SpaceSeparate(
Resources.SkylineWindow_FindIonMobilityDatabase_The_ion_mobility_library_specified_could_not_be_opened_,
e.Message);
MessageBox.Show(message);
}
}
else
{
return null;
}
}
}
while (true);
}
        private SrmDocument ConnectBackgroundProteome(IWin32Window parent, SrmDocument document, string documentPath)
{
var settings = document.Settings.ConnectBackgroundProteome(backgroundProteomeSpec =>
FindBackgroundProteome(parent, documentPath, backgroundProteomeSpec));
if (settings == null)
return null;
if (ReferenceEquals(settings, document.Settings))
return document;
return document.ChangeSettings(settings);
}
private BackgroundProteomeSpec FindBackgroundProteome(IWin32Window parent, string documentPath, BackgroundProteomeSpec backgroundProteomeSpec)
{
var result = Settings.Default.BackgroundProteomeList.GetBackgroundProteomeSpec(backgroundProteomeSpec.Name);
if (result != null)
{
if (File.Exists(result.DatabasePath))
return result;
}
if (documentPath == null)
return null;
// Is the saved path correct? Then just use that.
if (File.Exists(backgroundProteomeSpec.DatabasePath))
return new BackgroundProteomeSpec(backgroundProteomeSpec.Name, backgroundProteomeSpec.DatabasePath);
// First look for the file name in the document directory
string pathBackgroundProteome = PathEx.FindExistingRelativeFile(documentPath, backgroundProteomeSpec.DatabasePath);
if (pathBackgroundProteome != null)
return new BackgroundProteomeSpec(backgroundProteomeSpec.Name, pathBackgroundProteome);
// In the user's default library directory
string fileName = Path.GetFileName(backgroundProteomeSpec.DatabasePath);
pathBackgroundProteome = Path.Combine(Settings.Default.ProteomeDbDirectory ?? string.Empty, fileName ?? string.Empty);
if (File.Exists(pathBackgroundProteome))
return new BackgroundProteomeSpec(backgroundProteomeSpec.Name, pathBackgroundProteome);
using (var dlg = new MissingFileDlg
{
FileHint = fileName,
ItemName = backgroundProteomeSpec.Name,
ItemType = Resources.SkylineWindow_FindBackgroundProteome_Background_Proteome,
Filter = TextUtil.FileDialogFilterAll(Resources.SkylineWindow_FindBackgroundProteome_Proteome_File, ProteomeDb.EXT_PROTDB),
FileDlgInitialPath = Settings.Default.ProteomeDbDirectory,
Title = Resources.SkylineWindow_FindBackgroundProteome_Find_Background_Proteome
})
{
if (dlg.ShowDialog(parent) == DialogResult.OK)
{
if (dlg.FilePath == null)
{
return BackgroundProteomeList.GetDefault();
}
Settings.Default.ProteomeDbDirectory = Path.GetDirectoryName(dlg.FilePath);
return new BackgroundProteomeSpec(backgroundProteomeSpec.Name, dlg.FilePath);
}
}
return null;
}
private bool CheckResults(SrmDocument document, string path, FormEx parent)
{
string pathCache = ChromatogramCache.FinalPathForName(path, null);
if (!document.Settings.HasResults)
{
// On open, make sure a document with no results does not have a
// data cache file, since one may have been left behind on a Save As.
FileEx.SafeDelete(pathCache, true);
}
else if (!File.Exists(pathCache) &&
// For backward compatibility, check to see if any per-replicate
// cache files exist.
!File.Exists(ChromatogramCache.FinalPathForName(path,
document.Settings.MeasuredResults.Chromatograms[0].Name)))
{
// It has become clear that showing a message box about rebuilding
// the cache on open is shocking to people, and they immediately
// worry that a "rebuild" will cause them to lose work. So, first
// figure out if any of the sample files are missing from places
// Skyline will find them.
var missingFiles = new List<string>();
//var foundFiles = new List<string>();
foreach (var chromSet in document.Settings.MeasuredResults.Chromatograms)
{
foreach (var pathFileSample in chromSet.MSDataFilePaths)
{
var msDataFilePath = pathFileSample as MsDataFilePath;
if (null == msDataFilePath)
{
continue;
}
string pathFile = msDataFilePath.FilePath;
if (missingFiles.Contains(pathFile))
continue;
string pathPartCache = ChromatogramCache.PartPathForName(path, pathFileSample);
if (File.Exists(pathFile) ||
Directory.Exists(pathFile) || // some sample "files" are actually directories (.d etc)
File.Exists(pathPartCache) ||
File.Exists(Path.Combine(Path.GetDirectoryName(path) ?? string.Empty, Path.GetFileName(pathFile) ?? string.Empty)))
{
//foundFiles.Add(pathFile);
}
else
{
missingFiles.Add(pathFile);
}
}
}
// If all necessary data is present, just start rebuilding without asking
// to avoid shocking the user.
if (missingFiles.Count == 0)
return true;
// TODO: Ask the user to locate the missing data files
string missingFilesString = TextUtil.LineSeparate(missingFiles);
string message = TextUtil.LineSeparate(string.Format(
Resources.SkylineWindow_CheckResults_The_data_file___0___is_missing__and_the_following_original_instrument_output_could_not_be_found_,
ChromatogramCache.FinalPathForName(path, null)),
string.Empty,
missingFilesString,
string.Empty,
Resources.SkylineWindow_CheckResults_Click_OK_to_open_the_document_anyway);
if (MultiButtonMsgDlg.Show(parent ?? this, message, MultiButtonMsgDlg.BUTTON_OK) == DialogResult.Cancel)
{
return false;
}
}
return true;
}
private void saveMenuItem_Click(object sender, EventArgs e)
{
SaveDocument();
}
private void saveAsMenuItem_Click(object sender, EventArgs e)
{
SaveDocumentAs();
}
private bool CheckSaveDocument()
{
if (Dirty)
{
var result = MultiButtonMsgDlg.Show(this,
Resources.SkylineWindow_CheckSaveDocument_Do_you_want_to_save_changes,
Resources.SkylineWindow_CheckSaveDocument_Yes, Resources.SkylineWindow_CheckSaveDocument_No, true);
switch (result)
{
case DialogResult.Yes:
return SaveDocument();
case DialogResult.Cancel:
return false;
}
}
return true;
}
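        /// <summary>
        /// Saves the document to its current file path, or prompts with Save As if the document
        /// has not been saved yet. Returns false if the save is canceled or fails.
        /// </summary>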
public bool SaveDocument()
{
string fileName = DocumentFilePath;
if (string.IsNullOrEmpty(fileName))
return SaveDocumentAs();
return SaveDocument(fileName);
}
private bool SaveDocumentAs()
{
            // Make sure results are loaded before performing a Save As,
// since the results cache must be copied to the new location.
if (!DocumentUI.IsSavable)
{
MessageDlg.Show(this, Resources.SkylineWindow_SaveDocumentAs_The_document_must_be_fully_loaded_before_it_can_be_saved_to_a_new_name);
return false;
}
using (var dlg = new SaveFileDialog
{
InitialDirectory = Settings.Default.ActiveDirectory,
OverwritePrompt = true,
DefaultExt = SrmDocument.EXT,
Filter = TextUtil.FileDialogFiltersAll(SrmDocument.FILTER_DOC)
})
{
if (!string.IsNullOrEmpty(DocumentFilePath))
dlg.FileName = Path.GetFileName(DocumentFilePath);
if (dlg.ShowDialog(this) == DialogResult.OK)
{
if (SaveDocument(dlg.FileName))
return true;
}
}
return false;
}
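        /// <summary>
        /// Saves the document to the given file name: ensures the document has a GUID, serializes it
        /// with progress feedback, copies any document-specific spectral library on a Save As, updates
        /// the settings lists and window layout, and optionally optimizes the chromatogram cache.
        /// </summary>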
public bool SaveDocument(String fileName, bool includingCacheFile = true)
{
if (string.IsNullOrEmpty(DocumentUI.Settings.DataSettings.DocumentGuid) ||
!Equals(DocumentFilePath, fileName))
{
SrmDocument docOriginal;
SrmDocument docNew;
do
{
docOriginal = Document;
docNew =
docOriginal.ChangeSettings(
docOriginal.Settings.ChangeDataSettings(
docOriginal.Settings.DataSettings.ChangeDocumentGuid()));
} while (!SetDocument(docNew, docOriginal));
}
SrmDocument document = Document;
try
{
using (var saver = new FileSaver(fileName))
{
saver.CheckException();
using (var longWaitDlg = new LongWaitDlg(this)
{
Text = Resources.SkylineWindow_SaveDocument_Saving___,
Message = Path.GetFileName(fileName)
})
{
longWaitDlg.PerformWork(this, 800, progressMonitor =>
{
document.SerializeToFile(saver.SafeName, fileName, SkylineVersion.CURRENT, progressMonitor);
// If the user has chosen "Save As", and the document has a
// document specific spectral library, copy this library to
// the new name.
if (!Equals(DocumentFilePath, fileName))
SaveDocumentLibraryAs(fileName);
saver.Commit();
});
// Sometimes this catches a cancellation that doesn't throw an OperationCanceledException.
if (longWaitDlg.IsCanceled)
return false;
}
}
}
catch (OperationCanceledException)
{
return false;
}
catch (Exception ex)
{
var message = TextUtil.LineSeparate(string.Format(Resources.SkylineWindow_SaveDocument_Failed_writing_to__0__, fileName), ex.Message);
MessageDlg.ShowWithException(this, message, ex);
return false;
}
DocumentFilePath = fileName;
_savedVersion = document.UserRevisionIndex;
SetActiveFile(fileName);
// Make sure settings lists contain correct values for this document.
document.Settings.UpdateLists(DocumentFilePath);
try
{
SaveLayout(fileName);
// CONSIDER: Is this really optional?
if (includingCacheFile)
{
using (var longWaitDlg = new LongWaitDlg(this)
{
Text = Resources.SkylineWindow_SaveDocument_Optimizing_data_file___,
Message = Path.GetFileName(fileName)
})
{
longWaitDlg.PerformWork(this, 800, () =>
OptimizeCache(fileName, longWaitDlg));
}
}
}
// We allow silent failures because it is OK for the cache to remain unoptimized
// or the layout to not be saved. These aren't critical as long as the document
// was saved correctly.
catch (UnauthorizedAccessException) {}
catch (IOException) {}
catch (OperationCanceledException) {}
catch (TargetInvocationException) {}
return true;
}
private void OptimizeCache(string fileName, ILongWaitBroker progress)
{
// Optimize the results cache to get rid of any unnecessary
// chromatogram data.
var settings = Document.Settings;
if (settings.HasResults)
{
var results = settings.MeasuredResults;
if (results.IsLoaded)
{
var resultsNew = results.OptimizeCache(fileName, _chromatogramManager.StreamManager, progress);
if (!ReferenceEquals(resultsNew, results))
{
SrmDocument docNew, docCurrent;
do
{
docCurrent = Document;
docNew = docCurrent.ChangeMeasuredResults(resultsNew);
}
while (!SetDocument(docNew, docCurrent));
}
}
}
else
{
string cachePath = ChromatogramCache.FinalPathForName(DocumentFilePath, null);
FileEx.SafeDelete(cachePath, true);
}
}
private void SaveDocumentLibraryAs(string newDocFilePath)
{
string oldDocLibFile = BiblioSpecLiteSpec.GetLibraryFileName(DocumentFilePath);
string oldRedundantDocLibFile = BiblioSpecLiteSpec.GetRedundantName(oldDocLibFile);
            // If the document has a document-specific library, the files for it exist on disk,
            // and it's not stale due to conversion of the document to a small molecule representation,
            // copy the library (and any redundant library) to match the new document name.
var document = Document;
string newDocLibFile = BiblioSpecLiteSpec.GetLibraryFileName(newDocFilePath);
if (document.Settings.PeptideSettings.Libraries.HasDocumentLibrary
&& File.Exists(oldDocLibFile)
&& !Equals(newDocLibFile.Replace(BiblioSpecLiteSpec.DotConvertedToSmallMolecules, string.Empty), oldDocLibFile))
{
using (var saverLib = new FileSaver(newDocLibFile))
{
FileSaver saverRedundant = null;
if (File.Exists(oldRedundantDocLibFile))
{
string newRedundantDocLibFile = BiblioSpecLiteSpec.GetRedundantName(newDocFilePath);
saverRedundant = new FileSaver(newRedundantDocLibFile);
}
using (saverRedundant)
{
saverLib.CopyFile(oldDocLibFile);
if (saverRedundant != null)
{
saverRedundant.CopyFile(oldRedundantDocLibFile);
}
saverLib.Commit();
if (saverRedundant != null)
{
saverRedundant.Commit();
}
}
}
// Update the document library settings to point to the new library.
SrmDocument docOriginal, docNew;
do
{
docOriginal = Document;
docNew = docOriginal.ChangeSettingsNoDiff(docOriginal.Settings.ChangePeptideLibraries(libraries =>
libraries.ChangeDocumentLibraryPath(newDocFilePath)));
}
while (!SetDocument(docNew, docOriginal));
}
}
private void SaveLayout(string fileName)
{
using (var saverUser = new FileSaver(GetViewFile(fileName)))
{
if (saverUser.CanSave())
{
dockPanel.SaveAsXml(saverUser.SafeName);
saverUser.Commit();
}
}
}
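        /// <summary>
        /// Remembers the active directory for the given path, adds it to the MRU list,
        /// and updates the window title.
        /// </summary>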
private void SetActiveFile(string path)
{
if (!string.IsNullOrEmpty(path))
{
// Remember the active directory.
Settings.Default.ActiveDirectory = Path.GetDirectoryName(path);
// Store the path in the MRU.
List<string> mruList = Settings.Default.MruList;
if (mruList.Count == 0 || !Equals(path, mruList[0]))
{
mruList.Remove(path);
mruList.Insert(0, path);
int len = Settings.Default.MruMemoryLength;
if (mruList.Count > len)
mruList.RemoveRange(len, mruList.Count - len);
}
}
UpdateTitle();
}
private void shareDocumentMenuItem_Click(object sender, EventArgs e)
{
ShareDocument();
}
public void ShareDocument()
{
var document = DocumentUI;
if (!document.IsLoaded)
{
try
{
// Get the description of what is not loaded into the "More Info" section of the message box
// This is helpful for diagnosis, but not yet presented in a form intended for the user
throw new IOException(TextUtil.LineSeparate(document.NonLoadedStateDescriptions));
}
catch (Exception e)
{
MessageDlg.ShowWithException(this, Resources.SkylineWindow_shareDocumentMenuItem_Click_The_document_must_be_fully_loaded_before_it_can_be_shared, e);
}
return;
}
bool saved = false;
string fileName = DocumentFilePath;
if (string.IsNullOrEmpty(fileName))
{
if (MessageBox.Show(this, Resources.SkylineWindow_shareDocumentMenuItem_Click_The_document_must_be_saved_before_it_can_be_shared,
Program.Name, MessageBoxButtons.OKCancel) == DialogResult.Cancel)
return;
if (!SaveDocumentAs())
return;
saved = true;
fileName = DocumentFilePath;
}
ShareType shareType;
using (var dlgType = new ShareTypeDlg(document))
{
if (dlgType.ShowDialog(this) == DialogResult.Cancel)
return;
shareType = dlgType.ShareType;
}
using (var dlg = new SaveFileDialog
{
Title = Resources.SkylineWindow_shareDocumentMenuItem_Click_Share_Document,
InitialDirectory = Path.GetDirectoryName(fileName),
FileName = Path.GetFileNameWithoutExtension(fileName) + SrmDocumentSharing.EXT_SKY_ZIP,
OverwritePrompt = true,
DefaultExt = SrmDocumentSharing.EXT_SKY_ZIP,
SupportMultiDottedExtensions = true,
Filter = TextUtil.FileDialogFilterAll(Resources.SkylineWindow_shareDocumentMenuItem_Click_Skyline_Shared_Documents, SrmDocumentSharing.EXT),
})
{
if (dlg.ShowDialog(this) == DialogResult.Cancel)
return;
// Make sure the document is completely saved before sharing
if (!saved && !SaveDocument())
return;
ShareDocument(dlg.FileName, shareType);
}
}
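        /// <summary>
        /// Creates a shared document archive at the given path with progress feedback,
        /// reporting any failure to the user. Returns true on success.
        /// </summary>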
public bool ShareDocument(string fileDest, ShareType shareType)
{
try
{
bool success;
using (var longWaitDlg = new LongWaitDlg { Text = Resources.SkylineWindow_ShareDocument_Compressing_Files, })
{
var sharing = new SrmDocumentSharing(DocumentUI, DocumentFilePath, fileDest, shareType);
longWaitDlg.PerformWork(this, 1000, sharing.Share);
success = !longWaitDlg.IsCanceled;
}
return success;
}
catch (Exception x)
{
var message = TextUtil.LineSeparate(string.Format(Resources.SkylineWindow_ShareDocument_Failed_attempting_to_create_sharing_file__0__, fileDest),
x.Message);
MessageDlg.ShowWithException(this, message, x);
}
return false;
}
private void exportTransitionListMenuItem_Click(object sender, EventArgs e)
{
ShowExportMethodDialog(ExportFileType.List);
}
private void exportIsolationListMenuItem_Click(object sender, EventArgs e)
{
var isolationScheme = DocumentUI.Settings.TransitionSettings.FullScan.IsolationScheme;
if (Document.MoleculeCount == 0 && (isolationScheme == null || isolationScheme.FromResults))
{
MessageDlg.Show(this,
Resources.SkylineWindow_exportIsolationListMenuItem_Click_There_is_no_isolation_list_data_to_export);
return;
}
ShowExportMethodDialog(ExportFileType.IsolationList);
}
private void exportMethodMenuItem_Click(object sender, EventArgs e)
{
ShowExportMethodDialog(ExportFileType.Method);
}
public DialogResult ShowExportMethodDialog(ExportFileType fileType)
{
using (ExportMethodDlg dlg = new ExportMethodDlg(DocumentUI, fileType))
{
return dlg.ShowDialog(this);
}
}
private void exportSpectralLibraryMenuItem_Click(object sender, EventArgs e)
{
ShowExportSpectralLibraryDialog();
}
public void ShowExportSpectralLibraryDialog()
{
var libraryExporter = new SpectralLibraryExporter(Document, DocumentFilePath);
libraryExporter.ShowExportSpectralLibraryDialog(this);
}
private void exportReportMenuItem_Click(object sender, EventArgs e)
{
ShowExportReportDialog();
}
public void ShowExportReportDialog()
{
using (var dlg = new ExportLiveReportDlg(this))
{
dlg.ShowDialog(this);
}
}
private void espFeaturesMenuItem_Click(object sender, EventArgs e)
{
ShowExportEspFeaturesDialog();
}
public void ShowExportEspFeaturesDialog()
{
if (DocumentUI.MoleculeCount == 0)
{
MessageDlg.Show(this, Resources.SkylineWindow_ShowExportEspFeaturesDialog_The_document_must_contain_targets_for_which_to_export_features_);
return;
}
using (var dlg = new SaveFileDialog
{
Title = Resources.SkylineWindow_ShowExportEspFeaturesDialog_Export_ESP_Features,
OverwritePrompt = true,
DefaultExt = EspFeatureCalc.EXT,
Filter = TextUtil.FileDialogFilterAll(Resources.SkylineWindow_ShowExportEspFeaturesDialog_ESP_Feature_Files,EspFeatureCalc.EXT),
})
{
if (!string.IsNullOrEmpty(DocumentFilePath))
{
dlg.InitialDirectory = Path.GetDirectoryName(DocumentFilePath);
dlg.FileName = Path.GetFileNameWithoutExtension(DocumentFilePath) + EspFeatureCalc.EXT;
}
if (dlg.ShowDialog(this) == DialogResult.Cancel)
return;
try
{
EspFeatureCalc.WriteFeatures(dlg.FileName,
DocumentUI.Molecules.Select(nodePep => nodePep.Peptide.Target), LocalizationHelper.CurrentCulture);
}
catch (IOException x)
{
var message = TextUtil.LineSeparate(string.Format(Resources.SkylineWindow_ShowExportEspFeaturesDialog_Failed_attempting_to_save_ESP_features_to__0__, dlg.FileName),
x.Message);
MessageDlg.ShowWithException(this, message, x);
}
}
}
private void chromatogramsToolStripMenuItem_Click(object sender, EventArgs e)
{
ShowChromatogramFeaturesDialog();
}
public void ShowChromatogramFeaturesDialog()
{
if (!DocumentUI.Settings.HasResults)
{
MessageDlg.Show(this, Resources.SkylineWindow_ShowChromatogramFeaturesDialog_The_document_must_have_imported_results_);
return;
}
if (DocumentUI.MoleculeCount == 0)
{
MessageDlg.Show(this, Resources.SkylineWindow_ShowChromatogramFeaturesDialog_The_document_must_have_targets_for_which_to_export_chromatograms_);
return;
}
using (var dlg = new ExportChromatogramDlg(DocumentUI, DocumentFilePath))
{
dlg.ShowDialog(this);
}
}
public void ShowReintegrateDialog()
{
RefineMenu.ShowReintegrateDialog();
}
private void mProphetFeaturesMenuItem_Click(object sender, EventArgs e)
{
ShowMProphetFeaturesDialog();
}
public void ShowMProphetFeaturesDialog()
{
if (!DocumentUI.Settings.HasResults)
{
MessageDlg.Show(this, Resources.SkylineWindow_ShowMProphetFeaturesDialog_The_document_must_have_imported_results_);
return;
}
if (DocumentUI.MoleculeCount == 0)
{
MessageDlg.Show(this, Resources.SkylineWindow_ShowMProphetFeaturesDialog_The_document_must_contain_targets_for_which_to_export_features_);
return;
}
using (var dlg = new MProphetFeaturesDlg(DocumentUI, DocumentFilePath))
{
dlg.ShowDialog(this);
}
}
private void peakBoundariesToolStripMenuItem_Click(object sender, EventArgs e)
{
if (!DocumentUI.Settings.HasResults)
{
MessageDlg.Show(this, Resources.SkylineWindow_ShowChromatogramFeaturesDialog_The_document_must_have_imported_results_);
}
using (OpenFileDialog dlg = new OpenFileDialog
{
Title = Resources.SkylineWindow_ImportPeakBoundaries_Import_PeakBoundaries,
CheckPathExists = true
})
{
if (dlg.ShowDialog(this) == DialogResult.OK)
{
ImportPeakBoundariesFile(dlg.FileName);
}
}
}
public void ImportPeakBoundariesFile(string peakBoundariesFile)
{
try
{
long lineCount = Helpers.CountLinesInFile(peakBoundariesFile);
ImportPeakBoundaries(peakBoundariesFile, lineCount, Resources.SkylineWindow_ImportPeakBoundaries_Import_PeakBoundaries);
}
catch (Exception x)
{
MessageDlg.ShowWithException(this,
TextUtil.LineSeparate(
string.Format(Resources.SkylineWindow_ImportPeakBoundariesFile_Failed_reading_the_file__0__,
peakBoundariesFile), x.Message), x);
}
}
private static void AddMessageInfo<T>(IList<MessageInfo> messageInfos, MessageType type, SrmDocument.DOCUMENT_TYPE docType, IEnumerable<T> items)
{
messageInfos.AddRange(items.Select(item => new MessageInfo(type, docType, item)));
}
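        /// <summary>
        /// Imports peak boundaries from the given file, prompting about unrecognized peptides,
        /// and records any unrecognized peptides, files and charge states in the audit log entry.
        /// </summary>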
private void ImportPeakBoundaries(string fileName, long lineCount, string description)
{
var docCurrent = DocumentUI;
SrmDocument docNew = null;
var peakBoundaryImporter = new PeakBoundaryImporter(docCurrent);
using (var longWaitDlg = new LongWaitDlg(this) { Text = description })
{
longWaitDlg.PerformWork(this, 1000, longWaitBroker =>
docNew = peakBoundaryImporter.Import(fileName, longWaitBroker, lineCount));
if (docNew == null)
return;
if (!peakBoundaryImporter.UnrecognizedPeptidesCancel(this))
return;
if (longWaitDlg.IsDocumentChanged(docCurrent))
{
MessageDlg.Show(this, Resources.SkylineWindow_ImportPeakBoundaries_Unexpected_document_change_during_operation);
return;
}
}
ModifyDocument(description, doc =>
{
if (!ReferenceEquals(doc, docCurrent))
throw new InvalidDataException(Resources.SkylineWindow_ImportPeakBoundaries_Unexpected_document_change_during_operation);
return docNew;
}, docPair =>
{
var allInfo = new List<MessageInfo>();
AddMessageInfo(allInfo, MessageType.removed_unrecognized_peptide, docPair.OldDocumentType, peakBoundaryImporter.UnrecognizedPeptides);
AddMessageInfo(allInfo, MessageType.removed_unrecognized_file, docPair.OldDocumentType,
peakBoundaryImporter.UnrecognizedFiles.Select(AuditLogPath.Create));
AddMessageInfo(allInfo, MessageType.removed_unrecognized_charge_state, docPair.OldDocumentType, peakBoundaryImporter.UnrecognizedChargeStates);
return AuditLogEntry.CreateSimpleEntry(MessageType.imported_peak_boundaries, docPair.OldDocumentType,
Path.GetFileName(fileName))
.AppendAllInfo(allInfo);
});
}
private void importFASTAMenuItem_Click(object sender, EventArgs e)
{
using (OpenFileDialog dlg = new OpenFileDialog
{
Title = Resources.SkylineWindow_ImportFastaFile_Import_FASTA,
InitialDirectory = Settings.Default.FastaDirectory,
CheckPathExists = true
// FASTA files often have no extension as well as .fasta and others
})
{
if (dlg.ShowDialog(this) == DialogResult.OK)
{
Settings.Default.FastaDirectory = Path.GetDirectoryName(dlg.FileName);
ImportFastaFile(dlg.FileName);
}
}
}
public void ImportFastaFile(string fastaFile)
{
try
{
long lineCount = Helpers.CountLinesInFile(fastaFile);
using (var readerFasta = new StreamReader(fastaFile))
{
ImportFasta(readerFasta, lineCount, false, Resources.SkylineWindow_ImportFastaFile_Import_FASTA, new ImportFastaInfo(true, fastaFile));
}
}
catch (Exception x)
{
MessageDlg.ShowWithException(this, string.Format(Resources.SkylineWindow_ImportFastaFile_Failed_reading_the_file__0__1__,
fastaFile, x.Message), x);
}
}
public class ImportFastaInfo
{
public ImportFastaInfo(bool file, string text)
{
File = file;
Text = text;
}
public bool File { get; private set; }
public string Text { get; private set;}
}
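        /// <summary>
        /// Imports FASTA sequences or a peptide list from the reader at the current selection.
        /// For a peptide list, modifications are first matched against Unimod definitions.
        /// Empty protein nodes are handled after the import, and an audit log entry is created.
        /// </summary>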
public void ImportFasta(TextReader reader, long lineCount, bool peptideList, string description, ImportFastaInfo importInfo)
{
SrmTreeNode nodePaste = SequenceTree.SelectedNode as SrmTreeNode;
IdentityPath selectPath = null;
var to = nodePaste != null ? nodePaste.Path : null;
var docCurrent = DocumentUI;
ModificationMatcher matcher = null;
            if (peptideList)
{
matcher = new ModificationMatcher();
List<string> sequences = new List<string>();
string line;
var header = reader.ReadLine(); // Read past header
while ((line = reader.ReadLine()) != null)
{
string sequence = FastaSequence.NormalizeNTerminalMod(line.Trim());
sequences.Add(sequence);
}
try
{
matcher.CreateMatches(docCurrent.Settings, sequences, Settings.Default.StaticModList, Settings.Default.HeavyModList);
var strNameMatches = matcher.FoundMatches;
if (!string.IsNullOrEmpty(strNameMatches))
{
var message = TextUtil.LineSeparate(Resources.SkylineWindow_ImportFasta_Would_you_like_to_use_the_Unimod_definitions_for_the_following_modifications,
string.Empty, strNameMatches);
if (DialogResult.Cancel == MultiButtonMsgDlg.Show(
this,
                        message, Resources.SkylineWindow_ImportFasta_OK))
{
return;
}
}
}
catch(FormatException x)
{
MessageDlg.ShowException(this, x);
return;
}
reader = new StringReader(TextUtil.LineSeparate(header, TextUtil.LineSeparate(sequences.ToArray())));
}
SrmDocument docNew = null;
int emptyPeptideGroups = 0;
using (var longWaitDlg = new LongWaitDlg(this) { Text = description })
{
IdentityPath nextAdded;
longWaitDlg.PerformWork(this, 1000, longWaitBroker =>
docNew = docCurrent.ImportFasta(reader, longWaitBroker, lineCount, matcher, to, out selectPath, out nextAdded, out emptyPeptideGroups));
if (docNew == null)
return;
if (!ReferenceEquals(Document, docCurrent))
{
MessageDlg.ShowWithException(this, Resources.SkylineWindow_ImportFasta_Unexpected_document_change_during_operation, new DocumentChangedException(Document, docCurrent));
return;
}
}
var entryCreatorList = new AuditLogEntryCreatorList();
            // If importing the FASTA produced any childless proteins, let the helper decide how to handle them
docNew = ImportFastaHelper.HandleEmptyPeptideGroups(this, emptyPeptideGroups, docNew, entryCreatorList);
if (docNew == null || Equals(docCurrent, docNew))
return;
selectPath = null;
using (var enumGroupsCurrent = docCurrent.MoleculeGroups.GetEnumerator())
{
// ReSharper disable once PossibleNullReferenceException
foreach (PeptideGroupDocNode nodePepGroup in docNew.MoleculeGroups)
{
if (enumGroupsCurrent.MoveNext() &&
!ReferenceEquals(nodePepGroup, enumGroupsCurrent.Current))
{
selectPath = new IdentityPath(nodePepGroup.Id);
break;
}
}
}
ModifyDocument(description, doc =>
{
if (!ReferenceEquals(doc, docCurrent))
throw new InvalidDataException(
Resources.SkylineWindow_ImportFasta_Unexpected_document_change_during_operation,
new DocumentChangedException(doc, docCurrent));
if (matcher != null)
{
var pepModsNew = matcher.GetDocModifications(docNew);
// ReSharper disable PossibleNullReferenceException
docNew = docNew.ChangeSettings(docNew.Settings.ChangePeptideModifications(mods => pepModsNew));
docNew.Settings.UpdateDefaultModifications(false);
// ReSharper restore PossibleNullReferenceException
}
return docNew;
}, docPair =>
{
if (importInfo == null)
return null;
MessageInfo info;
string extraInfo = null;
if (importInfo.File)
{
info = new MessageInfo(MessageType.imported_fasta, docPair.NewDocumentType, importInfo.Text);
}
else
{
info = new MessageInfo(peptideList
? MessageType.imported_peptide_list
: MessageType.imported_fasta_paste,
docPair.NewDocumentType);
extraInfo = importInfo.Text;
}
return AuditLogEntry.CreateSingleMessageEntry(info, extraInfo)
.Merge(docPair, entryCreatorList);
});
if (selectPath != null)
SequenceTree.SelectedPath = selectPath;
}
/// <summary>
/// More diagnostic information to try to catch cause of failing tests
/// </summary>
public class DocumentChangedException : Exception
{
public DocumentChangedException(SrmDocument docNow, SrmDocument docOriginal)
: base(GetMessage(docNow, docOriginal))
{
}
private static string GetMessage(SrmDocument docNow, SrmDocument docOriginal)
{
// ReSharper disable LocalizableElement
return TextUtil.LineSeparate(string.Format("DocRevision: before = {0}, after = {1}", docOriginal.RevisionIndex, docNow.RevisionIndex),
"Loaded before:", TextUtil.LineSeparate(docOriginal.NonLoadedStateDescriptionsFull),
"Loaded after:", TextUtil.LineSeparate(docNow.NonLoadedStateDescriptionsFull));
// ReSharper restore LocalizableElement
}
}
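        /// <summary>
        /// Parses a small molecule transition list from CSV text and adds the resulting targets
        /// to the document, selecting the first newly added molecule group.
        /// </summary>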
public void InsertSmallMoleculeTransitionList(string csvText, string description)
{
IdentityPath selectPath = null;
Exception modifyingDocumentException = null;
var transitionCount = 0;
ModifyDocument(description, doc =>
{
try
{
SrmDocument docNew = null;
selectPath = null;
using (var longWaitDlg = new LongWaitDlg(this) {Text = description})
{
var smallMoleculeTransitionListReader = new SmallMoleculeTransitionListCSVReader(MassListInputs.ReadLinesFromText(csvText));
IdentityPath firstAdded;
longWaitDlg.PerformWork(this, 1000,
() => docNew = smallMoleculeTransitionListReader.CreateTargets(doc, null, out firstAdded));
// CONSIDER: cancelable / progress monitor ? This is normally pretty quick.
transitionCount = smallMoleculeTransitionListReader.RowCount - 1;
if (docNew == null)
return doc;
}
using (var enumGroupsCurrent = doc.MoleculeGroups.GetEnumerator())
{
foreach (PeptideGroupDocNode nodePepGroup in docNew.MoleculeGroups)
{
if (enumGroupsCurrent.MoveNext() &&
!ReferenceEquals(nodePepGroup, enumGroupsCurrent.Current))
{
selectPath = new IdentityPath(nodePepGroup.Id);
break;
}
}
}
return docNew;
}
catch (Exception x)
{
modifyingDocumentException = x;
return doc;
}
}, docPair => AuditLogEntry.CreateSingleMessageEntry(new MessageInfo(
transitionCount == 1
? MessageType.pasted_single_small_molecule_transition
: MessageType.pasted_small_molecule_transition_list, docPair.NewDocumentType, transitionCount), csvText));
if (modifyingDocumentException != null)
{
// If the exception is an IOException, we rethrow it in case it has line/col information
if (modifyingDocumentException is IOException)
{
throw modifyingDocumentException;
}
// Otherwise, we wrap the exception to preserve the callstack
throw new AggregateException(modifyingDocumentException);
}
if (selectPath != null)
SequenceTree.SelectedPath = selectPath;
}
private void importAssayLibraryMenuItem_Click(object sender, EventArgs e)
{
using (var dlg = new OpenFileDialog
{
Title = Resources.SkylineWindow_importAssayLibraryMenuItem_Click_Import_Assay_Library,
InitialDirectory = Settings.Default.ActiveDirectory,
CheckPathExists = true,
SupportMultiDottedExtensions = true,
DefaultExt = TextUtil.EXT_CSV,
Filter = TextUtil.FileDialogFiltersAll(TextUtil.FileDialogFilter(
Resources.SkylineWindow_importAssayLibraryMenuItem_Click_Assay_Library, TextUtil.EXT_CSV, TextUtil.EXT_TSV))
})
{
if (dlg.ShowDialog(this) == DialogResult.OK)
{
Settings.Default.ActiveDirectory = Path.GetDirectoryName(dlg.FileName);
ImportAssayLibrary(dlg.FileName);
}
}
}
public void ImportAssayLibrary(string fileName)
{
try
{
ImportAssayLibrary(new MassListInputs(fileName), Resources.SkylineWindow_importAssayLibraryMenuItem_Click_Import_Assay_Library);
}
catch (Exception x)
{
MessageDlg.ShowWithException(this, string.Format(Resources.SkylineWindow_ImportFastaFile_Failed_reading_the_file__0__1__, fileName, x.Message), x);
}
}
private void ImportAssayLibrary(MassListInputs inputs, string description)
{
if (DocumentFilePath == null &&
(MultiButtonMsgDlg.Show(this,
Resources.SkylineWindow_ImportAssayLibrary_You_must_save_the_Skyline_document_in_order_to_import_an_assay_library_, MultiButtonMsgDlg.BUTTON_OK) == DialogResult.Cancel ||
!SaveDocumentAs()))
{
return;
}
if (File.Exists(AssayLibraryFileName) &&
MultiButtonMsgDlg.Show(this,
string.Format(Resources.SkylineWindow_ImportAssayLibrary_There_is_an_existing_library_with_the_same_name__0__as_the_document_library_to_be_created__Overwrite_, AssayLibraryName),
MultiButtonMsgDlg.BUTTON_OK) == DialogResult.Cancel)
{
return;
}
else
{
FileEx.SafeDelete(AssayLibraryFileName);
FileEx.SafeDelete(Path.ChangeExtension(AssayLibraryFileName, BiblioSpecLiteSpec.EXT_REDUNDANT));
}
ImportMassList(inputs, description, true);
}
private void importMassListMenuItem_Click(object sender, EventArgs e)
{
using (OpenFileDialog dlg = new OpenFileDialog
{
Title = Resources.SkylineWindow_importMassListMenuItem_Click_Import_Transition_List_title,
InitialDirectory = Settings.Default.ActiveDirectory, // TODO: Better value?
CheckPathExists = true,
SupportMultiDottedExtensions = true,
DefaultExt = TextUtil.EXT_CSV,
Filter = TextUtil.FileDialogFiltersAll(TextUtil.FileDialogFilter(
Resources.SkylineWindow_importMassListMenuItem_Click_Transition_List, TextUtil.EXT_CSV, TextUtil.EXT_TSV)),
})
{
if (dlg.ShowDialog(this) == DialogResult.OK)
{
Settings.Default.ActiveDirectory = Path.GetDirectoryName(dlg.FileName);
ImportMassList(dlg.FileName);
}
}
}
public void ImportMassList(string fileName)
{
try
{
ImportMassList(new MassListInputs(fileName), Resources.SkylineWindow_importMassListMenuItem_Click_Import_transition_list, false);
}
catch (Exception x)
{
MessageDlg.ShowWithException(this, string.Format(Resources.SkylineWindow_ImportFastaFile_Failed_reading_the_file__0__1__, fileName, x.Message), x);
}
}
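        /// <summary>
        /// Imports a transition list or assay library: shows the column select dialog, imports the
        /// transitions, then optionally adds iRT values and builds a spectral library from any
        /// library intensities found in the input, recording the changes in the audit log.
        /// </summary>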
public void ImportMassList(MassListInputs inputs, string description, bool assayLibrary)
{
SrmTreeNode nodePaste = SequenceTree.SelectedNode as SrmTreeNode;
IdentityPath insertPath = nodePaste != null ? nodePaste.Path : null;
IdentityPath selectPath = null;
List<MeasuredRetentionTime> irtPeptides = null;
List<SpectrumMzInfo> librarySpectra = null;
List<TransitionImportErrorInfo> errorList = null;
List<PeptideGroupDocNode> peptideGroups = null;
var docCurrent = DocumentUI;
SrmDocument docNew = null;
// PreImport of mass list
var importer = docCurrent.PreImportMassList(inputs, null);
if (importer == null)
return;
using (var columnDlg = new ImportTransitionListColumnSelectDlg(importer, docCurrent, inputs, insertPath))
{
var result = columnDlg.ShowDialog(this);
if (result == DialogResult.Cancel)
return;
}
using (var longWaitDlg = new LongWaitDlg(this) {Text = description})
{
var status = longWaitDlg.PerformWork(this, 1000, longWaitBroker =>
{
docNew = docCurrent.ImportMassList(inputs, importer, longWaitBroker,
insertPath, out selectPath, out irtPeptides, out librarySpectra, out errorList, out peptideGroups);
});
if (status.IsCanceled)
return;
}
if (assayLibrary)
{
var missingMessage = new List<string>();
if (!irtPeptides.Any())
missingMessage.Add(TextUtil.LineSeparate(Resources.SkylineWindow_ImportMassList_The_file_does_not_contain_iRTs__Valid_column_names_for_iRTs_are_,
TextUtil.LineSeparate(ColumnIndices.IrtColumnNames)));
if (!librarySpectra.Any())
missingMessage.Add(TextUtil.LineSeparate(Resources.SkylineWindow_ImportMassList_The_file_does_not_contain_intensities__Valid_column_names_for_intensities_are_,
TextUtil.LineSeparate(ColumnIndices.LibraryColumnNames)));
if (missingMessage.Any())
{
MessageDlg.Show(this, TextUtil.LineSeparate(missingMessage));
return;
}
}
bool isDocumentSame = ReferenceEquals(docNew, docCurrent);
// If nothing was imported (e.g. operation was canceled or zero error-free transitions) and also no errors, just return
if (isDocumentSame && !errorList.Any())
return;
// Formerly this is where we would show any errors and give the user the option to proceed with just the non-error transitions.
// Now we do that during the import window's close event. This affords the user the additional option of going back and fixing
// any issues like bad column selection rather than having to go through the whole process again.
RetentionTimeRegression retentionTimeRegressionStore;
MassListInputs irtInputs;
if (!ImportMassListIrts(ref docNew, irtPeptides, peptideGroups, librarySpectra, assayLibrary, out irtInputs, out retentionTimeRegressionStore))
return;
BiblioSpecLiteSpec docLibrarySpec = null;
BiblioSpecLiteLibrary docLibrary = null;
var indexOldLibrary = -1;
var entryCreators = new AuditLogEntryCreatorList();
var importIntensities = true;
if (librarySpectra.Any())
{
if (!assayLibrary)
{
var addLibraryMessage = Resources.SkylineWindow_ImportMassList_The_transition_list_appears_to_contain_spectral_library_intensities___Create_a_document_library_from_these_intensities_;
var addLibraryResult = MultiButtonMsgDlg.Show(this, addLibraryMessage,
Resources.SkylineWindow_ImportMassList__Create___, Resources.SkylineWindow_ImportMassList__Skip, true);
if (addLibraryResult == DialogResult.Cancel)
return;
importIntensities = addLibraryResult == DialogResult.Yes;
                    if (importIntensities)
entryCreators.Add(new MessageInfo(MessageType.imported_spectral_library_intensities, docNew.DocumentType));
}
if (importIntensities && !ImportMassListIntensities(ref docNew, librarySpectra, assayLibrary, out docLibrarySpec, out docLibrary, out indexOldLibrary))
return;
}
ModifyDocument(description, doc =>
{
if (ReferenceEquals(doc, docCurrent))
return docNew;
try
{
// If the document was changed during the operation, try all the changes again
// using the information given by the user.
docCurrent = DocumentUI;
doc = doc.ImportMassList(inputs, importer, insertPath, out selectPath);
if (irtInputs != null)
{
var iRTimporter = doc.PreImportMassList(irtInputs, null);
doc = doc.ImportMassList(irtInputs, iRTimporter, null, out selectPath);
}
var newSettings = doc.Settings;
if (retentionTimeRegressionStore != null)
{
newSettings = newSettings.ChangePeptidePrediction(prediction =>
prediction.ChangeRetentionTime(retentionTimeRegressionStore));
}
if (docLibrarySpec != null)
{
newSettings = newSettings.ChangePeptideLibraries(libs =>
libs.ChangeLibrary(docLibrary, docLibrarySpec, indexOldLibrary));
}
if (!ReferenceEquals(doc.Settings, newSettings))
doc = doc.ChangeSettings(newSettings);
}
catch (Exception x)
{
                    throw new InvalidDataException(string.Format(Resources.SkylineWindow_ImportMassList_Unexpected_document_change_during_operation___0_, x.Message), x);
}
return doc;
}, docPair =>
{
MessageType msgType;
object[] args;
string extraInfo = null;
// Imported from file
if (inputs.InputFilename != null)
{
msgType = assayLibrary ? MessageType.imported_assay_library_from_file : MessageType.imported_transition_list_from_file;
args = new object[] { AuditLogPath.Create(inputs.InputFilename) };
}
else
{
msgType = assayLibrary ? MessageType.imported_assay_library : MessageType.imported_transition_list;
args = new object[0];
extraInfo = inputs.InputText;
}
return AuditLogEntry.CreateSingleMessageEntry(new MessageInfo(msgType, docPair.NewDocumentType, args), extraInfo).Merge(docPair, entryCreators);
});
if (selectPath != null)
SequenceTree.SelectedPath = selectPath;
if (retentionTimeRegressionStore != null)
{
Settings.Default.RetentionTimeList.Add(retentionTimeRegressionStore);
Settings.Default.RTScoreCalculatorList.Add(retentionTimeRegressionStore.Calculator);
}
if (docLibrarySpec != null)
{
Settings.Default.SpectralLibraryList.Insert(0, docLibrarySpec);
}
}
public string AssayLibraryFileName
{
get
{
var docLib = BiblioSpecLiteSpec.GetLibraryFileName(DocumentFilePath);
// ReSharper disable once AssignNullToNotNullAttribute
return Path.Combine(Path.GetDirectoryName(docLib), Path.GetFileNameWithoutExtension(docLib) + BiblioSpecLiteSpec.ASSAY_NAME + BiblioSpecLiteSpec.EXT);
}
}
public string AssayLibraryName
{
get { return Path.GetFileNameWithoutExtension(DocumentFilePath) + BiblioSpecLiteSpec.ASSAY_NAME; }
}
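        /// <summary>
        /// Handles the iRT values found in an imported transition list, prompting the user to create
        /// or add to an iRT calculator and resolving conflicts with values already in the database.
        /// Returns false if the user cancels.
        /// </summary>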
private bool ImportMassListIrts(ref SrmDocument doc, IEnumerable<MeasuredRetentionTime> irtPeptides,
IEnumerable<PeptideGroupDocNode> peptideGroups, List<SpectrumMzInfo> librarySpectra, bool assayLibrary,
out MassListInputs irtInputs, out RetentionTimeRegression retentionTimeRegressionStore)
{
irtInputs = null;
retentionTimeRegressionStore = null;
var retentionTimeRegression = DocumentUI.Settings.PeptideSettings.Prediction.RetentionTime;
var calcIrt = retentionTimeRegression != null ? retentionTimeRegression.Calculator as RCalcIrt : null;
var dbIrtPeptides = irtPeptides.Select(rt => new DbIrtPeptide(rt.PeptideSequence, rt.RetentionTime, false, TimeSource.scan)).ToList();
if (!assayLibrary)
{
dbIrtPeptides = ImportAssayLibraryHelper.GetUnscoredIrtPeptides(dbIrtPeptides, calcIrt);
// If there are no iRT peptides or none with different values than the database, don't import any iRT's
if (!dbIrtPeptides.Any())
return true;
}
IrtDb db;
if (!assayLibrary)
{
// Ask whether or not to include iRT peptides in the paste
var useIrtMessage = calcIrt == null
? Resources.SkylineWindow_ImportMassList_The_transition_list_appears_to_contain_iRT_values__but_the_document_does_not_have_an_iRT_calculator___Create_a_new_calculator_and_add_these_iRT_values_
: Resources.SkylineWindow_ImportMassList_The_transition_list_appears_to_contain_iRT_library_values___Add_these_iRT_values_to_the_iRT_calculator_;
var yesButton = calcIrt == null
? Resources.SkylineWindow_ImportMassList__Create___
: Resources.SkylineWindow_ImportMassList_Add;
switch (MultiButtonMsgDlg.Show(this, useIrtMessage, yesButton, Resources.SkylineWindow_ImportMassList__Skip, true))
{
case DialogResult.No:
return true;
case DialogResult.Cancel:
return false;
}
if (calcIrt == null)
{
// If there is no iRT calculator, ask the user to create one
using (var dlg = new CreateIrtCalculatorDlg(doc, DocumentFilePath, Settings.Default.RTScoreCalculatorList, peptideGroups))
{
if (dlg.ShowDialog(this) != DialogResult.OK)
return false;
doc = dlg.Document;
calcIrt = (RCalcIrt) doc.Settings.PeptideSettings.Prediction.RetentionTime.Calculator;
dlg.UpdateLists(librarySpectra, dbIrtPeptides);
if (!string.IsNullOrEmpty(dlg.IrtFile))
irtInputs = new MassListInputs(dlg.IrtFile);
}
}
var dbPath = calcIrt.DatabasePath;
db = File.Exists(dbPath) ? IrtDb.GetIrtDb(dbPath, null) : IrtDb.CreateIrtDb(dbPath);
}
else
{
db = IrtDb.CreateIrtDb(AssayLibraryFileName);
var matchingStandards = IrtStandard.BestMatch(librarySpectra);
if (matchingStandards.Count == 1)
{
IrtPeptidePicker.SetStandards(dbIrtPeptides, matchingStandards[0]);
}
else
{
// Ask for standards
using (var dlg = new ChooseIrtStandardPeptidesDlg(doc, DocumentFilePath, dbIrtPeptides, peptideGroups))
{
if (dlg.ShowDialog(this) != DialogResult.OK)
return false;
const double slopeTolerance = 0.05;
var rescale = false;
if (dlg.Regression != null && !(1 - slopeTolerance <= dlg.Regression.Slope && dlg.Regression.Slope <= 1 + slopeTolerance))
{
using (var scaleDlg = new MultiButtonMsgDlg(
Resources.SkylineWindow_ImportMassListIrts_The_standard_peptides_do_not_appear_to_be_on_the_iRT_C18_scale__Would_you_like_to_recalibrate_them_to_this_scale_,
MultiButtonMsgDlg.BUTTON_YES, MultiButtonMsgDlg.BUTTON_NO, false))
{
if (scaleDlg.ShowDialog(this) == DialogResult.Yes)
rescale = true;
}
}
doc = dlg.Document;
dlg.UpdateLists(librarySpectra, dbIrtPeptides, rescale);
if (!string.IsNullOrEmpty(dlg.IrtFile))
irtInputs = new MassListInputs(dlg.IrtFile);
}
}
var calculator = new RCalcIrt(AssayLibraryName, AssayLibraryFileName);
// CONSIDER: Probably can't use just a static default like 10 below
retentionTimeRegression = new RetentionTimeRegression(calculator.Name, calculator, null, null, RetentionTimeRegression.DEFAULT_WINDOW, new List<MeasuredRetentionTime>());
doc = doc.ChangeSettings(doc.Settings.ChangePeptidePrediction(prediction => prediction.ChangeRetentionTime(retentionTimeRegression)));
}
var oldPeptides = db.GetPeptides().ToList();
IList<DbIrtPeptide.Conflict> conflicts;
dbIrtPeptides = DbIrtPeptide.MakeUnique(dbIrtPeptides);
DbIrtPeptide.FindNonConflicts(oldPeptides, dbIrtPeptides, null, out conflicts);
// Ask whether to keep or overwrite peptides that are present in the import and already in the database
var overwriteExisting = false;
if (conflicts.Any())
{
var messageOverwrite = string.Format(Resources.SkylineWindow_ImportMassList_The_iRT_calculator_already_contains__0__of_the_imported_peptides_, conflicts.Count);
var overwriteResult = MultiButtonMsgDlg.Show(this,
TextUtil.LineSeparate(messageOverwrite, conflicts.Count == 1
? Resources.SkylineWindow_ImportMassList_Keep_the_existing_iRT_value_or_overwrite_with_the_imported_value_
: Resources.SkylineWindow_ImportMassList_Keep_the_existing_iRT_values_or_overwrite_with_imported_values_),
Resources.SkylineWindow_ImportMassList__Keep, Resources.SkylineWindow_ImportMassList__Overwrite,
true);
if (overwriteResult == DialogResult.Cancel)
return false;
overwriteExisting = overwriteResult == DialogResult.No;
}
using (var longWaitDlg = new LongWaitDlg(this) { Text = Resources.SkylineWindow_ImportMassList_Adding_iRT_values_ })
{
var newDoc = doc;
longWaitDlg.PerformWork(this, 100, progressMonitor => newDoc = newDoc.AddIrtPeptides(dbIrtPeptides, overwriteExisting, progressMonitor));
doc = newDoc;
}
if (doc == null)
return false;
retentionTimeRegressionStore = doc.Settings.PeptideSettings.Prediction.RetentionTime;
return true;
}
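        /// <summary>
        /// Builds a document spectral library from the intensities in an imported transition list,
        /// merging with any existing document library and updating the library settings.
        /// Returns false if the user cancels or the library cannot be created.
        /// </summary>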
private bool ImportMassListIntensities(ref SrmDocument doc, List<SpectrumMzInfo> librarySpectra, bool assayLibrary,
out BiblioSpecLiteSpec docLibrarySpec, out BiblioSpecLiteLibrary docLibrary, out int indexOldLibrary)
{
docLibrarySpec = null;
docLibrary = null;
indexOldLibrary = -1;
// Can't name a library after the document if the document is unsaved
// In this case, prompt to save
if (DocumentFilePath == null &&
(MultiButtonMsgDlg.Show(this,
Resources.SkylineWindow_ImportMassList_You_must_save_the_Skyline_document_in_order_to_create_a_spectral_library_from_a_transition_list_,
MultiButtonMsgDlg.BUTTON_OK) == DialogResult.Cancel ||
!SaveDocumentAs()))
{
return false;
}
librarySpectra = SpectrumMzInfo.RemoveDuplicateSpectra(librarySpectra);
indexOldLibrary = doc.Settings.PeptideSettings.Libraries.LibrarySpecs.IndexOf(spec => spec != null && spec.FilePath == AssayLibraryFileName);
var libraryLinkedToDoc = indexOldLibrary != -1;
if (libraryLinkedToDoc)
{
var oldName = doc.Settings.PeptideSettings.Libraries.LibrarySpecs[indexOldLibrary].Name;
var libraryOld = doc.Settings.PeptideSettings.Libraries.GetLibrary(oldName);
var additionalSpectra = SpectrumMzInfo.GetInfoFromLibrary(libraryOld);
additionalSpectra = SpectrumMzInfo.RemoveDuplicateSpectra(additionalSpectra);
librarySpectra = SpectrumMzInfo.MergeWithOverwrite(librarySpectra, additionalSpectra);
foreach (var stream in libraryOld.ReadStreams)
stream.CloseStream();
}
var libraryExists = File.Exists(AssayLibraryFileName);
if (!assayLibrary && libraryExists && !libraryLinkedToDoc)
{
var replaceLibraryMessage = string.Format(Resources.SkylineWindow_ImportMassList_There_is_an_existing_library_with_the_same_name__0__as_the_document_library_to_be_created___Overwrite_this_library_or_skip_import_of_library_intensities_, AssayLibraryName);
// If the document does not have an assay library linked to it, then ask if user wants to delete the one that we have found
var replaceLibraryResult = MultiButtonMsgDlg.Show(this, replaceLibraryMessage,
Resources.SkylineWindow_ImportMassList__Overwrite, Resources.SkylineWindow_ImportMassList__Skip, true);
if (replaceLibraryResult == DialogResult.Cancel)
return false;
if (replaceLibraryResult == DialogResult.No)
librarySpectra.Clear();
}
if (!librarySpectra.Any())
return true;
// Delete the existing library; either it's not tied to the document or we've already extracted the spectra
if (!assayLibrary && libraryExists)
{
FileEx.SafeDelete(AssayLibraryFileName);
FileEx.SafeDelete(Path.ChangeExtension(AssayLibraryFileName, BiblioSpecLiteSpec.EXT_REDUNDANT));
}
using (var blibDb = BlibDb.CreateBlibDb(AssayLibraryFileName))
{
docLibrarySpec = new BiblioSpecLiteSpec(AssayLibraryName ?? Path.GetFileNameWithoutExtension(AssayLibraryFileName), AssayLibraryFileName);
using (var longWaitDlg = new LongWaitDlg(this) { Text = Resources.SkylineWindow_ImportMassListIntensities_Creating_Spectral_Library })
{
var docNew = doc;
BiblioSpecLiteLibrary docLibraryNew = null;
var docLibrarySpec2 = docLibrarySpec;
var indexOldLibrary2 = indexOldLibrary;
longWaitDlg.PerformWork(this, 1000, progressMonitor =>
{
                        IProgressStatus status = new ProgressStatus(Resources.BlibDb_CreateLibraryFromSpectra_Creating_spectral_library_for_imported_transition_list);
docLibraryNew = blibDb.CreateLibraryFromSpectra(docLibrarySpec2, librarySpectra, AssayLibraryName ?? Path.GetFileNameWithoutExtension(AssayLibraryFileName), progressMonitor, ref status);
if (docLibraryNew == null)
return;
var newSettings = docNew.Settings.ChangePeptideLibraries(libs => libs.ChangeLibrary(docLibraryNew, docLibrarySpec2, indexOldLibrary2));
progressMonitor.UpdateProgress(status = status.ChangeMessage(Resources.SkylineWindow_ImportMassList_Finishing_up_import).ChangePercentComplete(0));
docNew = docNew.ChangeSettings(newSettings, new SrmSettingsChangeMonitor(progressMonitor, Resources.LibraryManager_LoadBackground_Updating_library_settings_for__0_, status));
});
doc = docNew;
docLibrary = docLibraryNew;
if (docLibrary == null)
return false;
}
}
return true;
}
private void importDocumentMenuItem_Click(object sender, EventArgs e)
{
using (OpenFileDialog dlg = new OpenFileDialog
{
Title = Resources.SkylineWindow_importDocumentMenuItem_Click_Import_Skyline_Document,
InitialDirectory = Settings.Default.ActiveDirectory,
CheckPathExists = true,
Multiselect = true,
SupportMultiDottedExtensions = true,
DefaultExt = SrmDocument.EXT,
Filter = TextUtil.FileDialogFiltersAll(SrmDocument.FILTER_DOC),
})
{
if (dlg.ShowDialog(this) == DialogResult.OK)
{
try
{
ImportFiles(dlg.FileNames);
}
catch (Exception x)
{
var failedImportingFiles = TextUtil.LineSeparate(Resources.SkylineWindow_importDocumentMenuItem_Click_Failed_importing_files, string.Empty,
TextUtil.LineSeparate(dlg.FileNames), string.Empty, x.Message);
string message = dlg.FileNames.Length == 1
? string.Format(Resources.SkylineWindow_importDocumentMenuItem_Click_Failed_importing_file__0__1__, dlg.FileNames[0], x.Message)
: failedImportingFiles;
MessageDlg.ShowWithException(this, message, x);
}
}
}
}
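        /// <summary>
        /// Imports one or more Skyline documents into the current document, asking how to merge
        /// results if any of the files contain them, and records the import in the audit log.
        /// </summary>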
public void ImportFiles(params string[] filePaths)
{
var resultsAction = MeasuredResults.MergeAction.remove;
var mergePeptides = false;
var entryCreatorList = new AuditLogEntryCreatorList();
if (MeasuredResults.HasResults(filePaths))
{
using (var dlgResults = new ImportDocResultsDlg(!string.IsNullOrEmpty(DocumentFilePath)))
{
if (dlgResults.ShowDialog(this) != DialogResult.OK)
return;
resultsAction = dlgResults.Action;
mergePeptides = dlgResults.IsMergePeptides;
entryCreatorList.Add(dlgResults.FormSettings.EntryCreator);
}
}
SrmTreeNode nodeSel = SequenceTree.SelectedNode as SrmTreeNode;
IdentityPath selectPath = null;
var docCurrent = DocumentUI;
SrmDocument docNew = null;
using (var longWaitDlg = new LongWaitDlg(this)
{
Text = Resources.SkylineWindow_ImportFiles_Import_Skyline_document_data,
})
{
longWaitDlg.PerformWork(this, 1000, longWaitBroker =>
docNew = ImportFiles(docCurrent,
longWaitBroker,
filePaths,
resultsAction,
mergePeptides,
nodeSel != null ? nodeSel.Path : null,
out selectPath));
if (docNew == null || ReferenceEquals(docNew, docCurrent))
return;
if (longWaitDlg.IsDocumentChanged(docCurrent))
{
MessageDlg.Show(this, Resources.SkylineWindow_ImportFasta_Unexpected_document_change_during_operation);
return;
}
}
ModifyDocument(Resources.SkylineWindow_ImportFiles_Import_Skyline_document_data, doc =>
{
docNew.ValidateResults();
if (!ReferenceEquals(doc, docCurrent))
throw new InvalidDataException(Resources
.SkylineWindow_ImportFasta_Unexpected_document_change_during_operation);
return docNew;
}, docPair =>
{
var entry = AuditLogEntry.CreateCountChangeEntry(MessageType.imported_doc,
MessageType.imported_docs, docPair.NewDocumentType, filePaths.Select(AuditLogPath.Create), filePaths.Length,
MessageArgs.DefaultSingular, null);
if (filePaths.Length > 1)
entry.AppendAllInfo(filePaths.Select(file =>
new MessageInfo(MessageType.imported_doc, docPair.NewDocumentType, AuditLogPath.Create(file))));
return entry.Merge(docPair, entryCreatorList, false);
});
if (selectPath != null)
SequenceTree.SelectedPath = selectPath;
}
private SrmDocument ImportFiles(SrmDocument docOrig,
ILongWaitBroker longWaitBroker,
IList<string> filePaths,
MeasuredResults.MergeAction resultsAction,
bool mergePeptides,
IdentityPath to,
out IdentityPath firstAdded)
{
firstAdded = null;
var docResult = docOrig;
int filesRead = 0;
            // Add each file at the next insert location, so their nodes end up in the original order.
IdentityPath first = null;
foreach (var filePath in filePaths)
{
if (longWaitBroker != null)
{
if (longWaitBroker.IsCanceled || longWaitBroker.IsDocumentChanged(docOrig))
return docOrig;
longWaitBroker.ProgressValue = filesRead*100/filePaths.Count;
longWaitBroker.Message = string.Format(Resources.SkylineWindow_ImportFiles_Importing__0__, Path.GetFileName(filePath));
}
using (var reader = new StreamReader(PathEx.SafePath(filePath)))
{
IdentityPath firstAddedForFile, nextAdd;
docResult = docResult.ImportDocumentXml(reader,
filePath,
resultsAction,
mergePeptides,
FindSpectralLibrary,
Settings.Default.StaticModList,
Settings.Default.HeavyModList,
to,
out firstAddedForFile,
out nextAdd,
false);
// Add the next document at the specified location
to = nextAdd;
// Store the first added node only for the first document
if (first == null)
first = firstAddedForFile;
}
filesRead++;
}
firstAdded = first;
return docResult;
}
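        /// <summary>
        /// Prompts the user to locate a spectral library file that could not be found while
        /// importing a document. Returns null if the user cancels.
        /// </summary>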
public string FindSpectralLibrary(string libraryName, string fileName)
{
string result = null;
RunUIAction(() =>
{
using (var dlg = new MissingFileDlg
{
ItemName = libraryName,
FileHint = fileName,
ItemType = Resources.SkylineWindow_ConnectLibrarySpecs_Spectral_Library,
Title = Resources.SkylineWindow_ConnectLibrarySpecs_Find_Spectral_Library
})
{
if (dlg.ShowDialog(this) == DialogResult.OK)
result = dlg.FilePath;
}
});
return result;
}
private void importResultsMenuItem_Click(object sender, EventArgs e)
{
if (ImportingResultsWindow != null)
{
ShowAllChromatogramsGraph();
return;
}
ImportResults();
}
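        /// <summary>
        /// Checks that the document is ready for importing results (saved, has target transitions,
        /// retention time filter and iRT standards are usable, decoys match their targets), then
        /// shows the Import Results dialog and adds the chosen replicates to the document.
        /// </summary>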
public void ImportResults()
{
if (DocumentUI.MoleculeTransitionCount == 0)
{
MessageDlg.Show(this, Resources.SkylineWindow_ImportResults_You_must_add_at_least_one_target_transition_before_importing_results_);
return;
}
if (!CheckDocumentExists(Resources.SkylineWindow_ImportResults_You_must_save_this_document_before_importing_results))
{
return;
}
var entryCreatorList = new AuditLogEntryCreatorList();
if (!CheckRetentionTimeFilter(DocumentUI, entryCreatorList))
{
return;
}
var missingIrtPeptides = CheckMissingIrtPeptides(DocumentUI).ToArray();
if (missingIrtPeptides.Any())
{
var numStandards = RCalcIrt.IrtPeptides(DocumentUI).Count();
var numDocument = numStandards - missingIrtPeptides.Length;
var numRequired = RCalcIrt.MinStandardCount(numStandards);
var message = TextUtil.LineSeparate(
Resources.SkylineWindow_ImportResults_The_following_iRT_standard_peptides_are_missing_from_the_document_,
string.Empty,
TextUtil.LineSeparate(missingIrtPeptides.Select(t=>t.ToString())),
string.Empty,
string.Format(Resources.SkylineWindow_ImportResults_With__0__standard_peptides___1__are_required_with_a_correlation_of__2__,
numStandards, numRequired, RCalcIrt.MIN_IRT_TO_TIME_CORRELATION));
if (numDocument < numRequired)
{
message = TextUtil.LineSeparate(
message,
numDocument > 0
? string.Format(Resources.SkylineWindow_ImportResults_The_document_only_contains__0__of_these_iRT_standard_peptides_, numDocument)
: Resources.SkylineWindow_ImportResults_The_document_does_not_contain_any_of_these_iRT_standard_peptides_,
string.Empty,
Resources.SkylineWindow_ImportResults_Add_missing_iRT_standard_peptides_to_your_document_or_change_the_retention_time_predictor_);
MessageDlg.Show(this, message);
return;
}
else
{
var numExceptions = numDocument - numRequired;
message = TextUtil.LineSeparate(
message,
string.Format(Resources.SkylineWindow_ImportResults_The_document_contains__0__of_these_iRT_standard_peptides_, numDocument),
numExceptions > 0
? string.Format(Resources.SkylineWindow_ImportResults_A_maximum_of__0__may_be_missing_and_or_outliers_for_a_successful_import_, numExceptions)
: Resources.SkylineWindow_ImportResults_None_may_be_missing_or_outliers_for_a_successful_import_,
string.Empty,
Resources.SkylineWindow_ImportResults_Do_you_want_to_continue_);
using (var dlg = new MultiButtonMsgDlg(message, MultiButtonMsgDlg.BUTTON_YES, MultiButtonMsgDlg.BUTTON_NO, false))
{
if (dlg.ShowDialog(this) == DialogResult.No)
return;
}
}
}
var decoyGroup = DocumentUI.PeptideGroups.FirstOrDefault(group => group.IsDecoy);
if (decoyGroup != null)
{
decoyGroup.CheckDecoys(DocumentUI, out var numNoSource, out var numWrongTransitionCount, out var proportionDecoysMatch);
if ((!decoyGroup.ProportionDecoysMatch.HasValue && proportionDecoysMatch <= 0.99) || // over 99% of decoys must match targets if proportion is not set
(decoyGroup.ProportionDecoysMatch.HasValue && proportionDecoysMatch < decoyGroup.ProportionDecoysMatch)) // proportion of decoys matching targets has decreased since generation
{
var sb = new StringBuilder();
sb.AppendLine(decoyGroup.PeptideCount == 1
? Resources.SkylineWindow_ImportResults_The_document_contains_a_decoy_that_does_not_match_the_targets_
: string.Format(Resources.SkylineWindow_ImportResults_The_document_contains_decoys_that_do_not_match_the_targets__Out_of__0__decoys_, decoyGroup.PeptideCount));
sb.AppendLine(string.Empty);
if (numNoSource == 1)
sb.AppendLine(Resources.SkylineWindow_ImportResults_1_decoy_does_not_have_a_matching_target);
else if (numNoSource > 1)
sb.AppendLine(string.Format(Resources.SkylineWindow_ImportResults__0__decoys_do_not_have_a_matching_target, numNoSource));
if (numWrongTransitionCount == 1)
sb.AppendLine(Resources.SkylineWindow_ImportResults_1_decoy_does_not_have_the_same_number_of_transitions_as_its_matching_target);
else if (numWrongTransitionCount > 0)
sb.AppendLine(string.Format(Resources.SkylineWindow_ImportResults__0__decoys_do_not_have_the_same_number_of_transitions_as_their_matching_targets, numWrongTransitionCount));
sb.AppendLine(string.Empty);
sb.AppendLine(Resources.SkylineWindow_ImportResults_Do_you_want_to_generate_new_decoys_or_continue_with_the_current_decoys_);
using (var dlg = new MultiButtonMsgDlg(sb.ToString(),
Resources.SkylineWindow_ImportResults_Generate, Resources.SkylineWindow_ImportResults_Continue, true))
{
switch (dlg.ShowDialog(this))
{
case DialogResult.Yes:
if (!ShowGenerateDecoysDlg(dlg))
return;
break;
case DialogResult.No:
using (var dlg2 = new MultiButtonMsgDlg(
Resources.SkylineWindow_ImportResults_Are_you_sure__Peak_scoring_models_trained_with_non_matching_targets_and_decoys_may_produce_incorrect_results_,
MultiButtonMsgDlg.BUTTON_YES, MultiButtonMsgDlg.BUTTON_NO, false))
{
if (dlg2.ShowDialog(dlg) == DialogResult.No)
return;
}
break;
case DialogResult.Cancel:
return;
}
}
}
}
using (ImportResultsDlg dlg = new ImportResultsDlg(DocumentUI, DocumentFilePath))
{
if (dlg.ShowDialog(this) == DialogResult.OK)
{
// No idea how this could happen, but it has caused unexpected errors
// so just return and do nothing if it does.
if (dlg.NamedPathSets == null)
{
// throw new NullReferenceException("Unexpected null path sets in ImportResults.");
return;
}
var namedResults = dlg.NamedPathSets.ToList();
string description = Resources.SkylineWindow_ImportResults_Import_results;
if (namedResults.Count == 1)
description = string.Format(Resources.SkylineWindow_ImportResults_Import__0__, namedResults[0].Key);
// Check with user for Waters lockmass settings if any, results written to Settings.Default
// If lockmass correction is desired, MsDataFileUri values in namedResults are modified by this call.
if (!ImportResultsLockMassDlg.UpdateNamedResultsParameters(this, DocumentUI, ref namedResults))
return; // User cancelled, no change
ModifyDocument(description,
doc => ImportResults(doc, namedResults, dlg.OptimizationName),
docPair => dlg.FormSettings.EntryCreator.Create(docPair).Merge(docPair, entryCreatorList));
// Select the first replicate to which results were added.
if (ComboResults.Visible)
ComboResults.SelectedItem = namedResults[0].Key;
}
}
}
/// <summary>
/// If the Transition Full Scan settings are such that the time window for extracting
/// chromatograms depends on a set of replicates, then this function shows the
/// ChooseSchedulingReplicatesDlg.
        /// Returns false if the user cancels the dialog, or if chromatograms cannot be imported.
/// </summary>
public bool CheckRetentionTimeFilter(SrmDocument document, AuditLogEntryCreatorList creatorList)
{
var settings = document.Settings;
var fullScan = settings.TransitionSettings.FullScan;
if (!fullScan.IsEnabled)
{
return true;
}
if (fullScan.RetentionTimeFilterType != RetentionTimeFilterType.scheduling_windows)
{
return true;
}
if (!fullScan.IsEnabledMsMs && !document.MoleculeTransitions.Any(transition => transition.IsMs1))
{
return true;
}
var prediction = settings.PeptideSettings.Prediction;
if (prediction.RetentionTime != null && prediction.RetentionTime.IsAutoCalculated)
{
return true;
}
bool anyImportedResults = settings.HasResults && settings.MeasuredResults.Chromatograms.Any();
bool canChooseReplicatesForCalibration = anyImportedResults &&
(prediction.UseMeasuredRTs ||
prediction.RetentionTime != null &&
prediction.RetentionTime.IsAutoCalculated);
if (null == prediction.RetentionTime)
{
if (!prediction.UseMeasuredRTs || !anyImportedResults)
{
MessageDlg.Show(this, Resources.SkylineWindow_CheckRetentionTimeFilter_NoPredictionAlgorithm);
return false;
}
}
else if (!prediction.RetentionTime.IsUsable)
{
if (!canChooseReplicatesForCalibration)
{
if (MessageBox.Show(this, Resources.SkylineWindow_CheckRetentionTimeFilter_NoReplicatesAvailableForPrediction,
Program.Name, MessageBoxButtons.OKCancel) == DialogResult.Cancel)
{
return false;
}
}
}
if (!canChooseReplicatesForCalibration)
{
return true;
}
using (var dlg = new ChooseSchedulingReplicatesDlg(this))
{
var ok = dlg.ShowDialog(this) == DialogResult.OK;
                if (ok)
creatorList.Add(dlg.FormSettings.EntryCreator);
return ok;
}
}
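        /// <summary>
        /// Returns the iRT standard peptides that are not present in the document.
        /// </summary>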
private static IEnumerable<Target> CheckMissingIrtPeptides(SrmDocument document)
{
var existingPeptides = new LibKeyIndex(document.Molecules.Select(pep=>new LibKey(pep.ModifiedTarget, Adduct.EMPTY).LibraryKey));
return RCalcIrt.IrtPeptides(document)
.Where(target => !existingPeptides.ItemsMatching(new LibKey(target, Adduct.EMPTY).LibraryKey, false).Any());
}
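        /// <summary>
        /// Adds the named result sets to the document's measured results, creating one
        /// ChromatogramSet per name and deleting any cache files that will be overwritten.
        /// </summary>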
public SrmDocument ImportResults(SrmDocument doc, List<KeyValuePair<string, MsDataFileUri[]>> namedResults, string optimize)
{
OptimizableRegression optimizationFunction = doc.Settings.TransitionSettings.Prediction.GetOptimizeFunction(optimize);
if (namedResults.Count == 1)
return ImportResults(doc, namedResults[0].Key, namedResults[0].Value, optimizationFunction);
// Add all chosen files as separate result sets.
var results = doc.Settings.MeasuredResults;
var listChrom = new List<ChromatogramSet>();
if (results != null)
listChrom.AddRange(results.Chromatograms);
foreach (var namedResult in namedResults)
{
string nameResult = namedResult.Key;
// Skip results that have already been loaded.
if (GetChromatogramByName(nameResult, results) != null)
continue;
// Delete caches that will be overwritten
FileEx.SafeDelete(ChromatogramCache.FinalPathForName(DocumentFilePath, nameResult), true);
listChrom.Add(new ChromatogramSet(nameResult, namedResult.Value, Annotations.EMPTY, optimizationFunction));
}
var arrayChrom = listChrom.ToArray();
if (arrayChrom.Length == 0)
{
results = null;
}
else
{
if (results == null)
{
results = new MeasuredResults(arrayChrom);
}
else
{
results = results.ChangeChromatograms(arrayChrom);
}
}
if (results != null && Program.DisableJoining)
results = results.ChangeIsJoiningDisabled(true);
return doc.ChangeMeasuredResults(results);
}
private SrmDocument ImportResults(SrmDocument doc, string nameResult, IEnumerable<MsDataFileUri> dataSources,
OptimizableRegression optimizationFunction)
{
var results = doc.Settings.MeasuredResults;
var chrom = GetChromatogramByName(nameResult, results);
if (chrom == null)
{
// If the chromatogram is not in the current set, then delete the cache
// file to make sure it is not on disk before starting.
FileEx.SafeDelete(ChromatogramCache.FinalPathForName(DocumentFilePath, nameResult), true);
chrom = new ChromatogramSet(nameResult, dataSources, Annotations.EMPTY, optimizationFunction);
if (results == null)
results = new MeasuredResults(new[] {chrom});
else
{
// Add the new result to the end.
var listChrom = new List<ChromatogramSet>(results.Chromatograms) {chrom};
results = results.ChangeChromatograms(listChrom.ToArray());
}
}
else
{
// Append to an existing chromatogram set
var dataFilePaths = new List<MsDataFileUri>(chrom.MSDataFilePaths);
foreach (var sourcePath in dataSources)
{
if (!dataFilePaths.Contains(sourcePath))
dataFilePaths.Add(sourcePath);
}
// If no new paths added, just return without changing.
if (dataFilePaths.Count == chrom.FileCount)
return doc;
int replaceIndex = results.Chromatograms.IndexOf(chrom);
var arrayChrom = results.Chromatograms.ToArray();
arrayChrom[replaceIndex] = chrom.ChangeMSDataFilePaths(dataFilePaths);
results = results.ChangeChromatograms(arrayChrom);
}
if (results != null && Program.DisableJoining)
results = results.ChangeIsJoiningDisabled(true);
return doc.ChangeMeasuredResults(results);
}
private static ChromatogramSet GetChromatogramByName(string name, MeasuredResults results)
{
return (results == null ? null :
results.Chromatograms.FirstOrDefault(set => Equals(name, set.Name)));
}
public void ManageResults()
{
var documentUI = DocumentUI;
if (!documentUI.Settings.HasResults && !documentUI.Settings.HasDocumentLibrary)
{
MessageDlg.Show(this, Resources.SkylineWindow_ManageResults_The_document_must_contain_mass_spec_data_to_manage_results_);
return;
}
using (ManageResultsDlg dlg = new ManageResultsDlg(this, _libraryManager))
{
if (dlg.ShowDialog(this) == DialogResult.OK)
{
// Remove from the cache chromatogram data to be reimported. This is done before changing
// anything else, to avoid having other changes to the results cause cache changes before
// the document is saved.
try
{
ReimportChromatograms(documentUI, dlg.ReimportChromatograms);
}
catch (Exception exception)
{
MessageDlg.ShowWithException(this, Resources.SkylineWindow_ManageResults_A_failure_occurred_attempting_to_reimport_results, exception);
}
// And update the document to reflect real changes to the results structure
ModifyDocument(Resources.SkylineWindow_ManageResults_Manage_results, doc =>
{
if (dlg.IsRemoveAllLibraryRuns)
{
doc = doc.ChangeSettings(doc.Settings.ChangePeptideLibraries(lib =>
{
var libSpecs = new List<LibrarySpec>(lib.LibrarySpecs);
var libs = new List<Library>(lib.Libraries);
for (int i = 0; i < libSpecs.Count; i++)
{
if (libSpecs[i].IsDocumentLibrary || libSpecs[i] is MidasLibSpec)
{
libSpecs.RemoveAt(i);
libs.RemoveAt(i);
}
}
return lib.ChangeDocumentLibrary(false)
.ChangeLibraries(libSpecs.ToArray(), libs.ToArray());
}));
}
else if (dlg.LibraryRunsRemovedList.Count > 0)
{
var releaseLibraries = false;
BiblioSpecLiteLibrary docBlib;
if (DocumentUI.Settings.PeptideSettings.Libraries.TryGetDocumentLibrary(out docBlib))
{
try
{
docBlib.DeleteDataFiles(dlg.LibraryRunsRemovedList.ToArray(), this);
releaseLibraries = true;
}
catch (Exception x)
{
throw new IOException(TextUtil.LineSeparate(Resources.SkylineWindow_ManageResults_Failed_to_remove_library_runs_from_the_document_library_, x.Message));
}
}
foreach (var midasLib in DocumentUI.Settings.PeptideSettings.Libraries.MidasLibraries)
{
try
{
midasLib.RemoveResultsFiles(dlg.LibraryRunsRemovedList.ToArray());
releaseLibraries = true;
}
catch (Exception x)
{
throw new IOException(TextUtil.LineSeparate(Resources.SkylineWindow_ManageResults_Failed_to_remove_library_runs_from_the_MIDAS_library_, x.Message));
}
}
if (releaseLibraries)
{
var libSpecs = dlg.DocumentLibrarySpecs.ToArray();
var libSpecNames = libSpecs.Select(libSpec => libSpec.Name);
_libraryManager.ReleaseLibraries(libSpecs);
var settings = doc.Settings.ChangePeptideLibraries(lib =>
{
var listLib = new List<Library>(lib.Libraries);
var i = lib.LibrarySpecs.IndexOf(spec => libSpecNames.Contains(spec.Name));
if (i != -1)
listLib[i] = null;
return lib.ChangeLibraries(listLib);
});
doc = doc.ChangeSettings(settings);
}
}
var results = doc.Settings.MeasuredResults;
var listChrom = dlg.Chromatograms.ToArray();
if (results == null && listChrom.Length == 0)
return doc;
// Set HasMidasSpectra = false for file infos
listChrom = MidasLibrary.UnflagFiles(listChrom, dlg.LibraryRunsRemovedList.Select(Path.GetFileName)).ToArray();
if (ArrayUtil.ReferencesEqual(results?.Chromatograms, listChrom))
return doc;
MeasuredResults resultsNew = null;
if (listChrom.Length > 0)
{
if (results == null)
resultsNew = new MeasuredResults(listChrom);
else
resultsNew = results.ChangeChromatograms(listChrom.ToArray());
}
doc = doc.ChangeMeasuredResults(resultsNew);
doc.ValidateResults();
return doc;
}, dlg.FormSettings.EntryCreator.Create);
// ModifyDocument will have closed the streams by now, so it is safe to delete the files.
if (dlg.IsRemoveAllLibraryRuns)
{
try
{
string docLibPath = BiblioSpecLiteSpec.GetLibraryFileName(DocumentFilePath);
FileEx.SafeDelete(docLibPath);
string redundantDocLibPath = BiblioSpecLiteSpec.GetRedundantName(docLibPath);
FileEx.SafeDelete(redundantDocLibPath);
string docLibCachePath = BiblioSpecLiteLibrary.GetLibraryCachePath(docLibPath);
FileEx.SafeDelete(docLibCachePath);
string midasLibPath = MidasLibSpec.GetLibraryFileName(DocumentFilePath);
FileEx.SafeDelete(midasLibPath);
}
catch (FileEx.DeleteException deleteException)
{
MessageDlg.ShowException(this, deleteException);
}
}
}
}
}
public void ReimportChromatograms(SrmDocument document, IEnumerable<ChromatogramSet> chromatogramSets)
{
var setReimport = new HashSet<ChromatogramSet>(chromatogramSets);
if (setReimport.Count == 0)
return;
new LongOperationRunner
{
ParentControl = this,
JobTitle = Resources.SkylineWindow_ReimportChromatograms_Reimporting_chromatograms
}
.Run(longWaitBroker =>
{
// Remove all replicates to be re-imported
var results = document.Settings.MeasuredResults;
var chromRemaining = results.Chromatograms.Where(chrom => !setReimport.Contains(chrom)).ToArray();
MeasuredResults resultsNew = results.ChangeChromatograms(chromRemaining);
if (chromRemaining.Length > 0)
{
// Optimize the cache using this reduced set to remove their data from the cache
resultsNew = resultsNew.OptimizeCache(DocumentFilePath, _chromatogramManager.StreamManager, longWaitBroker);
}
else
{
// Or remove the cache entirely, if everything is being reimported
foreach (var readStream in results.ReadStreams)
readStream.CloseStream();
string cachePath = ChromatogramCache.FinalPathForName(DocumentFilePath, null);
FileEx.SafeDelete(cachePath, true);
}
// Restore the original set unchanged
resultsNew = resultsNew.ChangeChromatograms(results.Chromatograms);
// Update the document without adding an undo record, because the only information
// to change should be cache related.
SrmDocument docNew, docCurrent;
do
{
docCurrent = Document;
docNew = docCurrent.ChangeMeasuredResults(resultsNew);
} while (!SetDocument(docNew, docCurrent));
});
}
private void importPeptideSearchMenuItem_Click(object sender, EventArgs e)
{
ShowImportPeptideSearchDlg();
}
public void ShowImportPeptideSearchDlg(ImportPeptideSearchDlg.Workflow? workflowType)
{
if (!CheckDocumentExists(Resources.SkylineWindow_ShowImportPeptideSearchDlg_You_must_save_this_document_before_importing_a_peptide_search_))
{
return;
}
else if (!Document.IsLoaded)
{
MessageDlg.Show(this, Resources.SkylineWindow_ShowImportPeptideSearchDlg_The_document_must_be_fully_loaded_before_importing_a_peptide_search_);
return;
}
using (var dlg = !workflowType.HasValue
? new ImportPeptideSearchDlg(this, _libraryManager)
: new ImportPeptideSearchDlg(this, _libraryManager, workflowType.Value))
{
if (dlg.ShowDialog(this) == DialogResult.OK)
{
// Nothing to do; the dialog does all the work.
}
}
}
public void ShowImportPeptideSearchDlg()
{
ShowImportPeptideSearchDlg(null);
}
private bool CheckDocumentExists(String errorMsg)
{
if (string.IsNullOrEmpty(DocumentFilePath))
{
if (MultiButtonMsgDlg.Show(this,errorMsg,Resources.OK) == DialogResult.Cancel)
return false;
if (!SaveDocument())
return false;
}
return true;
}
private void publishMenuItem_Click(object sender, EventArgs e)
{
ShowPublishDlg(null);
}
public void ShowPublishDlg(IPanoramaPublishClient publishClient)
{
if (publishClient == null)
publishClient = new WebPanoramaPublishClient();
var document = DocumentUI;
if (!document.IsLoaded)
{
MessageDlg.Show(this, Resources.SkylineWindow_ShowPublishDlg_The_document_must_be_fully_loaded_before_it_can_be_uploaded_);
return;
}
string fileName = DocumentFilePath;
if (string.IsNullOrEmpty(fileName))
{
if (MessageBox.Show(this, Resources.SkylineWindow_ShowPublishDlg_The_document_must_be_saved_before_it_can_be_uploaded_,
Program.Name, MessageBoxButtons.OKCancel) == DialogResult.Cancel)
return;
if (!SaveDocumentAs())
return;
fileName = DocumentFilePath;
}
if (!SaveDocument())
return;
var servers = Settings.Default.ServerList;
if (servers.Count == 0)
{
DialogResult buttonPress = MultiButtonMsgDlg.Show(
this,
TextUtil.LineSeparate(
Resources.SkylineWindow_ShowPublishDlg_There_are_no_Panorama_servers_to_upload_to,
Resources.SkylineWindow_ShowPublishDlg_Press_Register_to_register_for_a_project_on_PanoramaWeb_,
Resources.SkylineWindow_ShowPublishDlg_Press_Continue_to_use_the_server_of_your_choice_),
Resources.SkylineWindow_ShowPublishDlg_Register, Resources.SkylineWindow_ShowPublishDlg_Continue,
true);
if (buttonPress == DialogResult.Cancel)
return;
object tag = null;
if (buttonPress == DialogResult.Yes)
{
// person intends to register
WebHelpers.OpenLink(this, @"http://proteome.gs.washington.edu/software/Skyline/panoramaweb-signup.html");
tag = true;
}
var serverPanoramaWeb = new Server(PanoramaUtil.PANORAMA_WEB, string.Empty, string.Empty);
var newServer = servers.EditItem(this, serverPanoramaWeb, null, tag);
if (newServer == null)
return;
servers.Add(newServer);
}
var panoramaSavedUri = document.Settings.DataSettings.PanoramaPublishUri;
var showPublishDocDlg = true;
// if the document has a saved uri, prompt the user for action, check servers and permissions, then publish
// if something fails in the attempt to publish to the saved uri, bring up the usual PublishDocumentDlg
if (panoramaSavedUri != null && !string.IsNullOrEmpty(panoramaSavedUri.ToString()))
{
showPublishDocDlg = !PublishToSavedUri(publishClient, panoramaSavedUri, fileName, servers);
}
// if no uri was saved to publish to, or the user chose to view the dialog, show the dialog
if (showPublishDocDlg)
{
using (var publishDocumentDlg = new PublishDocumentDlg(this, servers, fileName))
{
publishDocumentDlg.PanoramaPublishClient = publishClient;
if (publishDocumentDlg.ShowDialog(this) == DialogResult.OK)
{
if (ShareDocument(publishDocumentDlg.FileName, publishDocumentDlg.ShareType))
publishDocumentDlg.Upload(this);
}
}
}
}
private bool PublishToSavedUri(IPanoramaPublishClient publishClient, Uri panoramaSavedUri, string fileName,
ServerList servers)
{
var message = TextUtil.LineSeparate(Resources.SkylineWindow_PublishToSavedUri_This_file_was_last_uploaded_to___0_,
Resources.SkylineWindow_PublishToSavedUri_Upload_to_the_same_location_);
if (MultiButtonMsgDlg.Show(this, string.Format(message, panoramaSavedUri),
MultiButtonMsgDlg.BUTTON_YES, MultiButtonMsgDlg.BUTTON_NO, false) != DialogResult.Yes)
return false;
var server = servers.FirstOrDefault(s => s.URI.Host.Equals(panoramaSavedUri.Host));
if (server == null)
return false;
JToken folders;
var folderPath = panoramaSavedUri.AbsolutePath;
var folderPathNoCtx = PanoramaServer.getFolderPath(server, panoramaSavedUri); // get folder path without the context path
try
{
folders = publishClient.GetInfoForFolders(server, folderPathNoCtx.TrimEnd('/').TrimStart('/'));
}
catch (WebException ex)
{
// Handle this only for PanoramaWeb. For the specific case where Skyline was upgraded
// to a version that does not assume the '/labkey' context path, BEFORE PanoramaWeb was
// re-configured to run as the ROOT webapp. In this case the panoramaSavedUri will contain '/labkey'
// but the server is no longer deployed at that context path.
if (!server.URI.Host.Contains(@"panoramaweb") || !folderPath.StartsWith(@"/labkey"))
{
return false;
}
var response = ex.Response as HttpWebResponse;
if (response == null || response.StatusCode != HttpStatusCode.NotFound) // 404
{
return false;
}
folderPathNoCtx = folderPath.Remove(0, @"/labkey".Length);
try
{
folders =
publishClient.GetInfoForFolders(server, folderPathNoCtx.TrimEnd('/').TrimStart('/'));
}
catch (Exception)
{
return false;
}
}
catch (PanoramaServerException)
{
return false;
}
// must escape the uri string, as the panorama api does not, and strings are escaped in the schema
if (folders == null || !folderPath.Contains(Uri.EscapeUriString(folders[@"path"].ToString())))
return false;
if (!PanoramaUtil.CheckFolderPermissions(folders) || !PanoramaUtil.CheckFolderType(folders))
return false;
var fileInfo = new FolderInformation(server, true);
ShareType shareType;
try
{
shareType = publishClient.DecideShareType(fileInfo, DocumentUI);
}
catch (PanoramaServerException pse)
{
MessageDlg.ShowWithException(this, pse.Message, pse);
return false;
}
var zipFilePath = FileEx.GetTimeStampedFileName(fileName);
if (!ShareDocument(zipFilePath, shareType))
return false;
var serverRelativePath = folders[@"path"].ToString() + '/';
serverRelativePath = serverRelativePath.TrimStart('/');
publishClient.UploadSharedZipFile(this, server, zipFilePath, serverRelativePath);
return true; // success!
}
private void exportAnnotationsMenuItem_Click(object sender, EventArgs e)
{
ShowExportAnnotationsDlg();
}
public void ShowExportAnnotationsDlg()
{
using (var exportAnnotationsDlg =
new ExportAnnotationsDlg(new SkylineDataSchema(this, DataSchemaLocalizer.INVARIANT)))
{
exportAnnotationsDlg.ShowDialog(this);
}
}
public void ImportAnnotations(string filename)
{
try
{
lock (GetDocumentChangeLock())
{
var originalDocument = Document;
SrmDocument newDocument = null;
using (var longWaitDlg = new LongWaitDlg(this))
{
longWaitDlg.PerformWork(this, 1000, broker =>
{
var documentAnnotations = new DocumentAnnotations(originalDocument);
newDocument = documentAnnotations.ReadAnnotationsFromFile(broker.CancellationToken, filename);
});
}
if (newDocument != null)
{
ModifyDocument(Resources.SkylineWindow_ImportAnnotations_Import_Annotations, doc =>
{
if (!ReferenceEquals(doc, originalDocument))
{
throw new ApplicationException(Resources
.SkylineDataSchema_VerifyDocumentCurrent_The_document_was_modified_in_the_middle_of_the_operation_);
}
return newDocument;
}, docPair => AuditLogEntry.CreateSingleMessageEntry(new MessageInfo(MessageType.imported_annotations, docPair.NewDocumentType, filename)));
}
}
}
catch (Exception exception)
{
MessageDlg.ShowException(this, exception);
}
}
private void importAnnotationsMenuItem_Click(object sender, EventArgs e)
{
using (var dlg = new OpenFileDialog
{
DefaultExt = TextUtil.EXT_CSV,
Filter = TextUtil.FileDialogFiltersAll(TextUtil.FILTER_CSV),
InitialDirectory = Settings.Default.ExportDirectory,
})
{
if (dlg.ShowDialog(this) != DialogResult.OK)
{
return;
}
ImportAnnotations(dlg.FileName);
}
}
#region Functional Test Support
public void ShowExportTransitionListDlg()
{
ShowExportMethodDialog(ExportFileType.List);
}
#endregion
}
}
| 1 | 13,967 | I think "this" should be "parent". | ProteoWizard-pwiz | .cs |
@@ -331,6 +331,8 @@ ostree_async_progress_set (OstreeAsyncProgress *self,
if (self->dead)
goto out;
+ changed = FALSE;
+
va_start (ap, self);
for (key = va_arg (ap, const char *), format_string = va_arg (ap, const char *); | 1 | /* -*- mode: C; c-file-style: "gnu"; indent-tabs-mode: nil; -*-
*
* Copyright (C) 2013 Colin Walters <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#include "config.h"
#include "ostree-async-progress.h"
#include "libglnx.h"
/**
* SECTION:ostree-async-progress
* @title: Progress notification system for asynchronous operations
* @short_description: Values representing progress
*
* For many asynchronous operations, it's desirable for callers to be
 * able to watch their status as they progress. For example, a user
* interface calling an asynchronous download operation will want to
* be able to see the total number of bytes downloaded.
*
* This class provides a mechanism for callees of asynchronous
* operations to communicate back with callers. It transparently
* handles thread safety, ensuring that the progress change
* notification occurs in the thread-default context of the calling
* operation.
*
* The ostree_async_progress_get_status() and ostree_async_progress_set_status()
* methods get and set a well-known `status` key of type %G_VARIANT_TYPE_STRING.
* This key may be accessed using the other #OstreeAsyncProgress methods, but it
* must always have the correct type.
*/
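/*
 * An illustrative sketch (not part of the original sources) of the typical
 * producer/consumer pairing. do_fetch() is a hypothetical callee; the
 * "outstanding-fetches" key matches the examples further below.
 *
 * |[<!-- language="C" -->
 * static void
 * on_changed (OstreeAsyncProgress *progress, gpointer user_data)
 * {
 *   guint fetches = ostree_async_progress_get_uint (progress, "outstanding-fetches");
 *   g_print ("outstanding fetches: %u\n", fetches);
 * }
 *
 * OstreeAsyncProgress *progress =
 *   ostree_async_progress_new_and_connect (on_changed, NULL);
 * do_fetch (progress);   // callee reports via ostree_async_progress_set (progress, ...)
 * ostree_async_progress_finish (progress);
 * g_object_unref (progress);
 * ]|
 */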
enum {
CHANGED,
LAST_SIGNAL
};
static guint signals[LAST_SIGNAL] = { 0 };
struct OstreeAsyncProgress
{
GObject parent_instance;
GMutex lock;
GMainContext *maincontext;
GSource *idle_source;
GHashTable *values; /* (element-type uint GVariant) */
gboolean dead;
};
G_DEFINE_TYPE (OstreeAsyncProgress, ostree_async_progress, G_TYPE_OBJECT)
static void
ostree_async_progress_finalize (GObject *object)
{
OstreeAsyncProgress *self;
self = OSTREE_ASYNC_PROGRESS (object);
g_mutex_clear (&self->lock);
g_clear_pointer (&self->maincontext, g_main_context_unref);
g_clear_pointer (&self->idle_source, g_source_unref);
g_hash_table_unref (self->values);
G_OBJECT_CLASS (ostree_async_progress_parent_class)->finalize (object);
}
static void
ostree_async_progress_class_init (OstreeAsyncProgressClass *klass)
{
GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
gobject_class->finalize = ostree_async_progress_finalize;
/**
* OstreeAsyncProgress::changed:
* @self: Self
*
* Emitted when @self has been changed.
**/
signals[CHANGED] =
g_signal_new ("changed",
OSTREE_TYPE_ASYNC_PROGRESS,
G_SIGNAL_RUN_LAST,
G_STRUCT_OFFSET (OstreeAsyncProgressClass, changed),
NULL, NULL,
NULL,
G_TYPE_NONE, 0);
}
static void
ostree_async_progress_init (OstreeAsyncProgress *self)
{
g_mutex_init (&self->lock);
self->maincontext = g_main_context_ref_thread_default ();
self->values = g_hash_table_new_full (NULL, NULL, NULL, (GDestroyNotify) g_variant_unref);
}
/**
* ostree_async_progress_get_variant:
* @self: an #OstreeAsyncProgress
* @key: a key to look up
*
* Look up a key in the #OstreeAsyncProgress and return the #GVariant associated
* with it. The lookup is thread-safe.
*
* Returns: (transfer full) (nullable): value for the given @key, or %NULL if
* it was not set
* Since: 2017.6
*/
GVariant *
ostree_async_progress_get_variant (OstreeAsyncProgress *self,
const char *key)
{
GVariant *rval;
g_return_val_if_fail (OSTREE_IS_ASYNC_PROGRESS (self), NULL);
g_return_val_if_fail (key != NULL, NULL);
g_mutex_lock (&self->lock);
rval = g_hash_table_lookup (self->values, GUINT_TO_POINTER (g_quark_from_string (key)));
if (rval != NULL)
g_variant_ref (rval);
g_mutex_unlock (&self->lock);
return rval;
}
guint
ostree_async_progress_get_uint (OstreeAsyncProgress *self,
const char *key)
{
g_autoptr(GVariant) rval = ostree_async_progress_get_variant (self, key);
return (rval != NULL) ? g_variant_get_uint32 (rval) : 0;
}
guint64
ostree_async_progress_get_uint64 (OstreeAsyncProgress *self,
const char *key)
{
g_autoptr(GVariant) rval = ostree_async_progress_get_variant (self, key);
return (rval != NULL) ? g_variant_get_uint64 (rval) : 0;
}
/**
* ostree_async_progress_get:
* @self: an #OstreeAsyncProgress
* @...: key name, format string, #GVariant return locations, …, followed by %NULL
*
* Get the values corresponding to zero or more keys from the
* #OstreeAsyncProgress. Each key is specified in @... as the key name, followed
* by a #GVariant format string, followed by the necessary arguments for that
* format string, just as for g_variant_get(). After those arguments is the
* next key name. The varargs list must be %NULL-terminated.
*
* Each format string must make deep copies of its value, as the values stored
* in the #OstreeAsyncProgress may be freed from another thread after this
* function returns.
*
* This operation is thread-safe, and all the keys are queried atomically.
*
* |[<!-- language="C" -->
* guint32 outstanding_fetches;
* guint64 bytes_received;
* g_autofree gchar *status = NULL;
* g_autoptr(GVariant) refs_variant = NULL;
*
* ostree_async_progress_get (progress,
* "outstanding-fetches", "u", &outstanding_fetches,
* "bytes-received", "t", &bytes_received,
* "status", "s", &status,
* "refs", "@a{ss}", &refs_variant,
* NULL);
* ]|
*
* Since: 2017.6
*/
void
ostree_async_progress_get (OstreeAsyncProgress *self,
...)
{
va_list ap;
const char *key, *format_string;
g_mutex_lock (&self->lock);
va_start (ap, self);
for (key = va_arg (ap, const char *), format_string = va_arg (ap, const char *);
key != NULL;
key = va_arg (ap, const char *), format_string = va_arg (ap, const char *))
{
GVariant *variant;
g_assert (format_string != NULL);
variant = g_hash_table_lookup (self->values, GUINT_TO_POINTER (g_quark_from_string (key)));
g_assert (variant != NULL);
g_assert (g_variant_check_format_string (variant, format_string, TRUE));
g_variant_get_va (variant, format_string, NULL, &ap);
}
va_end (ap);
g_mutex_unlock (&self->lock);
}
static gboolean
idle_invoke_async_progress (gpointer user_data)
{
OstreeAsyncProgress *self = user_data;
g_mutex_lock (&self->lock);
self->idle_source = NULL;
g_mutex_unlock (&self->lock);
g_signal_emit (self, signals[CHANGED], 0);
return FALSE;
}
static void
ensure_callback_locked (OstreeAsyncProgress *self)
{
if (self->idle_source)
return;
self->idle_source = g_idle_source_new ();
g_source_set_callback (self->idle_source, idle_invoke_async_progress, self, NULL);
g_source_attach (self->idle_source, self->maincontext);
}
/**
* ostree_async_progress_set_status:
* @self: an #OstreeAsyncProgress
* @status: (nullable): new status string, or %NULL to clear the status
*
* Set the human-readable status string for the #OstreeAsyncProgress. This
* operation is thread-safe. %NULL may be passed to clear the status.
*
* This is a convenience function to set the well-known `status` key.
*
* Since: 2017.6
*/
void
ostree_async_progress_set_status (OstreeAsyncProgress *self,
const char *status)
{
ostree_async_progress_set_variant (self, "status",
g_variant_new_string ((status != NULL) ? status : ""));
}
/**
* ostree_async_progress_get_status:
* @self: an #OstreeAsyncProgress
*
* Get the human-readable status string from the #OstreeAsyncProgress. This
 * operation is thread-safe. The returned value may be %NULL if no status is
* set.
*
* This is a convenience function to get the well-known `status` key.
*
* Returns: (transfer full) (nullable): the current status, or %NULL if none is set
* Since: 2017.6
*/
char *
ostree_async_progress_get_status (OstreeAsyncProgress *self)
{
g_autoptr(GVariant) rval = ostree_async_progress_get_variant (self, "status");
const gchar *status = (rval != NULL) ? g_variant_get_string (rval, NULL) : NULL;
if (status != NULL && *status == '\0')
status = NULL;
return g_strdup (status);
}
/**
* ostree_async_progress_set:
* @self: an #OstreeAsyncProgress
* @...: key name, format string, #GVariant parameters, …, followed by %NULL
*
* Set the values for zero or more keys in the #OstreeAsyncProgress. Each key is
* specified in @... as the key name, followed by a #GVariant format string,
* followed by the necessary arguments for that format string, just as for
* g_variant_new(). After those arguments is the next key name. The varargs list
* must be %NULL-terminated.
*
* g_variant_ref_sink() will be called as appropriate on the #GVariant
* parameters, so they may be floating.
*
* This operation is thread-safe, and all the keys are set atomically.
*
* |[<!-- language="C" -->
* guint32 outstanding_fetches = 15;
* guint64 bytes_received = 1000;
*
* ostree_async_progress_set (progress,
* "outstanding-fetches", "u", outstanding_fetches,
* "bytes-received", "t", bytes_received,
* "status", "s", "Updated status",
* "refs", "@a{ss}", g_variant_new_parsed ("@a{ss} {}"),
* NULL);
* ]|
*
* Since: 2017.6
*/
void
ostree_async_progress_set (OstreeAsyncProgress *self,
...)
{
va_list ap;
const char *key, *format_string;
gboolean changed;
g_mutex_lock (&self->lock);
if (self->dead)
goto out;
va_start (ap, self);
for (key = va_arg (ap, const char *), format_string = va_arg (ap, const char *);
key != NULL;
key = va_arg (ap, const char *), format_string = va_arg (ap, const char *))
{
GVariant *orig_value;
g_autoptr(GVariant) new_value = NULL;
gpointer qkey = GUINT_TO_POINTER (g_quark_from_string (key));
new_value = g_variant_ref_sink (g_variant_new_va (format_string, NULL, &ap));
if (g_hash_table_lookup_extended (self->values, qkey, NULL, (gpointer *) &orig_value) &&
g_variant_equal (orig_value, new_value))
continue;
g_hash_table_replace (self->values, qkey, g_steal_pointer (&new_value));
changed = TRUE;
}
va_end (ap);
if (changed)
ensure_callback_locked (self);
out:
g_mutex_unlock (&self->lock);
}
/**
* ostree_async_progress_set_variant:
* @self: an #OstreeAsyncProgress
* @key: a key to set
* @value: the value to assign to @key
*
* Assign a new @value to the given @key, replacing any existing value. The
* operation is thread-safe. @value may be a floating reference;
* g_variant_ref_sink() will be called on it.
*
* Any watchers of the #OstreeAsyncProgress will be notified of the change if
* @value differs from the existing value for @key.
*
* Since: 2017.6
*/
void
ostree_async_progress_set_variant (OstreeAsyncProgress *self,
const char *key,
GVariant *value)
{
GVariant *orig_value;
g_autoptr(GVariant) new_value = g_variant_ref_sink (value);
gpointer qkey = GUINT_TO_POINTER (g_quark_from_string (key));
g_return_if_fail (OSTREE_IS_ASYNC_PROGRESS (self));
g_return_if_fail (key != NULL);
g_return_if_fail (value != NULL);
g_mutex_lock (&self->lock);
if (self->dead)
goto out;
if (g_hash_table_lookup_extended (self->values, qkey, NULL, (gpointer *) &orig_value))
{
if (g_variant_equal (orig_value, new_value))
goto out;
}
g_hash_table_replace (self->values, qkey, g_steal_pointer (&new_value));
ensure_callback_locked (self);
out:
g_mutex_unlock (&self->lock);
}
void
ostree_async_progress_set_uint (OstreeAsyncProgress *self,
const char *key,
guint value)
{
ostree_async_progress_set_variant (self, key, g_variant_new_uint32 (value));
}
void
ostree_async_progress_set_uint64 (OstreeAsyncProgress *self,
const char *key,
guint64 value)
{
ostree_async_progress_set_variant (self, key, g_variant_new_uint64 (value));
}
/**
* ostree_async_progress_new:
*
* Returns: (transfer full): A new progress object
*/
OstreeAsyncProgress *
ostree_async_progress_new (void)
{
return (OstreeAsyncProgress*)g_object_new (OSTREE_TYPE_ASYNC_PROGRESS, NULL);
}
OstreeAsyncProgress *
ostree_async_progress_new_and_connect (void (*changed) (OstreeAsyncProgress *self, gpointer user_data),
gpointer user_data)
{
OstreeAsyncProgress *ret = ostree_async_progress_new ();
g_signal_connect (ret, "changed", G_CALLBACK (changed), user_data);
return ret;
}
/**
* ostree_async_progress_finish:
* @self: Self
*
* Process any pending signals, ensuring the main context is cleared
* of sources used by this object. Also ensures that no further
* events will be queued.
*/
void
ostree_async_progress_finish (OstreeAsyncProgress *self)
{
gboolean emit_changed = FALSE;
g_mutex_lock (&self->lock);
if (!self->dead)
{
self->dead = TRUE;
if (self->idle_source)
{
g_source_destroy (self->idle_source);
self->idle_source = NULL;
emit_changed = TRUE;
}
}
g_mutex_unlock (&self->lock);
if (emit_changed)
g_signal_emit (self, signals[CHANGED], 0);
}
| 1 | 10,437 | Why not init this at variable decl time instead? | ostreedev-ostree | c |
@@ -25,6 +25,11 @@ const (
ProjectIDEnvKey = "PROJECT_ID"
)
+// ProjectIDEnvConfig is a struct to parse Project id from env var
+type ProjectIDEnvConfig struct {
+ ProjectID string `envconfig:"PROJECT_ID"`
+}
+
// ProjectID returns the project ID for a particular resource.
func ProjectID(project string, client metadataClient.Client) (string, error) {
// If project is set, then return that one. | 1 | /*
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
metadataClient "github.com/google/knative-gcp/pkg/gclient/metadata"
)
const (
clusterNameAttr = "cluster-name"
ProjectIDEnvKey = "PROJECT_ID"
)
// ProjectID returns the project ID for a particular resource.
func ProjectID(project string, client metadataClient.Client) (string, error) {
// If project is set, then return that one.
if project != "" {
return project, nil
}
// Otherwise, ask GKE metadata server.
projectID, err := client.ProjectID()
if err != nil {
return "", err
}
return projectID, nil
}
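// An illustrative sketch (not part of the original file): resolving the project ID
// with an environment-variable override before falling back to the metadata server.
//
//   project := os.Getenv(ProjectIDEnvKey) // may be empty
//   projectID, err := ProjectID(project, client)
//   if err != nil {
//       // neither the env var nor the metadata server yielded a project ID
//   }
//
// Here client is any metadataClient.Client implementation; how it is constructed
// is left out on purpose.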
// ClusterName returns the cluster name for a particular resource.
func ClusterName(clusterName string, client metadataClient.Client) (string, error) {
// If clusterName is set, then return that one.
if clusterName != "" {
return clusterName, nil
}
clusterName, err := client.InstanceAttributeValue(clusterNameAttr)
if err != nil {
return "", err
}
return clusterName, nil
}
| 1 | 18,338 | Do we still need this if we're doing everything through `envconfig`? | google-knative-gcp | go |
@@ -22,6 +22,8 @@ type OptionsNetwork struct {
Testnet bool
Localnet bool
+ IdentityCheck bool
+
DiscoveryAPIAddress string
BrokerAddress string
| 1 | /*
* Copyright (C) 2018 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package node
// OptionsNetwork describes possible parameters of network configuration
type OptionsNetwork struct {
Testnet bool
Localnet bool
DiscoveryAPIAddress string
BrokerAddress string
EtherClientRPC string
EtherPaymentsAddress string
}
| 1 | 11,985 | I would like these options to have `experiment` prefix or suffix. Because we will be deleting them later | mysteriumnetwork-node | go |
@@ -88,7 +88,11 @@
<%= f.label :additional_info, 'Please include any additional information you feel we need to correctly execute the purchase' %>
<%= f.text_area :additional_info, class: 'form-control' %>
</div>
- <%= f.submit class: 'form-button' %>
+ <% unless ENV['DISABLE_SANDBOX_WARNING'] == 'true' %>
+ <%= f.submit value: 'Submit test request for approval', class: 'form-button' %>
+ <% else %>
+ <%= f.submit class: 'form-button' %>
+ <% end %>
<% if @model_instance.persisted? %>
<%= link_to 'Discard Changes', proposal_url(@model_instance.proposal) %>
<% end %> | 1 | <% content_for :title, "GSA 18f Purchase Request" %>
<div class="container content m-request-form">
<h2>GSA 18f</h2>
<h3>Purchase Request - Supplies</h3>
<p>E.g., ergonomic keyboards, foamcore boards, ergonomic mice, monitor stands, post-its, sharpies, etc.</p>
<%= form_for @model_instance do |f| %>
<!-- Office Location -->
<div class="form-group">
<%= f.label :office %>
<%= f.collection_select(
:office, Gsa18f::Procurement::OFFICES, :to_s, :to_s,
{include_blank: true}, {class: 'form-control'}) %>
</div>
<!-- Product Name and Description -Text Area- -->
<div class="form-group">
<%= f.label :product_name_and_description, 'Product Name and Description', class: "required" %>
<%= f.text_area :product_name_and_description, class: 'form-control' %>
</div>
<!-- Justification -Text Area- -->
<div class="form-group">
<%= f.label :justification %>
<%= f.text_area :justification, class: 'form-control' %>
</div>
<!-- Link to Product -->
<div class="form-group">
<%= f.label :link_to_product %>
<p>Search <a href='https://www.gsaadvantage.gov/advantage/main/start_page.do' target='_blank'>GSA Advantage</a> first; then <a href='http://www.amazon.com' target='_blank'>Amazon</a>; then another vendor. Paste the link here.</p>
<%= f.text_field :link_to_product, class: 'form-control' %>
</div>
<!-- Amount -->
<div class="form-group">
<%= f.label :cost_per_unit, class: "required" %>
<div class="input-group">
<div class="input-group-addon">$</div>
<%= f.number_field :cost_per_unit, class: 'form-control', step: 0.01 %>
</div>
</div>
<!-- Quantity of Product -->
<div class="form-group">
<%= f.label :quantity, class: "required" %>
<div class="input-group">
<%= f.number_field :quantity, class: 'form-control', step: 1 %>
</div>
</div>
<div class="form-group">
<%= f.label :recurring do %>
<%= f.check_box :recurring, :"data-filter-control" => "recurring" %>
Recurring Charge
<% end %>
</div>
<!-- Recurring Frequency -->
<div data-filter-key="recurring"
data-filter-value="1">
<div class="form-group">
<%= f.label :recurring_interval %>
<div class="input-group">
<%= f.collection_select(
:recurring_interval, Gsa18f::Procurement::RECURRENCE, :to_s, :to_s,
{include_blank: false}, {class: 'form-control'}) %>
</div>
</div>
<div class="form-group">
<%= f.label :recurring_length %>
<div class="input-group">
<%= f.number_field :recurring_length, class: 'form-control', step: 1, 'aria-describedby'=>'description-add-info' %>
</div>
<div class="help-block" id="description-add-info">
e.g. Number of Days, Months, Years
</div>
</div>
</div>
<!-- Date Requested -->
<div class="form-group">
<%= f.label :date_requested %>
<div class="input-group">
<%= f.date_field :date_requested, as: :date, value: f.object.date_requested %>
</div>
</div>
<!-- Urgency -->
<div class="form-group">
<%= f.label :urgency %>
<%= f.collection_select(
:urgency, Gsa18f::Procurement::URGENCY, :to_s, :to_s,
{include_blank: true}, {class: 'form-control'}) %>
</div>
<!-- Additional Info -->
<div class="form-group">
<%= f.label :additional_info, 'Please include any additional information you feel we need to correctly execute the purchase' %>
<%= f.text_area :additional_info, class: 'form-control' %>
</div>
<%= f.submit class: 'form-button' %>
<% if @model_instance.persisted? %>
<%= link_to 'Discard Changes', proposal_url(@model_instance.proposal) %>
<% end %>
<% end %>
</div>
| 1 | 14,077 | Since we have the same logic in both forms, can we make a shared partial/helper for it? | 18F-C2 | rb |
@@ -69,10 +69,12 @@ func NewSalesman(opts ...Option) (*Salesman, error) {
if err := o.Validate(); err != nil {
return nil, err
}
+ marketAddr := blockchain.MarketAddr().Hex()
+ askPlansKey := marketAddr + "/ask_plans"
s := &Salesman{
options: o,
- askPlanStorage: state.NewKeyedStorage("ask_plans", o.storage),
+ askPlanStorage: state.NewKeyedStorage(askPlansKey, o.storage),
askPlanCGroups: map[string]cgroups.CGroup{},
deals: map[string]*sonm.Deal{},
orders: map[string]*sonm.Order{}, | 1 | package salesman
import (
"context"
"crypto/ecdsa"
"errors"
"fmt"
"sync"
"time"
"github.com/ethereum/go-ethereum/crypto"
"github.com/mohae/deepcopy"
"github.com/pborman/uuid"
"github.com/sonm-io/core/blockchain"
"github.com/sonm-io/core/insonmnia/cgroups"
"github.com/sonm-io/core/insonmnia/hardware"
"github.com/sonm-io/core/insonmnia/matcher"
"github.com/sonm-io/core/insonmnia/resource"
"github.com/sonm-io/core/insonmnia/state"
"github.com/sonm-io/core/proto"
"github.com/sonm-io/core/util"
"github.com/sonm-io/core/util/multierror"
"go.uber.org/zap"
)
const defaultMaintenancePeriod = time.Hour * 24 * 365 * 100
const maintenanceGap = time.Minute * 10
type Config struct {
Logger zap.SugaredLogger
Storage *state.Storage
Resources *resource.Scheduler
Hardware *hardware.Hardware
Eth blockchain.API
CGroupManager cgroups.CGroupManager
Matcher matcher.Matcher
Ethkey *ecdsa.PrivateKey
Config YAMLConfig
}
type YAMLConfig struct {
RegularBillPeriod time.Duration `yaml:"regular_deal_bill_period" default:"24h"`
SpotBillPeriod time.Duration `yaml:"spot_deal_bill_period" default:"1h"`
SyncStepTimeout time.Duration `yaml:"sync_step_timeout" default:"2m"`
SyncInterval time.Duration `yaml:"sync_interval" default:"10s"`
MatcherRetryInterval time.Duration `yaml:"matcher_retry_interval" default:"10s"`
}
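// An illustrative sketch (not part of the original file): a YAML fragment matching
// the tags above. The values shown are simply the declared defaults, and the
// enclosing "salesman:" key is an assumption for illustration only.
//
//   salesman:
//     regular_deal_bill_period: 24h
//     spot_deal_bill_period: 1h
//     sync_step_timeout: 2m
//     sync_interval: 10s
//     matcher_retry_interval: 10s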
type Salesman struct {
*options
askPlanStorage *state.KeyedStorage
askPlans map[string]*sonm.AskPlan
askPlanCGroups map[string]cgroups.CGroup
deals map[string]*sonm.Deal
orders map[string]*sonm.Order
nextMaintenance time.Time
dealsCh chan *sonm.Deal
mu sync.Mutex
}
func NewSalesman(opts ...Option) (*Salesman, error) {
o := &options{}
for _, opt := range opts {
opt(o)
}
if err := o.Validate(); err != nil {
return nil, err
}
s := &Salesman{
options: o,
askPlanStorage: state.NewKeyedStorage("ask_plans", o.storage),
askPlanCGroups: map[string]cgroups.CGroup{},
deals: map[string]*sonm.Deal{},
orders: map[string]*sonm.Order{},
nextMaintenance: time.Now().Add(defaultMaintenancePeriod),
dealsCh: make(chan *sonm.Deal, 100),
}
if err := s.restoreState(); err != nil {
return nil, err
}
return s, nil
}
func (m *Salesman) Close() {}
func (m *Salesman) Run(ctx context.Context) <-chan *sonm.Deal {
go func() {
for _, plan := range m.askPlans {
orderID := plan.GetOrderID()
dealID := plan.GetDealID()
if dealID.IsZero() && !orderID.IsZero() {
order, err := m.eth.Market().GetOrderInfo(ctx, orderID.Unwrap())
if err != nil {
m.log.Warnf("failed to get order info for order %s, stopping waiting for deal: %s", orderID.Unwrap().String(), err)
continue
}
go m.waitForDeal(ctx, order)
}
}
go m.syncRoutine(ctx)
}()
return m.dealsCh
}
func (m *Salesman) ScheduleMaintenance(timePoint time.Time) error {
m.log.Infof("Scheduling next maintenance at %s", timePoint.String())
m.mu.Lock()
defer m.mu.Unlock()
m.nextMaintenance = timePoint
return m.storage.Save("next_maintenance", m.nextMaintenance)
}
func (m *Salesman) NextMaintenance() time.Time {
m.mu.Lock()
defer m.mu.Unlock()
return m.nextMaintenance
}
func (m *Salesman) AskPlan(planID string) (*sonm.AskPlan, error) {
m.mu.Lock()
defer m.mu.Unlock()
askPlan, ok := m.askPlans[planID]
if !ok {
return nil, errors.New("specified ask-plan does not exist")
}
copy := deepcopy.Copy(askPlan).(*sonm.AskPlan)
return copy, nil
}
func (m *Salesman) AskPlans() map[string]*sonm.AskPlan {
m.mu.Lock()
defer m.mu.Unlock()
return deepcopy.Copy(m.askPlans).(map[string]*sonm.AskPlan)
}
func (m *Salesman) CreateAskPlan(askPlan *sonm.AskPlan) (string, error) {
id := uuid.New()
askPlan.ID = id
if err := askPlan.GetResources().GetGPU().Normalize(m.hardware); err != nil {
return "", err
}
m.mu.Lock()
defer m.mu.Unlock()
if err := m.createCGroup(askPlan); err != nil {
return "", err
}
if err := m.resources.Consume(askPlan); err != nil {
m.dropCGroup(askPlan.ID)
return "", err
}
m.askPlans[askPlan.ID] = askPlan
if err := m.askPlanStorage.Save(m.askPlans); err != nil {
m.dropCGroup(askPlan.ID)
m.resources.Release(askPlan.ID)
return "", err
}
return id, nil
}
func (m *Salesman) RemoveAskPlan(planID string) error {
m.mu.Lock()
defer m.mu.Unlock()
ask, ok := m.askPlans[planID]
if !ok {
return fmt.Errorf("no such plan %s", planID)
}
ask.Status = sonm.AskPlan_PENDING_DELETION
if err := m.askPlanStorage.Save(m.askPlans); err != nil {
return fmt.Errorf("could not mark ask plan %s with active deal %s for deletion: %s",
planID, ask.GetDealID().Unwrap().String(), err)
}
return nil
}
func (m *Salesman) maybeShutdownAskPlan(ctx context.Context, plan *sonm.AskPlan) error {
if plan.Status != sonm.AskPlan_PENDING_DELETION {
return nil
}
m.log.Debugf("trying to shut down ask plan %s", plan.GetID())
if !plan.GetDealID().IsZero() {
m.log.Debugf("ask plan %s is still bound to deal %s", plan.ID, plan.GetDealID().Unwrap().String())
return nil
}
if !plan.GetOrderID().IsZero() {
if err := m.eth.Market().CancelOrder(ctx, m.ethkey, plan.GetOrderID().Unwrap()); err != nil {
return fmt.Errorf("could not cancel order: %s", err)
}
}
m.mu.Lock()
defer m.mu.Unlock()
if err := m.resources.Release(plan.ID); err != nil {
// We can not handle this error, because it is persistent so just log it and skip
m.log.Errorf("inconsistency found - could not release resources from pool: %s", err)
}
delete(m.askPlans, plan.ID)
m.askPlanStorage.Save(m.askPlans)
return m.dropCGroup(plan.ID)
}
func (m *Salesman) AskPlanByDeal(dealID *sonm.BigInt) (*sonm.AskPlan, error) {
m.mu.Lock()
defer m.mu.Unlock()
for _, plan := range m.askPlans {
if plan.DealID.Cmp(dealID) == 0 {
return deepcopy.Copy(plan).(*sonm.AskPlan), nil
}
}
return nil, fmt.Errorf("ask plan for deal id %s is not found", dealID)
}
func (m *Salesman) Deal(dealID *sonm.BigInt) (*sonm.Deal, error) {
id := dealID.Unwrap().String()
m.mu.Lock()
defer m.mu.Unlock()
deal, ok := m.deals[id]
if !ok {
return nil, fmt.Errorf("deal not found by %s", id)
}
return deal, nil
}
func (m *Salesman) CGroup(planID string) (cgroups.CGroup, error) {
m.mu.Lock()
defer m.mu.Unlock()
cGroup, ok := m.askPlanCGroups[planID]
if !ok {
return nil, fmt.Errorf("cgroup for ask plan %s not found, probably no such plan", planID)
}
return cGroup, nil
}
func (m *Salesman) syncRoutine(ctx context.Context) {
m.log.Debugf("starting sync routine")
ticker := util.NewImmediateTicker(m.config.SyncInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
m.syncWithBlockchain(ctx)
case <-ctx.Done():
return
}
}
}
func (m *Salesman) syncWithBlockchain(ctx context.Context) {
m.log.Debugf("syncing salesman with blockchain")
plans := m.AskPlans()
for _, plan := range plans {
orderId := plan.GetOrderID()
dealId := plan.GetDealID()
ctxWithTimeout, cancel := context.WithTimeout(ctx, m.config.SyncStepTimeout)
if !dealId.IsZero() {
if err := m.loadCheckDeal(ctxWithTimeout, plan); err != nil {
m.log.Warnf("could not check deal %s for plan %s: %s", dealId.Unwrap().String(), plan.ID, err)
}
} else if !orderId.IsZero() {
if err := m.checkOrder(ctxWithTimeout, plan); err != nil {
m.log.Warnf("could not check order %s for plan %s: %s", orderId.Unwrap().String(), plan.ID, err)
}
} else if plan.GetStatus() != sonm.AskPlan_PENDING_DELETION {
order, err := m.placeOrder(ctxWithTimeout, plan)
if err != nil {
m.log.Warnf("could not place order for plan %s: %s", plan.ID, err)
} else {
go m.waitForDeal(ctx, order)
}
}
if err := m.maybeShutdownAskPlan(ctxWithTimeout, plan); err != nil {
m.log.Warnf("could not shutdown ask plan %s: %s", plan.ID, err)
}
cancel()
}
}
func (m *Salesman) restoreState() error {
m.askPlans = map[string]*sonm.AskPlan{}
if err := m.askPlanStorage.Load(&m.askPlans); err != nil {
return fmt.Errorf("could not restore salesman state: %s", err)
}
for _, plan := range m.askPlans {
if err := m.resources.Consume(plan); err != nil {
m.log.Warnf("dropping ask plan due to resource changes")
//Ignore error here, as resources that were not consumed can not be released.
m.RemoveAskPlan(plan.ID)
} else {
m.log.Debugf("consumed resource for ask plan %s", plan.GetID())
if err := m.createCGroup(plan); err != nil {
m.log.Warnf("can not create cgroup for ask plan %s: %s", plan.ID, err)
return err
}
}
}
if err := m.storage.Load("next_maintenance", &m.nextMaintenance); err != nil {
return fmt.Errorf("failed to load next maintenance: %s", err)
}
//TODO: restore tasks
return nil
}
func (m *Salesman) createCGroup(plan *sonm.AskPlan) error {
cgroupResources := plan.GetResources().ToCgroupResources()
cgroup, err := m.cGroupManager.Attach(plan.ID, cgroupResources)
if err != nil {
return err
}
m.log.Infof("created cgroup %s for ask plan %s", cgroup.Suffix(), plan.ID)
m.askPlanCGroups[plan.ID] = cgroup
return nil
}
func (m *Salesman) dropCGroup(planID string) error {
cgroup, ok := m.askPlanCGroups[planID]
if !ok {
return fmt.Errorf("cgroup for ask plan %s not found, probably no such plan", planID)
}
delete(m.askPlanCGroups, planID)
if err := cgroup.Delete(); err != nil {
return fmt.Errorf("could not drop cgroup %s for ask plan %s: %s", cgroup.Suffix(), planID, err)
}
m.log.Debugf("dropped cgroup for ask plan %s", planID)
return nil
}
func (m *Salesman) loadCheckDeal(ctx context.Context, plan *sonm.AskPlan) error {
dealID := plan.DealID.Unwrap()
deal, err := m.eth.Market().GetDealInfo(ctx, dealID)
if err != nil {
return fmt.Errorf("could not get deal info for ask plan %s: %s", plan.ID, err)
}
return m.checkDeal(ctx, plan, deal)
}
func (m *Salesman) checkDeal(ctx context.Context, plan *sonm.AskPlan, deal *sonm.Deal) error {
m.log.Debugf("checking deal %s for ask plan %s", deal.GetId().Unwrap().String(), plan.GetID())
if deal.Status == sonm.DealStatus_DEAL_CLOSED {
if err := m.unregisterOrder(plan.ID); err != nil {
return fmt.Errorf("failed to unregister order from ask plan %s: %s", plan.GetID(), err)
}
if err := m.unregisterDeal(plan.GetID(), deal); err != nil {
return fmt.Errorf("failed to cleanup deal from ask plan %s: %s", plan.GetID(), err)
}
m.log.Debugf("succesefully removed closed deal %s from ask plan %s", deal.GetId().Unwrap().String(), plan.GetID())
return nil
} else {
multi := multierror.NewMultiError()
if err := m.registerDeal(plan.GetID(), deal); err != nil {
multi = multierror.Append(multi, err)
}
if err := m.maybeCloseDeal(ctx, plan, deal); err != nil {
multi = multierror.Append(multi, fmt.Errorf("could not close deal: %s", err))
}
if err := m.maybeBillDeal(ctx, deal); err != nil {
multi = multierror.Append(multi, fmt.Errorf("could not bill deal: %s", err))
}
return multi.ErrorOrNil()
}
}
func (m *Salesman) maybeBillDeal(ctx context.Context, deal *sonm.Deal) error {
startTime := deal.GetStartTime().Unix()
billTime := deal.GetLastBillTS().Unix()
if billTime.Before(startTime) {
billTime = startTime
}
var billPeriod time.Duration
if deal.IsSpot() {
billPeriod = m.config.SpotBillPeriod
} else {
billPeriod = m.config.RegularBillPeriod
}
if time.Now().Sub(billTime) > billPeriod {
if err := m.eth.Market().Bill(ctx, m.ethkey, deal.GetId().Unwrap()); err != nil {
return err
}
m.log.Infof("billed deal %s", deal.GetId().Unwrap().String())
}
return nil
}
func (m *Salesman) shouldCloseDeal(ctx context.Context, plan *sonm.AskPlan, deal *sonm.Deal) bool {
if deal.GetDuration() != 0 {
endTime := deal.GetStartTime().Unix().Add(time.Second * time.Duration(deal.GetDuration()))
if time.Now().After(endTime) {
return true
}
} else {
if plan.Status == sonm.AskPlan_PENDING_DELETION {
return true
}
if time.Now().After(m.NextMaintenance()) {
return true
}
}
return false
}
func (m *Salesman) maybeCloseDeal(ctx context.Context, plan *sonm.AskPlan, deal *sonm.Deal) error {
if m.shouldCloseDeal(ctx, plan, deal) {
// TODO: we will know about the closed deal on the next iteration for simplicity,
// but maybe we can optimize here.
if err := m.eth.Market().CloseDeal(ctx, m.ethkey, deal.GetId().Unwrap(), sonm.BlacklistType_BLACKLIST_NOBODY); err != nil {
return err
}
m.log.Infof("closed deal %s", deal.GetId().Unwrap().String())
}
return nil
}
func (m *Salesman) unregisterOrder(planID string) error {
m.mu.Lock()
defer m.mu.Unlock()
plan, ok := m.askPlans[planID]
if !ok {
return fmt.Errorf("failed to drop order from plan %s: no such plan", planID)
}
orderID := plan.GetOrderID()
if orderID.IsZero() {
return fmt.Errorf("failed to drop order from plan %s: plan has zero order", planID)
}
idStr := orderID.Unwrap().String()
delete(m.orders, idStr)
plan.OrderID = nil
m.log.Infof("unregistered order %s", idStr)
return nil
}
func (m *Salesman) registerOrder(planID string, order *sonm.Order) error {
if order.GetId().IsZero() {
return fmt.Errorf("failed to register order: zero order id")
}
orderIDStr := order.GetId().Unwrap().String()
m.mu.Lock()
defer m.mu.Unlock()
plan, ok := m.askPlans[planID]
if !ok {
return fmt.Errorf("could not assign order %s to plan %s: no such plan", orderIDStr, planID)
}
if plan.GetOrderID().Cmp(order.GetId()) == 0 {
return nil
}
if !plan.GetOrderID().IsZero() {
return fmt.Errorf("attempted to register order %s for plan %s with deal %s",
orderIDStr, planID, plan.GetOrderID().Unwrap().String())
}
plan.OrderID = order.GetId()
if err := m.askPlanStorage.Save(m.askPlans); err != nil {
return err
}
m.orders[orderIDStr] = order
m.log.Infof("assigned order %s to plan %s", orderIDStr, planID)
return nil
}
func (m *Salesman) unregisterDeal(planID string, deal *sonm.Deal) error {
m.mu.Lock()
defer m.mu.Unlock()
plan, ok := m.askPlans[planID]
if !ok {
return fmt.Errorf("could not drop deal from plan %s: no such plan", planID)
}
dealID := plan.DealID
if dealID.IsZero() {
return nil
}
m.dealsCh <- deal
delete(m.deals, dealID.Unwrap().String())
plan.DealID = nil
if err := m.askPlanStorage.Save(m.askPlans); err != nil {
return err
}
m.log.Infof("dropped deal %s from plan %s", dealID.Unwrap().String(), planID)
return nil
}
func (m *Salesman) registerDeal(planID string, deal *sonm.Deal) error {
if deal.GetId().IsZero() {
return fmt.Errorf("failed to register deal: zero deal id")
}
dealIDStr := deal.GetId().Unwrap().String()
m.mu.Lock()
defer m.mu.Unlock()
plan, ok := m.askPlans[planID]
if !ok {
return fmt.Errorf("could not assign deal %s to plan %s: no such plan", dealIDStr, planID)
}
if plan.DealID.Cmp(deal.GetId()) != 0 && !plan.DealID.IsZero() {
return fmt.Errorf("attempted to register deal %s for plan %s with deal %s",
dealIDStr, planID, plan.DealID.Unwrap().String())
}
m.dealsCh <- deal
m.deals[dealIDStr] = deal
plan.DealID = deal.GetId()
if err := m.askPlanStorage.Save(m.askPlans); err != nil {
return err
}
m.log.Infof("assigned deal %s to plan %s", dealIDStr, planID)
return nil
}
func (m *Salesman) checkOrder(ctx context.Context, plan *sonm.AskPlan) error {
//TODO: validate that the deal is ours
m.log.Debugf("checking order %s for ask plan %s", plan.GetOrderID().Unwrap().String(), plan.ID)
order, err := m.eth.Market().GetOrderInfo(ctx, plan.GetOrderID().Unwrap())
if err != nil {
return fmt.Errorf("could not get order info for order %s: %s", plan.GetOrderID().Unwrap().String(), err)
}
if err := m.registerOrder(plan.GetID(), order); err != nil {
return fmt.Errorf("could not register order %s: %s", plan.GetOrderID().Unwrap().String(), err)
}
if !order.DealID.IsZero() {
plan.DealID = order.DealID
return m.loadCheckDeal(ctx, plan)
} else if order.OrderStatus != sonm.OrderStatus_ORDER_ACTIVE {
return m.unregisterOrder(plan.ID)
} else {
maintenanceTime := m.NextMaintenance()
orderEndTime := time.Now().Add(time.Second * time.Duration(order.Duration))
if orderEndTime.After(maintenanceTime) {
if err := m.eth.Market().CancelOrder(ctx, m.ethkey, plan.GetOrderID().Unwrap()); err != nil {
return fmt.Errorf("could not cancel order for maintenance - %s", err)
}
return m.unregisterOrder(plan.ID)
}
}
return nil
}
func (m *Salesman) placeOrder(ctx context.Context, plan *sonm.AskPlan) (*sonm.Order, error) {
benchmarks, err := m.hardware.ResourcesToBenchmarks(plan.GetResources())
if err != nil {
return nil, fmt.Errorf("could not get benchmarks for ask plan %s: %s", plan.ID, err)
}
maintenanceTime := m.NextMaintenance()
// we add some "gap" here to be ready for maintenance slightly before it occurs
clearTime := maintenanceTime.Add(-maintenanceGap)
now := time.Now()
if now.After(clearTime) {
return nil, fmt.Errorf("faiiled to place order: maintenance is scheduled at %s", maintenanceTime.String())
}
duration := plan.GetDuration().Unwrap()
if duration != 0 && now.Add(duration).After(clearTime) {
duration = clearTime.Sub(now)
//rare case but still possible
if uint64(duration) == 0 {
return nil, fmt.Errorf("faiiled to place order: maintenance is scheduled at %s", maintenanceTime.String())
}
m.log.Infof("reducing order duration from %d to %d due to maintenance at %s",
uint64(plan.GetDuration().Unwrap().Seconds()), uint64(duration.Seconds()), clearTime.String())
}
net := plan.GetResources().GetNetwork()
order := &sonm.Order{
OrderType: sonm.OrderType_ASK,
OrderStatus: sonm.OrderStatus_ORDER_ACTIVE,
AuthorID: sonm.NewEthAddress(crypto.PubkeyToAddress(m.ethkey.PublicKey)),
CounterpartyID: plan.GetCounterparty(),
Duration: uint64(duration.Seconds()),
Price: plan.GetPrice().GetPerSecond(),
//TODO: refactor NetFlags in separate PR
Netflags: net.GetNetFlags(),
IdentityLevel: plan.GetIdentity(),
Blacklist: plan.GetBlacklist().Unwrap().Hex(),
Tag: plan.GetTag(),
Benchmarks: benchmarks,
}
order, err = m.eth.Market().PlaceOrder(ctx, m.ethkey, order)
if err != nil {
return nil, fmt.Errorf("could not place order on bc market: %s", err)
}
if err := m.registerOrder(plan.ID, order); err != nil {
return nil, err
}
m.log.Infof("placed order %s on blockchain", plan.OrderID.Unwrap().String())
return order, nil
}
func (m *Salesman) waitForDeal(ctx context.Context, order *sonm.Order) error {
m.log.Infof("waiting for deal for %s", order.GetId().Unwrap().String())
ticker := util.NewImmediateTicker(m.config.MatcherRetryInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return nil
case <-ticker.C:
//TODO: we also need to do it on worker start
deal, err := m.matcher.CreateDealByOrder(ctx, order)
if err != nil {
m.log.Warnf("could not wait for deal on order %s: %s", order.Id.Unwrap().String(), err)
id := order.Id.Unwrap()
order, err := m.eth.Market().GetOrderInfo(ctx, id)
if err != nil {
m.log.Warnf("could not get order info for order %s: %s", id.String(), err)
continue
}
if order.GetOrderStatus() != sonm.OrderStatus_ORDER_ACTIVE {
return nil
}
continue
}
m.log.Infof("created deal %s for order %s", deal.Id.Unwrap().String(), order.Id.Unwrap().String())
return nil
}
}
}
| 1 | 7,198 | Why to introduce this intermediate variable? | sonm-io-core | go |
@@ -43,6 +43,9 @@ class webhdfs(luigi.Config):
description='Port for webhdfs')
user = luigi.Parameter(default='', description='Defaults to $USER envvar',
config_path=dict(section='hdfs', name='user'))
+ client_type = luigi.Parameter(default='insecure',
+ description='Type of client to use. One of insecure, kerberos or token')
+ token = luigi.Parameter(default='', description='Hadoop delegation token, only used when client_type="token"')
class WebHdfsClient(hdfs_abstract_client.HdfsFileSystem): | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2015 VNG Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A luigi file system client that wraps around the hdfs-library (a webhdfs
client)
This is a sensible, fast alternative to snakebite, in particular for python3
users, for whom snakebite is not supported at the time of writing (Dec 2015).
Note: this wrapper client is not feature complete yet. As with most software,
the authors only implement the features they need. If you need to wrap more of
the file system operations, please do and contribute back.
"""
from luigi.contrib.hdfs import config as hdfs_config
from luigi.contrib.hdfs import abstract_client as hdfs_abstract_client
import luigi.contrib.target
import logging
import os
import warnings
logger = logging.getLogger('luigi-interface')
class webhdfs(luigi.Config):
port = luigi.IntParameter(default=50070,
description='Port for webhdfs')
user = luigi.Parameter(default='', description='Defaults to $USER envvar',
config_path=dict(section='hdfs', name='user'))
class WebHdfsClient(hdfs_abstract_client.HdfsFileSystem):
"""
A webhdfs client that tries to conform to luigi's interface for file existence.
The library is using `this api
<https://hdfscli.readthedocs.io/en/latest/api.html>`__.
"""
def __init__(self, host=None, port=None, user=None):
self.host = host or hdfs_config.hdfs().namenode_host
self.port = port or webhdfs().port
self.user = user or webhdfs().user or os.environ['USER']
@property
def url(self):
# the hdfs package allows it to specify multiple namenodes by passing a string containing
# multiple namenodes separated by ';'
hosts = self.host.split(";")
urls = ['http://' + host + ':' + str(self.port) for host in hosts]
return ";".join(urls)
@property
def client(self):
# A naive benchmark showed that 1000 existence checks took 2.5 secs
# when not recreating the client, and 4.0 secs when recreating it. So
# not urgent to memoize it. Note that it *might* be issues with process
# forking and whatnot (as the one in the snakebite client) if we
# memoize it too trivially.
import hdfs
return hdfs.InsecureClient(url=self.url, user=self.user)
def walk(self, path, depth=1):
return self.client.walk(path, depth=depth)
def exists(self, path):
"""
Returns true if the path exists and false otherwise.
"""
import hdfs
try:
self.client.status(path)
return True
except hdfs.util.HdfsError as e:
if str(e).startswith('File does not exist: '):
return False
else:
raise e
def upload(self, hdfs_path, local_path, overwrite=False):
return self.client.upload(hdfs_path, local_path, overwrite=overwrite)
def download(self, hdfs_path, local_path, overwrite=False, n_threads=-1):
return self.client.download(hdfs_path, local_path, overwrite=overwrite,
n_threads=n_threads)
def remove(self, hdfs_path, recursive=True, skip_trash=False):
assert skip_trash # Yes, you need to explicitly say skip_trash=True
return self.client.delete(hdfs_path, recursive=recursive)
def read(self, hdfs_path, offset=0, length=None, buffer_size=None,
chunk_size=1024, buffer_char=None):
return self.client.read(hdfs_path, offset=offset, length=length,
buffer_size=buffer_size, chunk_size=chunk_size,
buffer_char=buffer_char)
def move(self, path, dest):
parts = dest.rstrip('/').split('/')
if len(parts) > 1:
dir_path = '/'.join(parts[0:-1])
if not self.exists(dir_path):
self.mkdir(dir_path, parents=True)
self.client.rename(path, dest)
def mkdir(self, path, parents=True, mode=0o755, raise_if_exists=False):
"""
Has no return value (just like WebHDFS)
"""
if not parents or raise_if_exists:
warnings.warn('webhdfs mkdir: parents/raise_if_exists not implemented')
permission = int(oct(mode)[2:]) # Convert from int(decimal) to int(octal)
self.client.makedirs(path, permission=permission)
def chmod(self, path, permissions, recursive=False):
"""
Raise a NotImplementedError exception.
"""
raise NotImplementedError("Webhdfs in luigi doesn't implement chmod")
def chown(self, path, owner, group, recursive=False):
"""
Raise a NotImplementedError exception.
"""
raise NotImplementedError("Webhdfs in luigi doesn't implement chown")
def count(self, path):
"""
Raise a NotImplementedError exception.
"""
raise NotImplementedError("Webhdfs in luigi doesn't implement count")
def copy(self, path, destination):
"""
Raise a NotImplementedError exception.
"""
raise NotImplementedError("Webhdfs in luigi doesn't implement copy")
def put(self, local_path, destination):
"""
Restricted version of upload
"""
self.upload(local_path, destination)
def get(self, path, local_destination):
"""
Restricted version of download
"""
self.download(path, local_destination)
def listdir(self, path, ignore_directories=False, ignore_files=False,
include_size=False, include_type=False, include_time=False,
recursive=False):
assert not recursive
return self.client.list(path, status=False)
def touchz(self, path):
"""
To touchz using the web hdfs "write" cmd.
"""
self.client.write(path, data='', overwrite=False)
| 1 | 18,331 | Maybe use ChoiceParameter? | spotify-luigi | py |
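A minimal sketch of what the reviewer's ChoiceParameter suggestion could look like for this config class, assuming the three values named in the patch description (insecure, kerberos, token) are the complete set:
import luigi


class webhdfs(luigi.Config):
    # Hedged sketch: ChoiceParameter validates the value against `choices` at
    # parse time, so an unsupported client type fails fast instead of
    # surfacing later when the hdfs client is constructed.
    client_type = luigi.ChoiceParameter(
        default='insecure',
        choices=['insecure', 'kerberos', 'token'],
        description='Type of client to use. One of insecure, kerberos or token')
    token = luigi.Parameter(
        default='',
        description='Hadoop delegation token, only used when client_type="token"')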
@@ -2,11 +2,12 @@ class ProposalsController < ApplicationController
include TokenAuth
before_filter :authenticate_user!, except: :approve
- before_filter ->{authorize self.proposal}, only: :show
+ before_filter ->{authorize self.proposal}, only: [:show, :cancel, :cancel_form]
before_filter :needs_token_on_get, only: :approve
before_filter :validate_access, only: :approve
helper_method :display_status
add_template_helper ProposalsHelper
+ rescue_from Pundit::NotAuthorizedError, with: :auth_errors
def show
@proposal = self.proposal.decorate | 1 | class ProposalsController < ApplicationController
include TokenAuth
before_filter :authenticate_user!, except: :approve
before_filter ->{authorize self.proposal}, only: :show
before_filter :needs_token_on_get, only: :approve
before_filter :validate_access, only: :approve
helper_method :display_status
add_template_helper ProposalsHelper
def show
@proposal = self.proposal.decorate
@show_comments = true
@include_comments_files = true
end
def index
@proposals = self.chronological_proposals
@CLOSED_PROPOSAL_LIMIT = 10
end
def archive
@proposals = self.chronological_proposals.closed
end
def approve
approval = self.proposal.approval_for(current_user)
if approval.user.delegates_to?(current_user)
# assign them to the approval
approval.update_attributes!(user: current_user)
end
approval.approve!
flash[:success] = "You have approved #{proposal.public_identifier}."
redirect_to proposal
end
# @todo - this is acting more like an index; rename existing #index to #mine
# or similar, then rename #query to #index
def query
@proposals = self.proposals
@start_date = self.param_date(:start_date)
@end_date = self.param_date(:end_date)
@text = params[:text]
if @start_date
@proposals = @proposals.where('created_at >= ?', @start_date)
end
if @end_date
@proposals = @proposals.where('created_at < ?', @end_date)
end
if @text
@proposals = ProposalSearch.new(@proposals).execute(@text)
else
@proposals = @proposals.order('created_at DESC')
end
# TODO limit/paginate results
end
protected
def proposal
@cached_proposal ||= Proposal.find params[:id]
end
def proposals
policy_scope(Proposal)
end
def chronological_proposals
self.proposals.order('created_at DESC')
end
end
| 1 | 13,387 | Not positive this is needed, given the `TokenAuth` include | 18F-C2 | rb
@@ -50,3 +50,13 @@ class ErrorDescriptionDict(TypedDict):
CheckerStats = Dict[
str, Union[int, "Counter[str]", List, Dict[str, Union[int, str, Dict[str, int]]]]
]
+
+
+# Tuple with information about the location of a to-be-displayed message
+class MessageLocationTuple(NamedTuple):
+ abspath: str
+ path: str
+ module: str
+ obj: str
+ line: int
+ column: int | 1 | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""A collection of typing utilities."""
import sys
from typing import TYPE_CHECKING, Dict, List, NamedTuple, Union
if TYPE_CHECKING:
from typing import Counter # typing.Counter added in Python 3.6.1
if sys.version_info >= (3, 8):
from typing import Literal, TypedDict
else:
from typing_extensions import Literal, TypedDict
class FileItem(NamedTuple):
"""Represents data about a file handled by pylint
Each file item has:
- name: full name of the module
- filepath: path of the file
- modname: module name
"""
name: str
filepath: str
modpath: str
class ModuleDescriptionDict(TypedDict):
"""Represents data about a checked module"""
path: str
name: str
isarg: bool
basepath: str
basename: str
class ErrorDescriptionDict(TypedDict):
"""Represents data about errors collected during checking of a module"""
key: Literal["fatal"]
mod: str
ex: Union[ImportError, SyntaxError]
# The base type of the "stats" attribute of a checker
CheckerStats = Dict[
str, Union[int, "Counter[str]", List, Dict[str, Union[int, str, Dict[str, int]]]]
]
| 1 | 16,074 | Why not use a docstring for it? | PyCQA-pylint | py |
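A hedged sketch of the docstring variant the reviewer asks about: the comment above the class simply becomes its docstring, with the field names copied from the patch.
from typing import NamedTuple


class MessageLocationTuple(NamedTuple):
    """Tuple with information about the location of a to-be-displayed message."""

    abspath: str
    path: str
    module: str
    obj: str
    line: int
    column: int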
@@ -3672,4 +3672,17 @@ public class BesuCommandTest extends CommandTestAbstract {
assertThat(commandErrorOutput.toString())
.contains("Invalid value for option", "--Xws-timeout-seconds", "abc", "is not a long");
}
+
+ @Test
+ public void assertThatDuplicatePortSpecifiedFails() {
+ parseCommand("--p2p-port=9", "--rpc-http-port=10", "--rpc-ws-port=10");
+ assertThat(commandErrorOutput.toString())
+ .contains("Port number '10' has been specified multiple times.");
+ }
+
+ @Test
+ public void assertThatDuplicatePortZeroSucceeds() {
+ parseCommand("--p2p-port=0", "--rpc-http-port=0", "--rpc-ws-port=0");
+ assertThat(commandErrorOutput.toString()).isEmpty();
+ }
} | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.cli;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Arrays.asList;
import static org.assertj.core.api.Assertions.assertThat;
import static org.hyperledger.besu.cli.config.NetworkName.CLASSIC;
import static org.hyperledger.besu.cli.config.NetworkName.DEV;
import static org.hyperledger.besu.cli.config.NetworkName.GOERLI;
import static org.hyperledger.besu.cli.config.NetworkName.KOTTI;
import static org.hyperledger.besu.cli.config.NetworkName.MAINNET;
import static org.hyperledger.besu.cli.config.NetworkName.MORDOR;
import static org.hyperledger.besu.cli.config.NetworkName.RINKEBY;
import static org.hyperledger.besu.cli.config.NetworkName.ROPSTEN;
import static org.hyperledger.besu.cli.util.CommandLineUtils.DEPENDENCY_WARNING_MSG;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.ETH;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.NET;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.PERM;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.WEB3;
import static org.hyperledger.besu.ethereum.p2p.config.DiscoveryConfiguration.MAINNET_BOOTSTRAP_NODES;
import static org.hyperledger.besu.nat.kubernetes.KubernetesNatManager.DEFAULT_BESU_SERVICE_NAME_FILTER;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.ArgumentMatchers.isNotNull;
import static org.mockito.Mockito.atLeast;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoInteractions;
import static org.mockito.Mockito.when;
import org.hyperledger.besu.BesuInfo;
import org.hyperledger.besu.cli.config.EthNetworkConfig;
import org.hyperledger.besu.config.GenesisConfigFile;
import org.hyperledger.besu.config.experimental.ExperimentalEIPs;
import org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration;
import org.hyperledger.besu.ethereum.api.handlers.TimeoutOptions;
import org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApi;
import org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration;
import org.hyperledger.besu.ethereum.api.tls.TlsConfiguration;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.Hash;
import org.hyperledger.besu.ethereum.core.MiningParameters;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.core.Wei;
import org.hyperledger.besu.ethereum.eth.sync.SyncMode;
import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeURL;
import org.hyperledger.besu.ethereum.permissioning.LocalPermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.PermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.SmartContractPermissioningConfiguration;
import org.hyperledger.besu.ethereum.worldstate.PrunerConfiguration;
import org.hyperledger.besu.metrics.StandardMetricCategory;
import org.hyperledger.besu.metrics.prometheus.MetricsConfiguration;
import org.hyperledger.besu.nat.NatMethod;
import org.hyperledger.besu.util.number.Fraction;
import org.hyperledger.besu.util.number.Percentage;
import java.io.File;
import java.io.IOException;
import java.math.BigInteger;
import java.net.URI;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import com.google.common.collect.Lists;
import com.google.common.io.Resources;
import io.vertx.core.json.JsonObject;
import org.apache.commons.text.StringEscapeUtils;
import org.apache.logging.log4j.Level;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.toml.Toml;
import org.apache.tuweni.toml.TomlParseResult;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
import picocli.CommandLine;
public class BesuCommandTest extends CommandTestAbstract {
private static final String ENCLAVE_URI = "http://1.2.3.4:5555";
private static final String ENCLAVE_PUBLIC_KEY = "A1aVtMxLCUHmBVHXoZzzBgPbW/wj5axDpW9X8l91SGo=";
private static final String VALID_NODE_ID =
"6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0";
private static final String PERMISSIONING_CONFIG_TOML = "/permissioning_config.toml";
private static final JsonRpcConfiguration DEFAULT_JSON_RPC_CONFIGURATION;
private static final GraphQLConfiguration DEFAULT_GRAPH_QL_CONFIGURATION;
private static final WebSocketConfiguration DEFAULT_WEB_SOCKET_CONFIGURATION;
private static final MetricsConfiguration DEFAULT_METRICS_CONFIGURATION;
private static final int GENESIS_CONFIG_TEST_CHAINID = 3141592;
private static final JsonObject GENESIS_VALID_JSON =
(new JsonObject())
.put("config", (new JsonObject()).put("chainId", GENESIS_CONFIG_TEST_CHAINID));
private static final JsonObject GENESIS_INVALID_DATA =
(new JsonObject()).put("config", new JsonObject());
private static final String ENCLAVE_PUBLIC_KEY_PATH =
BesuCommand.class.getResource("/orion_publickey.pub").getPath();
private final String[] validENodeStrings = {
"enode://" + VALID_NODE_ID + "@192.168.0.1:4567",
"enode://" + VALID_NODE_ID + "@192.168.0.2:4567",
"enode://" + VALID_NODE_ID + "@192.168.0.3:4567"
};
static {
DEFAULT_JSON_RPC_CONFIGURATION = JsonRpcConfiguration.createDefault();
DEFAULT_GRAPH_QL_CONFIGURATION = GraphQLConfiguration.createDefault();
DEFAULT_WEB_SOCKET_CONFIGURATION = WebSocketConfiguration.createDefault();
DEFAULT_METRICS_CONFIGURATION = MetricsConfiguration.builder().build();
}
@Test
public void callingHelpSubCommandMustDisplayUsage() {
parseCommand("--help");
final String expectedOutputStart = String.format("Usage:%n%nbesu [OPTIONS] [COMMAND]");
assertThat(commandOutput.toString()).startsWith(expectedOutputStart);
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void callingHelpDisplaysDefaultRpcApisCorrectly() {
parseCommand("--help");
assertThat(commandOutput.toString()).contains("default: [ETH, NET, WEB3]");
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void callingVersionDisplayBesuInfoVersion() {
parseCommand("--version");
assertThat(commandOutput.toString()).isEqualToIgnoringWhitespace(BesuInfo.version());
assertThat(commandErrorOutput.toString()).isEmpty();
}
// Testing default values
@Test
public void callingBesuCommandWithoutOptionsMustSyncWithDefaultValues() throws Exception {
parseCommand();
final ArgumentCaptor<EthNetworkConfig> ethNetworkArg =
ArgumentCaptor.forClass(EthNetworkConfig.class);
verify(mockRunnerBuilder).discovery(eq(true));
verify(mockRunnerBuilder)
.ethNetworkConfig(
new EthNetworkConfig(
EthNetworkConfig.jsonConfig(MAINNET),
EthNetworkConfig.MAINNET_NETWORK_ID,
MAINNET_BOOTSTRAP_NODES));
verify(mockRunnerBuilder).p2pAdvertisedHost(eq("127.0.0.1"));
verify(mockRunnerBuilder).p2pListenPort(eq(30303));
verify(mockRunnerBuilder).maxPeers(eq(25));
verify(mockRunnerBuilder).fractionRemoteConnectionsAllowed(eq(0.6f));
verify(mockRunnerBuilder).jsonRpcConfiguration(eq(DEFAULT_JSON_RPC_CONFIGURATION));
verify(mockRunnerBuilder).graphQLConfiguration(eq(DEFAULT_GRAPH_QL_CONFIGURATION));
verify(mockRunnerBuilder).webSocketConfiguration(eq(DEFAULT_WEB_SOCKET_CONFIGURATION));
verify(mockRunnerBuilder).metricsConfiguration(eq(DEFAULT_METRICS_CONFIGURATION));
verify(mockRunnerBuilder).ethNetworkConfig(ethNetworkArg.capture());
verify(mockRunnerBuilder).autoLogBloomCaching(eq(true));
verify(mockRunnerBuilder).build();
verify(mockControllerBuilderFactory).fromEthNetworkConfig(ethNetworkArg.capture(), any());
final ArgumentCaptor<MiningParameters> miningArg =
ArgumentCaptor.forClass(MiningParameters.class);
verify(mockControllerBuilder).synchronizerConfiguration(syncConfigurationCaptor.capture());
verify(mockControllerBuilder).dataDirectory(isNotNull());
verify(mockControllerBuilder).miningParameters(miningArg.capture());
verify(mockControllerBuilder).nodeKey(isNotNull());
verify(mockControllerBuilder).storageProvider(storageProviderArgumentCaptor.capture());
verify(mockControllerBuilder).targetGasLimit(eq(Optional.empty()));
verify(mockControllerBuilder).build();
assertThat(storageProviderArgumentCaptor.getValue()).isNotNull();
assertThat(syncConfigurationCaptor.getValue().getSyncMode()).isEqualTo(SyncMode.FAST);
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(miningArg.getValue().getCoinbase()).isEqualTo(Optional.empty());
assertThat(miningArg.getValue().getMinTransactionGasPrice()).isEqualTo(Wei.of(1000));
assertThat(miningArg.getValue().getExtraData()).isEqualTo(Bytes.EMPTY);
assertThat(ethNetworkArg.getValue().getNetworkId()).isEqualTo(1);
assertThat(ethNetworkArg.getValue().getBootNodes()).isEqualTo(MAINNET_BOOTSTRAP_NODES);
}
// Testing each option
@Test
public void callingWithConfigOptionButNoConfigFileShouldDisplayHelp() {
parseCommand("--config-file");
final String expectedOutputStart =
"Missing required parameter for option '--config-file' (<FILE>)";
assertThat(commandErrorOutput.toString()).startsWith(expectedOutputStart);
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void callingWithConfigOptionButNonExistingFileShouldDisplayHelp() throws IOException {
final Path tempConfigFilePath = createTempFile("an-invalid-file-name-without-extension", "");
parseCommand("--config-file", tempConfigFilePath.toString());
final String expectedOutputStart =
"Unable to read TOML configuration file " + tempConfigFilePath;
assertThat(commandErrorOutput.toString()).startsWith(expectedOutputStart);
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void callingWithConfigOptionButTomlFileNotFoundShouldDisplayHelp() {
parseCommand("--config-file", "./an-invalid-file-name-sdsd87sjhqoi34io23.toml");
final String expectedOutputStart = "Unable to read TOML configuration, file not found.";
assertThat(commandErrorOutput.toString()).startsWith(expectedOutputStart);
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void callingWithConfigOptionButInvalidContentTomlFileShouldDisplayHelp() throws Exception {
// We write a config file to prevent an invalid file in the resource folder from raising errors in
// code checks (CI + IDE)
final Path tempConfigFile = createTempFile("invalid_config.toml", ".");
parseCommand("--config-file", tempConfigFile.toString());
final String expectedOutputStart =
"Invalid TOML configuration: Unexpected '.', expected a-z, A-Z, 0-9, ', \", a table key, "
+ "a newline, or end-of-input (line 1, column 1)";
assertThat(commandErrorOutput.toString()).startsWith(expectedOutputStart);
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void callingWithNoBootnodesConfig() throws Exception {
final URL configFile = this.getClass().getResource("/no_bootnodes.toml");
final Path toml = createTempFile("toml", Resources.toString(configFile, UTF_8));
parseCommand("--config-file", toml.toAbsolutePath().toString());
verify(mockRunnerBuilder).ethNetworkConfig(ethNetworkConfigArgumentCaptor.capture());
assertThat(ethNetworkConfigArgumentCaptor.getValue().getBootNodes()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void callingWithConfigOptionButInvalidValueTomlFileShouldDisplayHelp() throws Exception {
// We write a config file to prevent an invalid file in the resource folder from raising errors in
// code checks (CI + IDE)
final Path tempConfigFile = createTempFile("invalid_config.toml", "tester===========.......");
parseCommand("--config-file", tempConfigFile.toString());
final String expectedOutputStart =
"Invalid TOML configuration: Unexpected '=', expected ', \", ''', \"\"\", a number, "
+ "a boolean, a date/time, an array, or a table (line 1, column 8)";
assertThat(commandErrorOutput.toString()).startsWith(expectedOutputStart);
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void overrideDefaultValuesIfKeyIsPresentInConfigFile() throws IOException {
final URL configFile = this.getClass().getResource("/complete_config.toml");
final Path genesisFile = createFakeGenesisFile(GENESIS_VALID_JSON);
final File dataFolder = temp.newFolder();
final String updatedConfig =
Resources.toString(configFile, UTF_8)
.replace("/opt/besu/genesis.json", escapeTomlString(genesisFile.toString()))
.replace(
"data-path=\"/opt/besu\"",
"data-path=\"" + escapeTomlString(dataFolder.getPath()) + "\"");
final Path toml = createTempFile("toml", updatedConfig.getBytes(UTF_8));
final List<RpcApi> expectedApis = asList(ETH, WEB3);
final JsonRpcConfiguration jsonRpcConfiguration = JsonRpcConfiguration.createDefault();
jsonRpcConfiguration.setEnabled(false);
jsonRpcConfiguration.setHost("5.6.7.8");
jsonRpcConfiguration.setPort(5678);
jsonRpcConfiguration.setCorsAllowedDomains(Collections.emptyList());
jsonRpcConfiguration.setRpcApis(expectedApis);
final GraphQLConfiguration graphQLConfiguration = GraphQLConfiguration.createDefault();
graphQLConfiguration.setEnabled(false);
graphQLConfiguration.setHost("6.7.8.9");
graphQLConfiguration.setPort(6789);
final WebSocketConfiguration webSocketConfiguration = WebSocketConfiguration.createDefault();
webSocketConfiguration.setEnabled(false);
webSocketConfiguration.setHost("9.10.11.12");
webSocketConfiguration.setPort(9101);
webSocketConfiguration.setRpcApis(expectedApis);
final MetricsConfiguration metricsConfiguration =
MetricsConfiguration.builder().enabled(false).host("8.6.7.5").port(309).build();
parseCommand("--config-file", toml.toString());
verify(mockRunnerBuilder).discovery(eq(false));
verify(mockRunnerBuilder).ethNetworkConfig(ethNetworkConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).p2pAdvertisedHost(eq("1.2.3.4"));
verify(mockRunnerBuilder).p2pListenPort(eq(1234));
verify(mockRunnerBuilder).maxPeers(eq(42));
verify(mockRunnerBuilder).jsonRpcConfiguration(eq(jsonRpcConfiguration));
verify(mockRunnerBuilder).graphQLConfiguration(eq(graphQLConfiguration));
verify(mockRunnerBuilder).webSocketConfiguration(eq(webSocketConfiguration));
verify(mockRunnerBuilder).metricsConfiguration(eq(metricsConfiguration));
verify(mockRunnerBuilder).build();
final List<EnodeURL> nodes =
asList(
EnodeURL.fromString("enode://" + VALID_NODE_ID + "@192.168.0.1:4567"),
EnodeURL.fromString("enode://" + VALID_NODE_ID + "@192.168.0.1:4567"),
EnodeURL.fromString("enode://" + VALID_NODE_ID + "@192.168.0.1:4567"));
assertThat(ethNetworkConfigArgumentCaptor.getValue().getBootNodes()).isEqualTo(nodes);
final EthNetworkConfig networkConfig =
new EthNetworkConfig.Builder(EthNetworkConfig.getNetworkConfig(MAINNET))
.setNetworkId(BigInteger.valueOf(42))
.setGenesisConfig(encodeJsonGenesis(GENESIS_VALID_JSON))
.setBootNodes(nodes)
.build();
verify(mockControllerBuilder).dataDirectory(eq(dataFolder.toPath()));
verify(mockControllerBuilderFactory).fromEthNetworkConfig(eq(networkConfig), any());
verify(mockControllerBuilder).synchronizerConfiguration(syncConfigurationCaptor.capture());
assertThat(syncConfigurationCaptor.getValue().getSyncMode()).isEqualTo(SyncMode.FAST);
assertThat(syncConfigurationCaptor.getValue().getFastSyncMinimumPeerCount()).isEqualTo(13);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void nodePermissionsSmartContractWithoutOptionMustError() {
parseCommand("--permissions-nodes-contract-address");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandErrorOutput.toString())
.startsWith("Missing required parameter for option '--permissions-nodes-contract-address'");
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void nodePermissionsEnabledWithoutContractAddressMustError() {
parseCommand("--permissions-nodes-contract-enabled");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandErrorOutput.toString())
.contains("No node permissioning contract address specified");
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void nodePermissionsEnabledWithInvalidContractAddressMustError() {
parseCommand(
"--permissions-nodes-contract-enabled",
"--permissions-nodes-contract-address",
"invalid-smart-contract-address");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandErrorOutput.toString()).contains("Invalid value");
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void nodePermissionsEnabledWithTooShortContractAddressMustError() {
parseCommand(
"--permissions-nodes-contract-enabled", "--permissions-nodes-contract-address", "0x1234");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandErrorOutput.toString()).contains("Invalid value");
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void nodePermissionsSmartContractMustUseOption() {
final String smartContractAddress = "0x0000000000000000000000000000000000001234";
parseCommand(
"--permissions-nodes-contract-enabled",
"--permissions-nodes-contract-address",
smartContractAddress);
final SmartContractPermissioningConfiguration smartContractPermissioningConfiguration =
new SmartContractPermissioningConfiguration();
smartContractPermissioningConfiguration.setNodeSmartContractAddress(
Address.fromHexString(smartContractAddress));
smartContractPermissioningConfiguration.setSmartContractNodeAllowlistEnabled(true);
verify(mockRunnerBuilder)
.permissioningConfiguration(permissioningConfigurationArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
final PermissioningConfiguration config = permissioningConfigurationArgumentCaptor.getValue();
assertThat(config.getSmartContractConfig().get())
.isEqualToComparingFieldByField(smartContractPermissioningConfiguration);
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void accountPermissionsSmartContractWithoutOptionMustError() {
parseCommand("--permissions-accounts-contract-address");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandErrorOutput.toString())
.startsWith(
"Missing required parameter for option '--permissions-accounts-contract-address'");
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void accountPermissionsEnabledWithoutContractAddressMustError() {
parseCommand("--permissions-accounts-contract-enabled");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandErrorOutput.toString())
.contains("No account permissioning contract address specified");
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void accountPermissionsEnabledWithInvalidContractAddressMustError() {
parseCommand(
"--permissions-accounts-contract-enabled",
"--permissions-accounts-contract-address",
"invalid-smart-contract-address");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandErrorOutput.toString()).contains("Invalid value");
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void accountPermissionsEnabledWithTooShortContractAddressMustError() {
parseCommand(
"--permissions-accounts-contract-enabled",
"--permissions-accounts-contract-address",
"0x1234");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandErrorOutput.toString()).contains("Invalid value");
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void accountPermissionsSmartContractMustUseOption() {
final String smartContractAddress = "0x0000000000000000000000000000000000001234";
parseCommand(
"--permissions-accounts-contract-enabled",
"--permissions-accounts-contract-address",
smartContractAddress);
final SmartContractPermissioningConfiguration smartContractPermissioningConfiguration =
new SmartContractPermissioningConfiguration();
smartContractPermissioningConfiguration.setAccountSmartContractAddress(
Address.fromHexString(smartContractAddress));
smartContractPermissioningConfiguration.setSmartContractAccountAllowlistEnabled(true);
verify(mockRunnerBuilder)
.permissioningConfiguration(permissioningConfigurationArgumentCaptor.capture());
final PermissioningConfiguration permissioningConfiguration =
permissioningConfigurationArgumentCaptor.getValue();
assertThat(permissioningConfiguration.getSmartContractConfig()).isPresent();
final SmartContractPermissioningConfiguration effectiveSmartContractConfig =
permissioningConfiguration.getSmartContractConfig().get();
assertThat(effectiveSmartContractConfig.isSmartContractAccountAllowlistEnabled()).isTrue();
assertThat(effectiveSmartContractConfig.getAccountSmartContractAddress())
.isEqualTo(Address.fromHexString(smartContractAddress));
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void nodePermissioningTomlPathWithoutOptionMustDisplayUsage() {
parseCommand("--permissions-nodes-config-file");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandErrorOutput.toString())
.startsWith("Missing required parameter for option '--permissions-nodes-config-file'");
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void accountPermissioningTomlPathWithoutOptionMustDisplayUsage() {
parseCommand("--permissions-accounts-config-file");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandErrorOutput.toString())
.startsWith("Missing required parameter for option '--permissions-accounts-config-file'");
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void nodePermissioningEnabledWithNonexistentConfigFileMustError() {
parseCommand(
"--permissions-nodes-config-file-enabled",
"--permissions-nodes-config-file",
"file-does-not-exist");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandErrorOutput.toString()).contains("Configuration file does not exist");
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void accountPermissioningEnabledWithNonexistentConfigFileMustError() {
parseCommand(
"--permissions-accounts-config-file-enabled",
"--permissions-accounts-config-file",
"file-does-not-exist");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandErrorOutput.toString()).contains("Configuration file does not exist");
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void nodePermissioningTomlFileWithNoPermissionsEnabledMustNotError() throws IOException {
final URL configFile = this.getClass().getResource(PERMISSIONING_CONFIG_TOML);
final Path permToml = createTempFile("toml", Resources.toByteArray(configFile));
parseCommand("--permissions-nodes-config-file", permToml.toString());
verify(mockRunnerBuilder).build();
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void accountPermissioningTomlFileWithNoPermissionsEnabledMustNotError()
throws IOException {
final URL configFile = this.getClass().getResource(PERMISSIONING_CONFIG_TOML);
final Path permToml = createTempFile("toml", Resources.toByteArray(configFile));
parseCommand("--permissions-accounts-config-file", permToml.toString());
verify(mockRunnerBuilder).build();
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void defaultPermissionsTomlFileWithNoPermissionsEnabledMustNotError() {
parseCommand("--p2p-enabled", "false");
verify(mockRunnerBuilder).build();
assertThat(commandErrorOutput.toString()).doesNotContain("no permissions enabled");
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void nodePermissioningTomlPathMustUseOption() throws IOException {
final List<EnodeURL> allowedNodes =
Lists.newArrayList(
EnodeURL.fromString(
"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@192.168.0.9:4567"),
EnodeURL.fromString(
"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@192.169.0.9:4568"));
final URL configFile = this.getClass().getResource(PERMISSIONING_CONFIG_TOML);
final Path permToml = createTempFile("toml", Resources.toByteArray(configFile));
final String allowedNodesString =
allowedNodes.stream().map(Object::toString).collect(Collectors.joining(","));
parseCommand(
"--permissions-nodes-config-file-enabled",
"--permissions-nodes-config-file",
permToml.toString(),
"--bootnodes",
allowedNodesString);
final LocalPermissioningConfiguration localPermissioningConfiguration =
LocalPermissioningConfiguration.createDefault();
localPermissioningConfiguration.setNodePermissioningConfigFilePath(permToml.toString());
localPermissioningConfiguration.setNodeAllowlist(allowedNodes);
verify(mockRunnerBuilder)
.permissioningConfiguration(permissioningConfigurationArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
final PermissioningConfiguration config = permissioningConfigurationArgumentCaptor.getValue();
assertThat(config.getLocalConfig().get())
.isEqualToComparingFieldByField(localPermissioningConfiguration);
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void accountPermissioningTomlPathMustUseOption() throws IOException {
final URL configFile = this.getClass().getResource(PERMISSIONING_CONFIG_TOML);
final Path permToml = createTempFile("toml", Resources.toByteArray(configFile));
parseCommand(
"--permissions-accounts-config-file-enabled",
"--permissions-accounts-config-file",
permToml.toString());
final LocalPermissioningConfiguration localPermissioningConfiguration =
LocalPermissioningConfiguration.createDefault();
localPermissioningConfiguration.setAccountPermissioningConfigFilePath(permToml.toString());
localPermissioningConfiguration.setAccountAllowlist(
Collections.singletonList("0x0000000000000000000000000000000000000009"));
verify(mockRunnerBuilder)
.permissioningConfiguration(permissioningConfigurationArgumentCaptor.capture());
final PermissioningConfiguration permissioningConfiguration =
permissioningConfigurationArgumentCaptor.getValue();
assertThat(permissioningConfiguration.getLocalConfig()).isPresent();
final LocalPermissioningConfiguration effectiveLocalPermissioningConfig =
permissioningConfiguration.getLocalConfig().get();
assertThat(effectiveLocalPermissioningConfig.isAccountAllowlistEnabled()).isTrue();
assertThat(effectiveLocalPermissioningConfig.getAccountPermissioningConfigFilePath())
.isEqualTo(permToml.toString());
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void tomlThatConfiguresEverythingExceptPermissioningToml() throws IOException {
// Load a TOML that configures literally everything (except permissioning TOML config)
final URL configFile = this.getClass().getResource("/everything_config.toml");
final Path toml = createTempFile("toml", Resources.toByteArray(configFile));
// Parse it.
final CommandLine.Model.CommandSpec spec = parseCommand("--config-file", toml.toString()).spec;
final TomlParseResult tomlResult = Toml.parse(toml);
// Verify we configured everything
final HashSet<CommandLine.Model.OptionSpec> options = new HashSet<>(spec.options());
// Except for meta-options
options.remove(spec.optionsMap().get("--config-file"));
options.remove(spec.optionsMap().get("--help"));
options.remove(spec.optionsMap().get("--version"));
for (final String tomlKey : tomlResult.keySet()) {
final CommandLine.Model.OptionSpec optionSpec = spec.optionsMap().get("--" + tomlKey);
assertThat(optionSpec)
.describedAs("Option '%s' should be a configurable option.", tomlKey)
.isNotNull();
// Verify TOML stores it by the appropriate type
if (optionSpec.type().equals(Boolean.class)) {
tomlResult.getBoolean(tomlKey);
} else if (optionSpec.isMultiValue() || optionSpec.arity().max > 1) {
tomlResult.getArray(tomlKey);
} else if (optionSpec.type().equals(Double.class)) {
tomlResult.getDouble(tomlKey);
} else if (Number.class.isAssignableFrom(optionSpec.type())) {
tomlResult.getLong(tomlKey);
} else if (Wei.class.isAssignableFrom(optionSpec.type())) {
tomlResult.getLong(tomlKey);
} else if (Fraction.class.isAssignableFrom(optionSpec.type())) {
tomlResult.getDouble(tomlKey);
} else if (Percentage.class.isAssignableFrom(optionSpec.type())) {
tomlResult.getLong(tomlKey);
} else {
tomlResult.getString(tomlKey);
}
options.remove(optionSpec);
}
assertThat(
options.stream()
.filter(optionSpec -> !optionSpec.hidden())
.map(CommandLine.Model.OptionSpec::longestName))
.isEmpty();
}
@Test
public void noOverrideDefaultValuesIfKeyIsNotPresentInConfigFile() throws IOException {
final String configFile = this.getClass().getResource("/partial_config.toml").getFile();
parseCommand("--config-file", configFile);
final JsonRpcConfiguration jsonRpcConfiguration = JsonRpcConfiguration.createDefault();
final GraphQLConfiguration graphQLConfiguration = GraphQLConfiguration.createDefault();
final WebSocketConfiguration webSocketConfiguration = WebSocketConfiguration.createDefault();
final MetricsConfiguration metricsConfiguration = MetricsConfiguration.builder().build();
verify(mockRunnerBuilder).discovery(eq(true));
verify(mockRunnerBuilder)
.ethNetworkConfig(
new EthNetworkConfig(
EthNetworkConfig.jsonConfig(MAINNET),
EthNetworkConfig.MAINNET_NETWORK_ID,
MAINNET_BOOTSTRAP_NODES));
verify(mockRunnerBuilder).p2pAdvertisedHost(eq("127.0.0.1"));
verify(mockRunnerBuilder).p2pListenPort(eq(30303));
verify(mockRunnerBuilder).maxPeers(eq(25));
verify(mockRunnerBuilder).limitRemoteWireConnectionsEnabled(eq(true));
verify(mockRunnerBuilder).fractionRemoteConnectionsAllowed(eq(0.6f));
verify(mockRunnerBuilder).jsonRpcConfiguration(eq(jsonRpcConfiguration));
verify(mockRunnerBuilder).graphQLConfiguration(eq(graphQLConfiguration));
verify(mockRunnerBuilder).webSocketConfiguration(eq(webSocketConfiguration));
verify(mockRunnerBuilder).metricsConfiguration(eq(metricsConfiguration));
verify(mockRunnerBuilder).build();
verify(mockControllerBuilder).build();
verify(mockControllerBuilder).synchronizerConfiguration(syncConfigurationCaptor.capture());
final SynchronizerConfiguration syncConfig = syncConfigurationCaptor.getValue();
assertThat(syncConfig.getSyncMode()).isEqualTo(SyncMode.FAST);
assertThat(syncConfig.getFastSyncMinimumPeerCount()).isEqualTo(5);
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void envVariableOverridesValueFromConfigFile() {
final String configFile = this.getClass().getResource("/partial_config.toml").getFile();
final String expectedCoinbase = "0x0000000000000000000000000000000000000004";
setEnvironmentVariable("BESU_MINER_COINBASE", expectedCoinbase);
parseCommand("--config-file", configFile);
verify(mockControllerBuilder)
.miningParameters(
new MiningParameters(
Address.fromHexString(expectedCoinbase),
DefaultCommandValues.DEFAULT_MIN_TRANSACTION_GAS_PRICE,
DefaultCommandValues.DEFAULT_EXTRA_DATA,
false));
}
@Test
public void cliOptionOverridesEnvVariableAndConfig() {
final String configFile = this.getClass().getResource("/partial_config.toml").getFile();
final String expectedCoinbase = "0x0000000000000000000000000000000000000006";
setEnvironmentVariable("BESU_MINER_COINBASE", "0x0000000000000000000000000000000000000004");
parseCommand("--config-file", configFile, "--miner-coinbase", expectedCoinbase);
verify(mockControllerBuilder)
.miningParameters(
new MiningParameters(
Address.fromHexString(expectedCoinbase),
DefaultCommandValues.DEFAULT_MIN_TRANSACTION_GAS_PRICE,
DefaultCommandValues.DEFAULT_EXTRA_DATA,
false));
}
@Test
public void nodekeyOptionMustBeUsed() throws Exception {
final File file = new File("./specific/enclavePrivateKey");
parseCommand("--node-private-key-file", file.getPath());
verify(mockControllerBuilder).dataDirectory(isNotNull());
verify(mockControllerBuilder).nodeKey(isNotNull());
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void dataDirOptionMustBeUsed() throws Exception {
final Path path = Paths.get(".");
parseCommand("--data-path", path.toString());
verify(mockControllerBuilder).dataDirectory(pathArgumentCaptor.capture());
verify(mockControllerBuilder).nodeKey(isNotNull());
verify(mockControllerBuilder).build();
assertThat(pathArgumentCaptor.getValue()).isEqualByComparingTo(path.toAbsolutePath());
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void genesisPathOptionMustBeUsed() throws Exception {
final Path genesisFile = createFakeGenesisFile(GENESIS_VALID_JSON);
final ArgumentCaptor<EthNetworkConfig> networkArg =
ArgumentCaptor.forClass(EthNetworkConfig.class);
parseCommand("--genesis-file", genesisFile.toString());
verify(mockControllerBuilderFactory).fromEthNetworkConfig(networkArg.capture(), any());
verify(mockControllerBuilder).build();
assertThat(networkArg.getValue().getGenesisConfig())
.isEqualTo(encodeJsonGenesis(GENESIS_VALID_JSON));
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void genesisAndNetworkMustNotBeUsedTogether() throws Exception {
final Path genesisFile = createFakeGenesisFile(GENESIS_VALID_JSON);
parseCommand("--genesis-file", genesisFile.toString(), "--network", "rinkeby");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.startsWith("--network option and --genesis-file option can't be used at the same time.");
}
@Test
public void defaultNetworkIdAndBootnodesForCustomNetworkOptions() throws Exception {
final Path genesisFile = createFakeGenesisFile(GENESIS_VALID_JSON);
parseCommand("--genesis-file", genesisFile.toString());
final ArgumentCaptor<EthNetworkConfig> networkArg =
ArgumentCaptor.forClass(EthNetworkConfig.class);
verify(mockControllerBuilderFactory).fromEthNetworkConfig(networkArg.capture(), any());
verify(mockControllerBuilder).build();
assertThat(networkArg.getValue().getGenesisConfig())
.isEqualTo(encodeJsonGenesis(GENESIS_VALID_JSON));
assertThat(networkArg.getValue().getBootNodes()).isEmpty();
assertThat(networkArg.getValue().getNetworkId()).isEqualTo(GENESIS_CONFIG_TEST_CHAINID);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void defaultNetworkIdForInvalidGenesisMustBeMainnetNetworkId() throws Exception {
final Path genesisFile = createFakeGenesisFile(GENESIS_INVALID_DATA);
parseCommand("--genesis-file", genesisFile.toString());
final ArgumentCaptor<EthNetworkConfig> networkArg =
ArgumentCaptor.forClass(EthNetworkConfig.class);
verify(mockControllerBuilderFactory).fromEthNetworkConfig(networkArg.capture(), any());
verify(mockControllerBuilder).build();
assertThat(networkArg.getValue().getGenesisConfig())
.isEqualTo(encodeJsonGenesis(GENESIS_INVALID_DATA));
// assertThat(networkArg.getValue().getNetworkId())
// .isEqualTo(EthNetworkConfig.getNetworkConfig(MAINNET).getNetworkId());
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void predefinedNetworkIdsMustBeEqualToChainIds() {
// check the network id against the one in the mainnet genesis config
// it implies that EthNetworkConfig.mainnet().getNetworkId() returns a value equal to the chain
// id in this network genesis file.
final GenesisConfigFile genesisConfigFile =
GenesisConfigFile.fromConfig(EthNetworkConfig.getNetworkConfig(MAINNET).getGenesisConfig());
assertThat(genesisConfigFile.getConfigOptions().getChainId().isPresent()).isTrue();
assertThat(genesisConfigFile.getConfigOptions().getChainId().get())
.isEqualTo(EthNetworkConfig.getNetworkConfig(MAINNET).getNetworkId());
}
@Test
public void identityValueTrueMustBeUsed() {
parseCommand("--identity", "test");
verify(mockRunnerBuilder.identityString(eq(Optional.of("test")))).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void p2pEnabledOptionValueTrueMustBeUsed() {
parseCommand("--p2p-enabled", "true");
verify(mockRunnerBuilder.p2pEnabled(eq(true))).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void p2pEnabledOptionValueFalseMustBeUsed() {
parseCommand("--p2p-enabled", "false");
verify(mockRunnerBuilder.p2pEnabled(eq(false))).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void p2pOptionsRequiresServiceToBeEnabled() {
final String[] nodes = {
"6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0"
};
parseCommand(
"--p2p-enabled",
"false",
"--bootnodes",
String.join(",", validENodeStrings),
"--discovery-enabled",
"false",
"--max-peers",
"42",
"--remote-connections-max-percentage",
"50",
"--banned-node-id",
String.join(",", nodes),
"--banned-node-ids",
String.join(",", nodes));
verifyOptionsConstraintLoggerCall(
"--p2p-enabled",
"--discovery-enabled",
"--bootnodes",
"--max-peers",
"--banned-node-ids",
"--remote-connections-max-percentage");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void discoveryOptionValueTrueMustBeUsed() {
parseCommand("--discovery-enabled", "true");
verify(mockRunnerBuilder.discovery(eq(true))).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void discoveryOptionValueFalseMustBeUsed() {
parseCommand("--discovery-enabled", "false");
verify(mockRunnerBuilder.discovery(eq(false))).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void callingWithBootnodesOptionButNoValueMustPassEmptyBootnodeList() {
parseCommand("--bootnodes");
verify(mockRunnerBuilder).ethNetworkConfig(ethNetworkConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(ethNetworkConfigArgumentCaptor.getValue().getBootNodes()).isEmpty();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void callingWithValidBootnodeMustSucceed() {
parseCommand(
"--bootnodes",
"enode://d2567893371ea5a6fa6371d483891ed0d129e79a8fc74d6df95a00a6545444cd4a6960bbffe0b4e2edcf35135271de57ee559c0909236bbc2074346ef2b5b47c@127.0.0.1:30304");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void callingWithInvalidBootnodeMustDisplayError() {
parseCommand("--bootnodes", "invalid_enode_url");
assertThat(commandOutput.toString()).isEmpty();
final String expectedErrorOutputStart =
"Invalid enode URL syntax. Enode URL should have the following format "
+ "'enode://<node_id>@<ip>:<listening_port>[?discport=<discovery_port>]'.";
assertThat(commandErrorOutput.toString()).startsWith(expectedErrorOutputStart);
}
@Test
public void callingWithBootnodeThatHasDiscoveryDisabledMustDisplayError() {
final String validBootnode =
"enode://d2567893371ea5a6fa6371d483891ed0d129e79a8fc74d6df95a00a6545444cd4a6960bbffe0b4e2edcf35135271de57ee559c0909236bbc2074346ef2b5b47c@127.0.0.1:30304";
final String invalidBootnode =
"enode://02567893371ea5a6fa6371d483891ed0d129e79a8fc74d6df95a00a6545444cd4a6960bbffe0b4e2edcf35135271de57ee559c0909236bbc2074346ef2b5b47c@127.0.0.1:30303?discport=0";
final String bootnodesValue = validBootnode + "," + invalidBootnode;
parseCommand("--bootnodes", bootnodesValue);
assertThat(commandOutput.toString()).isEmpty();
final String expectedErrorOutputStart =
"Bootnodes must have discovery enabled. Invalid bootnodes: " + invalidBootnode + ".";
assertThat(commandErrorOutput.toString()).startsWith(expectedErrorOutputStart);
}
// This test ensures non regression on https://pegasys1.atlassian.net/browse/PAN-2387
@Test
public void callingWithInvalidBootnodeAndEqualSignMustDisplayError() {
parseCommand("--bootnodes=invalid_enode_url");
assertThat(commandOutput.toString()).isEmpty();
final String expectedErrorOutputStart =
"Invalid enode URL syntax. Enode URL should have the following format "
+ "'enode://<node_id>@<ip>:<listening_port>[?discport=<discovery_port>]'.";
assertThat(commandErrorOutput.toString()).startsWith(expectedErrorOutputStart);
}
@Test
public void bootnodesOptionMustBeUsed() {
parseCommand("--bootnodes", String.join(",", validENodeStrings));
verify(mockRunnerBuilder).ethNetworkConfig(ethNetworkConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(ethNetworkConfigArgumentCaptor.getValue().getBootNodes())
.isEqualTo(
Stream.of(validENodeStrings).map(EnodeURL::fromString).collect(Collectors.toList()));
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void bannedNodeIdsOptionMustBeUsed() {
final Bytes[] nodes = {
Bytes.fromHexString(
"6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0"),
Bytes.fromHexString(
"7f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0"),
Bytes.fromHexString(
"0x8f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0")
};
final String nodeIdsArg =
Arrays.stream(nodes).map(Bytes::toShortHexString).collect(Collectors.joining(","));
parseCommand("--banned-node-ids", nodeIdsArg);
verify(mockRunnerBuilder).bannedNodeIds(bytesCollectionCollector.capture());
verify(mockRunnerBuilder).build();
assertThat(bytesCollectionCollector.getValue().toArray()).isEqualTo(nodes);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void callingWithBannedNodeidsOptionButNoValueMustDisplayError() {
parseCommand("--banned-node-ids");
assertThat(commandOutput.toString()).isEmpty();
final String expectedErrorOutputStart =
"Missing required parameter for option '--banned-node-ids' at index 0 (<NODEID>)";
assertThat(commandErrorOutput.toString()).startsWith(expectedErrorOutputStart);
}
@Test
public void callingWithBannedNodeidsOptionWithInvalidValuesMustDisplayError() {
parseCommand("--banned-node-ids", "0x10,20,30");
assertThat(commandOutput.toString()).isEmpty();
final String expectedErrorOutputStart =
"Invalid ids supplied to '--banned-node-ids'. Expected 64 bytes in 0x10";
assertThat(commandErrorOutput.toString()).startsWith(expectedErrorOutputStart);
}
@Test
public void p2pHostAndPortOptionsAreRespected() {
final String host = "1.2.3.4";
final int port = 1234;
parseCommand("--p2p-host", host, "--p2p-port", String.valueOf(port));
verify(mockRunnerBuilder).p2pAdvertisedHost(stringArgumentCaptor.capture());
verify(mockRunnerBuilder).p2pListenPort(intArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(stringArgumentCaptor.getValue()).isEqualTo(host);
assertThat(intArgumentCaptor.getValue()).isEqualTo(port);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void p2pInterfaceOptionIsRespected() {
final String ip = "1.2.3.4";
parseCommand("--p2p-interface", ip);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
verify(mockRunnerBuilder).p2pListenInterface(stringArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(stringArgumentCaptor.getValue()).isEqualTo(ip);
}
@Test
public void p2pHostMayBeLocalhost() {
final String host = "localhost";
parseCommand("--p2p-host", host);
verify(mockRunnerBuilder).p2pAdvertisedHost(stringArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(stringArgumentCaptor.getValue()).isEqualTo(host);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void p2pHostMayBeIPv6() {
final String host = "2600:DB8::8545";
parseCommand("--p2p-host", host);
verify(mockRunnerBuilder).p2pAdvertisedHost(stringArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(stringArgumentCaptor.getValue()).isEqualTo(host);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void maxpeersOptionMustBeUsed() {
final int maxPeers = 123;
parseCommand("--max-peers", String.valueOf(maxPeers));
verify(mockRunnerBuilder).maxPeers(intArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(intArgumentCaptor.getValue()).isEqualTo(maxPeers);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void remoteConnectionsPercentageOptionMustBeUsed() {
final int remoteConnectionsPercentage = 12;
parseCommand(
"--remote-connections-limit-enabled",
"--remote-connections-max-percentage",
String.valueOf(remoteConnectionsPercentage));
verify(mockRunnerBuilder).fractionRemoteConnectionsAllowed(floatCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(floatCaptor.getValue())
.isEqualTo(
Fraction.fromPercentage(Percentage.fromInt(remoteConnectionsPercentage)).getValue());
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void remoteConnectionsPercentageWithInvalidFormatMustFail() {
parseCommand(
"--remote-connections-limit-enabled", "--remote-connections-max-percentage", "invalid");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains(
"Invalid value for option '--remote-connections-max-percentage'",
"should be a number between 0 and 100 inclusive");
}
@Test
public void remoteConnectionsPercentageWithOutOfRangeMustFail() {
parseCommand(
"--remote-connections-limit-enabled", "--remote-connections-max-percentage", "150");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains(
"Invalid value for option '--remote-connections-max-percentage'",
"should be a number between 0 and 100 inclusive");
}
@Test
public void syncMode_fast() {
parseCommand("--sync-mode", "FAST");
verify(mockControllerBuilder).synchronizerConfiguration(syncConfigurationCaptor.capture());
final SynchronizerConfiguration syncConfig = syncConfigurationCaptor.getValue();
assertThat(syncConfig.getSyncMode()).isEqualTo(SyncMode.FAST);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void syncMode_full() {
parseCommand("--sync-mode", "FULL");
verify(mockControllerBuilder).synchronizerConfiguration(syncConfigurationCaptor.capture());
final SynchronizerConfiguration syncConfig = syncConfigurationCaptor.getValue();
assertThat(syncConfig.getSyncMode()).isEqualTo(SyncMode.FULL);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void syncMode_full_by_default_for_dev() {
parseCommand("--network", "dev");
verify(mockControllerBuilder).synchronizerConfiguration(syncConfigurationCaptor.capture());
final SynchronizerConfiguration syncConfig = syncConfigurationCaptor.getValue();
assertThat(syncConfig.getSyncMode()).isEqualTo(SyncMode.FULL);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void helpShouldDisplayFastSyncOptions() {
parseCommand("--help");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).contains("--fast-sync-min-peers");
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void parsesValidFastSyncMinPeersOption() {
parseCommand("--sync-mode", "FAST", "--fast-sync-min-peers", "11");
verify(mockControllerBuilder).synchronizerConfiguration(syncConfigurationCaptor.capture());
final SynchronizerConfiguration syncConfig = syncConfigurationCaptor.getValue();
assertThat(syncConfig.getSyncMode()).isEqualTo(SyncMode.FAST);
assertThat(syncConfig.getFastSyncMinimumPeerCount()).isEqualTo(11);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void parsesInvalidFastSyncMinPeersOptionWrongFormatShouldFail() {
parseCommand("--sync-mode", "FAST", "--fast-sync-min-peers", "ten");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains("Invalid value for option '--fast-sync-min-peers': 'ten' is not an int");
}
@Test
public void natMethodOptionIsParsedCorrectly() {
parseCommand("--nat-method", "NONE");
verify(mockRunnerBuilder).natMethod(eq(NatMethod.NONE));
parseCommand("--nat-method", "UPNP");
verify(mockRunnerBuilder).natMethod(eq(NatMethod.UPNP));
parseCommand("--nat-method", "AUTO");
verify(mockRunnerBuilder).natMethod(eq(NatMethod.AUTO));
parseCommand("--nat-method", "DOCKER");
verify(mockRunnerBuilder).natMethod(eq(NatMethod.DOCKER));
parseCommand("--nat-method", "KUBERNETES");
verify(mockRunnerBuilder).natMethod(eq(NatMethod.KUBERNETES));
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void parsesInvalidNatMethodOptionsShouldFail() {
parseCommand("--nat-method", "invalid");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains(
"Invalid value for option '--nat-method': expected one of [UPNP, DOCKER, KUBERNETES, AUTO, NONE] (case-insensitive) but was 'invalid'");
}
@Test
public void ethStatsOptionIsParsedCorrectly() {
final String url = "besu-node:secret@host:443";
parseCommand("--Xethstats", url);
verify(mockRunnerBuilder).ethstatsUrl(url);
}
@Test
public void ethStatsContactOptionIsParsedCorrectly() {
final String contact = "[email protected]";
parseCommand("--Xethstats", "besu-node:secret@host:443", "--Xethstats-contact", contact);
verify(mockRunnerBuilder).ethstatsContact(contact);
}
@Test
public void ethStatsContactOptionCannotBeUsedWithoutEthStatsServerProvided() {
parseCommand("--Xethstats-contact", "besu-updated");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains(
"The `--Xethstats-contact` requires ethstats server URL to be provided. Either remove --Xethstats-contact or provide an url (via --Xethstats=nodename:secret@host:port)");
}
@Test
public void dnsEnabledOptionIsParsedCorrectly() {
TestBesuCommand besuCommand = parseCommand("--Xdns-enabled", "true");
assertThat(besuCommand.getEnodeDnsConfiguration().dnsEnabled()).isTrue();
assertThat(besuCommand.getEnodeDnsConfiguration().updateEnabled()).isFalse();
}
@Test
public void dnsUpdateEnabledOptionIsParsedCorrectly() {
TestBesuCommand besuCommand =
parseCommand("--Xdns-enabled", "true", "--Xdns-update-enabled", "true");
assertThat(besuCommand.getEnodeDnsConfiguration().dnsEnabled()).isTrue();
assertThat(besuCommand.getEnodeDnsConfiguration().updateEnabled()).isTrue();
}
@Test
public void dnsUpdateEnabledOptionCannotBeUsedWithoutDnsEnabled() {
parseCommand("--Xdns-update-enabled", "true");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains(
"The `--Xdns-update-enabled` requires dns to be enabled. Either remove --Xdns-update-enabled or specify dns is enabled (--Xdns-enabled)");
}
@Test
public void helpShouldDisplayNatMethodInfo() {
parseCommand("--help");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).contains("--nat-method");
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void natMethodPropertyDefaultIsAuto() {
parseCommand();
verify(mockRunnerBuilder).natMethod(eq(NatMethod.AUTO));
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void natManagerPodNamePropertyDefaultIsBesu() {
parseCommand();
verify(mockRunnerBuilder).natManagerServiceName(eq(DEFAULT_BESU_SERVICE_NAME_FILTER));
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void natManagerPodNamePropertyIsCorrectlyUpdated() {
final String podName = "besu-updated";
parseCommand("--Xnat-kube-service-name", podName);
verify(mockRunnerBuilder).natManagerServiceName(eq(podName));
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void natManagerPodNameCannotBeUsedWithNatDockerMethod() {
parseCommand("--nat-method", "DOCKER", "--Xnat-kube-service-name", "besu-updated");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains(
"The `--Xnat-kube-service-name` parameter is only used in kubernetes mode. Either remove --Xnat-kube-service-name or select the KUBERNETES mode (via --nat--method=KUBERNETES)");
}
@Test
public void natManagerPodNameCannotBeUsedWithNatNoneMethod() {
parseCommand("--nat-method", "NONE", "--Xnat-kube-service-name", "besu-updated");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains(
"The `--Xnat-kube-service-name` parameter is only used in kubernetes mode. Either remove --Xnat-kube-service-name or select the KUBERNETES mode (via --nat--method=KUBERNETES)");
}
@Test
public void natMethodFallbackEnabledPropertyIsCorrectlyUpdatedWithKubernetes() {
parseCommand("--nat-method", "KUBERNETES", "--Xnat-method-fallback-enabled", "false");
verify(mockRunnerBuilder).natMethodFallbackEnabled(eq(false));
parseCommand("--nat-method", "KUBERNETES", "--Xnat-method-fallback-enabled", "true");
verify(mockRunnerBuilder).natMethodFallbackEnabled(eq(true));
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void natMethodFallbackEnabledPropertyIsCorrectlyUpdatedWithDocker() {
parseCommand("--nat-method", "DOCKER", "--Xnat-method-fallback-enabled", "false");
verify(mockRunnerBuilder).natMethodFallbackEnabled(eq(false));
parseCommand("--nat-method", "DOCKER", "--Xnat-method-fallback-enabled", "true");
verify(mockRunnerBuilder).natMethodFallbackEnabled(eq(true));
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void natMethodFallbackEnabledPropertyIsCorrectlyUpdatedWithUpnp() {
parseCommand("--nat-method", "UPNP", "--Xnat-method-fallback-enabled", "false");
verify(mockRunnerBuilder).natMethodFallbackEnabled(eq(false));
parseCommand("--nat-method", "UPNP", "--Xnat-method-fallback-enabled", "true");
verify(mockRunnerBuilder).natMethodFallbackEnabled(eq(true));
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void natMethodFallbackEnabledCannotBeUsedWithAutoMethod() {
parseCommand("--nat-method", "AUTO", "--Xnat-method-fallback-enabled", "false");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains(
"The `--Xnat-method-fallback-enabled` parameter cannot be used in AUTO mode. Either remove --Xnat-method-fallback-enabled or select another mode (via --nat--method=XXXX)");
}
@Test
public void rpcHttpEnabledPropertyDefaultIsFalse() {
parseCommand();
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().isEnabled()).isFalse();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcHttpEnabledPropertyMustBeUsed() {
parseCommand("--rpc-http-enabled");
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().isEnabled()).isTrue();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void graphQLHttpEnabledPropertyDefaultIsFalse() {
parseCommand();
verify(mockRunnerBuilder).graphQLConfiguration(graphQLConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(graphQLConfigArgumentCaptor.getValue().isEnabled()).isFalse();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void graphQLHttpEnabledPropertyMustBeUsed() {
parseCommand("--graphql-http-enabled");
verify(mockRunnerBuilder).graphQLConfiguration(graphQLConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(graphQLConfigArgumentCaptor.getValue().isEnabled()).isTrue();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcApisPropertyMustBeUsed() {
parseCommand("--rpc-http-api", "ETH,NET,PERM", "--rpc-http-enabled");
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
verify(mockLogger)
.warn("Permissions are disabled. Cannot enable PERM APIs when not using Permissions.");
assertThat(jsonRpcConfigArgumentCaptor.getValue().getRpcApis())
.containsExactlyInAnyOrder(ETH, NET, PERM);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcApisPropertyIgnoresDuplicatesAndMustBeUsed() {
parseCommand("--rpc-http-api", "ETH,NET,NET", "--rpc-http-enabled");
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getRpcApis())
.containsExactlyInAnyOrder(ETH, NET);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
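// Several of the tests below rely on verifyOptionsConstraintLoggerCall, a helper defined
// elsewhere in this class. A minimal sketch of the idea (assumed shape, not the actual
// implementation):
//
// private void verifyOptionsConstraintLoggerCall(
//     final String mainOption, final String... dependentOptions) {
//   // capture the warning logged on mockLogger and assert it names mainOption as the missing
//   // prerequisite for each of the dependent options
// }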
@Test
public void rpcHttpOptionsRequiresServiceToBeEnabled() {
parseCommand(
"--rpc-http-api",
"ETH,NET",
"--rpc-http-host",
"0.0.0.0",
"--rpc-http-port",
"1234",
"--rpc-http-cors-origins",
"all");
verifyOptionsConstraintLoggerCall(
"--rpc-http-enabled",
"--rpc-http-host",
"--rpc-http-port",
"--rpc-http-cors-origins",
"--rpc-http-api");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void privacyTlsOptionsRequiresTlsToBeEnabled() {
when(storageService.getByName("rocksdb-privacy"))
.thenReturn(Optional.of(rocksDBSPrivacyStorageFactory));
final URL configFile = this.getClass().getResource("/orion_publickey.pub");
final String coinbaseStr = String.format("%040x", 1);
parseCommand(
"--privacy-enabled",
"--miner-enabled",
"--miner-coinbase=" + coinbaseStr,
"--min-gas-price",
"0",
"--privacy-url",
ENCLAVE_URI,
"--privacy-public-key-file",
configFile.getPath(),
"--privacy-tls-keystore-file",
"/Users/me/key");
verifyOptionsConstraintLoggerCall("--privacy-tls-enabled", "--privacy-tls-keystore-file");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void privacyTlsOptionsRequiresPrivacyToBeEnabled() {
parseCommand("--privacy-tls-enabled", "--privacy-tls-keystore-file", "/Users/me/key");
verifyOptionsConstraintLoggerCall("--privacy-enabled", "--privacy-tls-enabled");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void fastSyncOptionsRequiresFastSyncModeToBeSet() {
parseCommand("--fast-sync-min-peers", "5");
verifyOptionsConstraintLoggerCall("--sync-mode", "--fast-sync-min-peers");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcApisPropertyWithInvalidEntryMustDisplayError() {
parseCommand("--rpc-http-api", "BOB");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
// picocli reports the longest option name when an option has multiple names, hence the plural form here.
assertThat(commandErrorOutput.toString())
.contains("Invalid value for option '--rpc-http-apis'");
}
@Test
public void rpcHttpHostAndPortOptionsMustBeUsed() {
final String host = "1.2.3.4";
final int port = 1234;
parseCommand(
"--rpc-http-enabled", "--rpc-http-host", host, "--rpc-http-port", String.valueOf(port));
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(jsonRpcConfigArgumentCaptor.getValue().getPort()).isEqualTo(port);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcHttpHostMayBeLocalhost() {
final String host = "localhost";
parseCommand("--rpc-http-enabled", "--rpc-http-host", host);
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcHttpHostMayBeIPv6() {
final String host = "2600:DB8::8545";
parseCommand("--rpc-http-enabled", "--rpc-http-host", host);
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
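// JSON-RPC HTTP TLS tests: enabling TLS requires --rpc-http-enabled plus a keystore file and
// its password file; client authentication additionally requires either a known-clients file
// or CA clients to be enabled (the two may also be combined).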
@Test
public void rpcHttpTlsRequiresRpcHttpEnabled() {
parseCommand("--rpc-http-tls-enabled");
verifyOptionsConstraintLoggerCall("--rpc-http-enabled", "--rpc-http-tls-enabled");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcHttpTlsWithoutKeystoreReportsError() {
parseCommand("--rpc-http-enabled", "--rpc-http-tls-enabled");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains("Keystore file is required when TLS is enabled for JSON-RPC HTTP endpoint");
}
@Test
public void rpcHttpTlsWithoutPasswordfileReportsError() {
parseCommand(
"--rpc-http-enabled",
"--rpc-http-tls-enabled",
"--rpc-http-tls-keystore-file",
"/tmp/test.p12");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains(
"File containing password to unlock keystore is required when TLS is enabled for JSON-RPC HTTP endpoint");
}
@Test
public void rpcHttpTlsKeystoreAndPasswordMustBeUsed() {
final String host = "1.2.3.4";
final int port = 1234;
final String keystoreFile = "/tmp/test.p12";
final String keystorePasswordFile = "/tmp/test.txt";
parseCommand(
"--rpc-http-enabled",
"--rpc-http-host",
host,
"--rpc-http-port",
String.valueOf(port),
"--rpc-http-tls-enabled",
"--rpc-http-tls-keystore-file",
keystoreFile,
"--rpc-http-tls-keystore-password-file",
keystorePasswordFile);
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(jsonRpcConfigArgumentCaptor.getValue().getPort()).isEqualTo(port);
final Optional<TlsConfiguration> tlsConfiguration =
jsonRpcConfigArgumentCaptor.getValue().getTlsConfiguration();
assertThat(tlsConfiguration.isPresent()).isTrue();
assertThat(tlsConfiguration.get().getKeyStorePath()).isEqualTo(Path.of(keystoreFile));
assertThat(tlsConfiguration.get().getClientAuthConfiguration().isEmpty()).isTrue();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcHttpTlsClientAuthWithoutKnownFileReportsError() {
final String host = "1.2.3.4";
final int port = 1234;
final String keystoreFile = "/tmp/test.p12";
final String keystorePasswordFile = "/tmp/test.txt";
parseCommand(
"--rpc-http-enabled",
"--rpc-http-host",
host,
"--rpc-http-port",
String.valueOf(port),
"--rpc-http-tls-enabled",
"--rpc-http-tls-keystore-file",
keystoreFile,
"--rpc-http-tls-keystore-password-file",
keystorePasswordFile,
"--rpc-http-tls-client-auth-enabled");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains(
"Known-clients file must be specified or CA clients must be enabled when TLS client authentication is enabled for JSON-RPC HTTP endpoint");
}
@Test
public void rpcHttpTlsClientAuthWithKnownClientFile() {
final String host = "1.2.3.4";
final int port = 1234;
final String keystoreFile = "/tmp/test.p12";
final String keystorePasswordFile = "/tmp/test.txt";
final String knownClientFile = "/tmp/knownClientFile";
parseCommand(
"--rpc-http-enabled",
"--rpc-http-host",
host,
"--rpc-http-port",
String.valueOf(port),
"--rpc-http-tls-enabled",
"--rpc-http-tls-keystore-file",
keystoreFile,
"--rpc-http-tls-keystore-password-file",
keystorePasswordFile,
"--rpc-http-tls-client-auth-enabled",
"--rpc-http-tls-known-clients-file",
knownClientFile);
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(jsonRpcConfigArgumentCaptor.getValue().getPort()).isEqualTo(port);
final Optional<TlsConfiguration> tlsConfiguration =
jsonRpcConfigArgumentCaptor.getValue().getTlsConfiguration();
assertThat(tlsConfiguration.isPresent()).isTrue();
assertThat(tlsConfiguration.get().getKeyStorePath()).isEqualTo(Path.of(keystoreFile));
assertThat(tlsConfiguration.get().getClientAuthConfiguration().isPresent()).isTrue();
assertThat(
tlsConfiguration.get().getClientAuthConfiguration().get().getKnownClientsFile().get())
.isEqualTo(Path.of(knownClientFile));
assertThat(tlsConfiguration.get().getClientAuthConfiguration().get().isCaClientsEnabled())
.isFalse();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcHttpTlsClientAuthWithCAClient() {
final String host = "1.2.3.4";
final int port = 1234;
final String keystoreFile = "/tmp/test.p12";
final String keystorePasswordFile = "/tmp/test.txt";
parseCommand(
"--rpc-http-enabled",
"--rpc-http-host",
host,
"--rpc-http-port",
String.valueOf(port),
"--rpc-http-tls-enabled",
"--rpc-http-tls-keystore-file",
keystoreFile,
"--rpc-http-tls-keystore-password-file",
keystorePasswordFile,
"--rpc-http-tls-client-auth-enabled",
"--rpc-http-tls-ca-clients-enabled");
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(jsonRpcConfigArgumentCaptor.getValue().getPort()).isEqualTo(port);
final Optional<TlsConfiguration> tlsConfiguration =
jsonRpcConfigArgumentCaptor.getValue().getTlsConfiguration();
assertThat(tlsConfiguration.isPresent()).isTrue();
assertThat(tlsConfiguration.get().getKeyStorePath()).isEqualTo(Path.of(keystoreFile));
assertThat(tlsConfiguration.get().getClientAuthConfiguration().isPresent()).isTrue();
assertThat(
tlsConfiguration
.get()
.getClientAuthConfiguration()
.get()
.getKnownClientsFile()
.isEmpty())
.isTrue();
assertThat(tlsConfiguration.get().getClientAuthConfiguration().get().isCaClientsEnabled())
.isTrue();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcHttpTlsClientAuthWithCAClientAndKnownClientFile() {
final String host = "1.2.3.4";
final int port = 1234;
final String keystoreFile = "/tmp/test.p12";
final String keystorePasswordFile = "/tmp/test.txt";
final String knownClientFile = "/tmp/knownClientFile";
parseCommand(
"--rpc-http-enabled",
"--rpc-http-host",
host,
"--rpc-http-port",
String.valueOf(port),
"--rpc-http-tls-enabled",
"--rpc-http-tls-keystore-file",
keystoreFile,
"--rpc-http-tls-keystore-password-file",
keystorePasswordFile,
"--rpc-http-tls-client-auth-enabled",
"--rpc-http-tls-ca-clients-enabled",
"--rpc-http-tls-known-clients-file",
knownClientFile);
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(jsonRpcConfigArgumentCaptor.getValue().getPort()).isEqualTo(port);
final Optional<TlsConfiguration> tlsConfiguration =
jsonRpcConfigArgumentCaptor.getValue().getTlsConfiguration();
assertThat(tlsConfiguration.isPresent()).isTrue();
assertThat(tlsConfiguration.get().getKeyStorePath()).isEqualTo(Path.of(keystoreFile));
assertThat(tlsConfiguration.get().getClientAuthConfiguration().isPresent()).isTrue();
assertThat(
tlsConfiguration.get().getClientAuthConfiguration().get().getKnownClientsFile().get())
.isEqualTo(Path.of(knownClientFile));
assertThat(tlsConfiguration.get().getClientAuthConfiguration().get().isCaClientsEnabled())
.isTrue();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void graphQLHttpHostAndPortOptionsMustBeUsed() {
final String host = "1.2.3.4";
final int port = 1234;
parseCommand(
"--graphql-http-enabled",
"--graphql-http-host",
host,
"--graphql-http-port",
String.valueOf(port));
verify(mockRunnerBuilder).graphQLConfiguration(graphQLConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(graphQLConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(graphQLConfigArgumentCaptor.getValue().getPort()).isEqualTo(port);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void graphQLHttpHostMayBeLocalhost() {
final String host = "localhost";
parseCommand("--graphql-http-enabled", "--graphql-http-host", host);
verify(mockRunnerBuilder).graphQLConfiguration(graphQLConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(graphQLConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void graphQLHttpHostMayBeIPv6() {
final String host = "2600:DB8::8545";
parseCommand("--graphql-http-enabled", "--graphql-http-host", host);
verify(mockRunnerBuilder).graphQLConfiguration(graphQLConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(graphQLConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
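// CORS origin parsing rules exercised below: duplicate commas are filtered out, "all" is an
// alias for the "*" wildcard, wildcard/"all" and "none" cannot be mixed with other domains,
// and an empty value is rejected.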
@Test
public void rpcHttpCorsOriginsTwoDomainsMustBuildListWithBothDomains() {
final String[] origins = {"http://domain1.com", "https://domain2.com"};
parseCommand("--rpc-http-enabled", "--rpc-http-cors-origins", String.join(",", origins));
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getCorsAllowedDomains().toArray())
.isEqualTo(origins);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcHttpCorsOriginsDoubleCommaFilteredOut() {
final String[] origins = {"http://domain1.com", "https://domain2.com"};
parseCommand("--rpc-http-enabled", "--rpc-http-cors-origins", String.join(",,", origins));
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getCorsAllowedDomains().toArray())
.isEqualTo(origins);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcHttpCorsOriginsWithWildcardMustBuildListWithWildcard() {
final String[] origins = {"*"};
parseCommand("--rpc-http-enabled", "--rpc-http-cors-origins", String.join(",", origins));
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getCorsAllowedDomains().toArray())
.isEqualTo(origins);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcHttpCorsOriginsWithAllMustBuildListWithWildcard() {
parseCommand("--rpc-http-enabled", "--rpc-http-cors-origins", "all");
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getCorsAllowedDomains()).containsExactly("*");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcHttpCorsOriginsWithNoneMustBuildEmptyList() {
final String[] origins = {"none"};
parseCommand("--rpc-http-enabled", "--rpc-http-cors-origins", String.join(",", origins));
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getCorsAllowedDomains()).isEmpty();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcHttpCorsOriginsNoneWithAnotherDomainMustFail() {
final String[] origins = {"http://domain1.com", "none"};
parseCommand("--rpc-http-cors-origins", String.join(",", origins));
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains("Value 'none' can't be used with other domains");
}
@Test
public void rpcHttpCorsOriginsNoneWithAnotherDomainMustFailNoneFirst() {
final String[] origins = {"none", "http://domain1.com"};
parseCommand("--rpc-http-cors-origins", String.join(",", origins));
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains("Value 'none' can't be used with other domains");
}
@Test
public void rpcHttpCorsOriginsAllWithAnotherDomainMustFail() {
parseCommand("--rpc-http-cors-origins=http://domain1.com,all");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains("Values '*' or 'all' can't be used with other domains");
}
@Test
public void rpcHttpCorsOriginsAllWithAnotherDomainMustFailAsFlags() {
parseCommand("--rpc-http-cors-origins=http://domain1.com", "--rpc-http-cors-origins=all");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains("Values '*' or 'all' can't be used with other domains");
}
@Test
public void rpcHttpCorsOriginsWildcardWithAnotherDomainMustFail() {
parseCommand("--rpc-http-cors-origins=http://domain1.com,*");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains("Values '*' or 'all' can't be used with other domains");
}
@Test
public void rpcHttpCorsOriginsWildcardWithAnotherDomainMustFailAsFlags() {
parseCommand("--rpc-http-cors-origins=http://domain1.com", "--rpc-http-cors-origins=*");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains("Values '*' or 'all' can't be used with other domains");
}
@Test
public void rpcHttpCorsOriginsInvalidRegexShouldFail() {
final String[] origins = {"**"};
parseCommand("--rpc-http-cors-origins", String.join(",", origins));
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains("Domain values result in invalid regex pattern");
}
@Test
public void rpcHttpCorsOriginsEmptyValueFails() {
parseCommand("--rpc-http-cors-origins=");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains("Domain cannot be empty string or null string.");
}
/** Tests the deprecated --host-whitelist CLI option (superseded by --host-allowlist). */
@Deprecated
@Test
public void rpcHttpHostWhitelistAcceptsSingleArgument() {
parseCommand("--host-whitelist", "a");
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHostsAllowlist().size()).isEqualTo(1);
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHostsAllowlist()).contains("a");
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHostsAllowlist())
.doesNotContain("localhost");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcHttpHostAllowlistAcceptsSingleArgument() {
parseCommand("--host-allowlist", "a");
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHostsAllowlist().size()).isEqualTo(1);
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHostsAllowlist()).contains("a");
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHostsAllowlist())
.doesNotContain("localhost");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcHttpHostAllowlistAcceptsMultipleArguments() {
parseCommand("--host-allowlist", "a,b");
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHostsAllowlist().size()).isEqualTo(2);
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHostsAllowlist()).contains("a", "b");
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHostsAllowlist())
.doesNotContain("*", "localhost");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcHttpHostAllowlistAcceptsDoubleComma() {
parseCommand("--host-allowlist", "a,,b");
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHostsAllowlist().size()).isEqualTo(2);
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHostsAllowlist()).contains("a", "b");
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHostsAllowlist())
.doesNotContain("*", "localhost");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Deprecated
@Test
public void rpcHttpHostWhitelistAllowlistAcceptsMultipleFlags() {
parseCommand("--host-whitelist=a", "--host-allowlist=b");
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHostsAllowlist().size()).isEqualTo(2);
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHostsAllowlist()).contains("a", "b");
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHostsAllowlist())
.doesNotContain("*", "localhost");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcHttpHostAllowlistAcceptsMultipleFlags() {
parseCommand("--host-allowlist=a", "--host-allowlist=b");
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHostsAllowlist().size()).isEqualTo(2);
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHostsAllowlist()).contains("a", "b");
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHostsAllowlist())
.doesNotContain("*", "localhost");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcHttpHostAllowlistStarWithAnotherHostnameMustFail() {
final String[] origins = {"friend", "*"};
parseCommand("--host-allowlist", String.join(",", origins));
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains("Values '*' or 'all' can't be used with other hostnames");
}
@Test
public void rpcHttpHostAllowlistStarWithAnotherHostnameMustFailStarFirst() {
final String[] origins = {"*", "friend"};
parseCommand("--host-allowlist", String.join(",", origins));
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains("Values '*' or 'all' can't be used with other hostnames");
}
@Test
public void rpcHttpHostAllowlistAllWithAnotherHostnameMustFail() {
final String[] origins = {"friend", "all"};
parseCommand("--host-allowlist", String.join(",", origins));
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains("Values '*' or 'all' can't be used with other hostnames");
}
@Test
public void rpcHttpHostAllowlistWithNoneMustBuildEmptyList() {
final String[] origins = {"none"};
parseCommand("--host-allowlist", String.join(",", origins));
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHostsAllowlist()).isEmpty();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcHttpHostAllowlistNoneWithAnotherDomainMustFail() {
final String[] origins = {"http://domain1.com", "none"};
parseCommand("--host-allowlist", String.join(",", origins));
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains("Value 'none' can't be used with other hostnames");
}
@Test
public void rpcHttpHostAllowlistNoneWithAnotherDomainMustFailNoneFirst() {
final String[] origins = {"none", "http://domain1.com"};
parseCommand("--host-allowlist", String.join(",", origins));
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains("Value 'none' can't be used with other hostnames");
}
@Test
public void rpcHttpHostAllowlistEmptyValueFails() {
parseCommand("--host-allowlist=");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains("Hostname cannot be empty string or null string.");
}
@Test
public void rpcWsRpcEnabledPropertyDefaultIsFalse() {
parseCommand();
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().isEnabled()).isFalse();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcWsRpcEnabledPropertyMustBeUsed() {
parseCommand("--rpc-ws-enabled");
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().isEnabled()).isTrue();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcWsOptionsRequiresServiceToBeEnabled() {
parseCommand("--rpc-ws-api", "ETH,NET", "--rpc-ws-host", "0.0.0.0", "--rpc-ws-port", "1234");
verifyOptionsConstraintLoggerCall(
"--rpc-ws-enabled", "--rpc-ws-host", "--rpc-ws-port", "--rpc-ws-api");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcWsApiPropertyMustBeUsed() {
parseCommand("--rpc-ws-enabled", "--rpc-ws-api", "ETH, NET");
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getRpcApis())
.containsExactlyInAnyOrder(ETH, NET);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcWsHostAndPortOptionMustBeUsed() {
final String host = "1.2.3.4";
final int port = 1234;
parseCommand("--rpc-ws-enabled", "--rpc-ws-host", host, "--rpc-ws-port", String.valueOf(port));
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(wsRpcConfigArgumentCaptor.getValue().getPort()).isEqualTo(port);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcWsHostAndMayBeLocalhost() {
final String host = "localhost";
parseCommand("--rpc-ws-enabled", "--rpc-ws-host", host);
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rpcWsHostAndMayBeIPv6() {
final String host = "2600:DB8::8545";
parseCommand("--rpc-ws-enabled", "--rpc-ws-host", host);
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void metricsEnabledPropertyDefaultIsFalse() {
parseCommand();
verify(mockRunnerBuilder).metricsConfiguration(metricsConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(metricsConfigArgumentCaptor.getValue().isEnabled()).isFalse();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void metricsEnabledPropertyMustBeUsed() {
parseCommand("--metrics-enabled");
verify(mockRunnerBuilder).metricsConfiguration(metricsConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(metricsConfigArgumentCaptor.getValue().isEnabled()).isTrue();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
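// Metrics run either in pull mode (--metrics-enabled) or push mode (--metrics-push-enabled),
// never both at once; the host/port options of each mode (plus interval and Prometheus job for
// push) require the corresponding enable flag.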
@Test
public void metricsPushOptionsRequiresPushToBeEnabled() {
parseCommand(
"--metrics-push-host",
"0.0.0.0",
"--metrics-push-port",
"1234",
"--metrics-push-interval",
"2",
"--metrics-push-prometheus-job",
"job-name");
verifyOptionsConstraintLoggerCall(
"--metrics-push-enabled",
"--metrics-push-host",
"--metrics-push-port",
"--metrics-push-interval",
"--metrics-push-prometheus-job");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void metricsOptionsRequiresPullMetricsToBeEnabled() {
parseCommand("--metrics-host", "0.0.0.0", "--metrics-port", "1234");
verifyOptionsConstraintLoggerCall("--metrics-enabled", "--metrics-host", "--metrics-port");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void metricsHostAndPortOptionMustBeUsed() {
final String host = "1.2.3.4";
final int port = 1234;
parseCommand(
"--metrics-enabled", "--metrics-host", host, "--metrics-port", String.valueOf(port));
verify(mockRunnerBuilder).metricsConfiguration(metricsConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(metricsConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(metricsConfigArgumentCaptor.getValue().getPort()).isEqualTo(port);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void metricsHostMayBeLocalhost() {
final String host = "localhost";
parseCommand("--metrics-enabled", "--metrics-host", host);
verify(mockRunnerBuilder).metricsConfiguration(metricsConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(metricsConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void metricsHostMayBeIPv6() {
final String host = "2600:DB8::8545";
parseCommand("--metrics-enabled", "--metrics-host", host);
verify(mockRunnerBuilder).metricsConfiguration(metricsConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(metricsConfigArgumentCaptor.getValue().getHost()).isEqualTo(host);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void metricsCategoryPropertyMustBeUsed() {
parseCommand("--metrics-enabled", "--metrics-category", StandardMetricCategory.JVM.toString());
verify(mockRunnerBuilder).metricsConfiguration(metricsConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(metricsConfigArgumentCaptor.getValue().getMetricCategories())
.containsExactly(StandardMetricCategory.JVM);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void metricsPushEnabledPropertyMustBeUsed() {
parseCommand("--metrics-push-enabled");
verify(mockRunnerBuilder).metricsConfiguration(metricsConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(metricsConfigArgumentCaptor.getValue().isPushEnabled()).isTrue();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void metricsPushHostAndPushPortOptionMustBeUsed() {
final String host = "1.2.3.4";
final int port = 1234;
parseCommand(
"--metrics-push-enabled",
"--metrics-push-host",
host,
"--metrics-push-port",
String.valueOf(port));
verify(mockRunnerBuilder).metricsConfiguration(metricsConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(metricsConfigArgumentCaptor.getValue().getPushHost()).isEqualTo(host);
assertThat(metricsConfigArgumentCaptor.getValue().getPushPort()).isEqualTo(port);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void metricsPushHostMayBeLocalhost() {
final String host = "localhost";
parseCommand("--metrics-push-enabled", "--metrics-push-host", host);
verify(mockRunnerBuilder).metricsConfiguration(metricsConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(metricsConfigArgumentCaptor.getValue().getPushHost()).isEqualTo(host);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void metricsPushIntervalMustBeUsed() {
parseCommand("--metrics-push-enabled", "--metrics-push-interval", "42");
verify(mockRunnerBuilder).metricsConfiguration(metricsConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(metricsConfigArgumentCaptor.getValue().getPushInterval()).isEqualTo(42);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void metricsPrometheusJobMustBeUsed() {
parseCommand("--metrics-push-enabled", "--metrics-push-prometheus-job", "besu-command-test");
verify(mockRunnerBuilder).metricsConfiguration(metricsConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(metricsConfigArgumentCaptor.getValue().getPrometheusJob())
.isEqualTo("besu-command-test");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void metricsAndMetricsPushMustNotBeUsedTogether() {
parseCommand("--metrics-enabled", "--metrics-push-enabled");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.startsWith("--metrics-enabled option and --metrics-push-enabled option can't be used");
}
@Test
public void besuDoesNotStartInMiningModeIfCoinbaseNotSet() {
parseCommand("--miner-enabled");
Mockito.verifyZeroInteractions(mockControllerBuilder);
}
@Test
public void miningIsEnabledWhenSpecified() throws Exception {
final String coinbaseStr = String.format("%040x", 1);
parseCommand("--miner-enabled", "--miner-coinbase=" + coinbaseStr);
final ArgumentCaptor<MiningParameters> miningArg =
ArgumentCaptor.forClass(MiningParameters.class);
verify(mockControllerBuilder).miningParameters(miningArg.capture());
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(miningArg.getValue().isMiningEnabled()).isTrue();
assertThat(miningArg.getValue().getCoinbase())
.isEqualTo(Optional.of(Address.fromHexString(coinbaseStr)));
}
@Test
public void stratumMiningIsEnabledWhenSpecified() throws Exception {
final String coinbaseStr = String.format("%040x", 1);
parseCommand("--miner-enabled", "--miner-coinbase=" + coinbaseStr, "--miner-stratum-enabled");
final ArgumentCaptor<MiningParameters> miningArg =
ArgumentCaptor.forClass(MiningParameters.class);
verify(mockControllerBuilder).miningParameters(miningArg.capture());
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(miningArg.getValue().isMiningEnabled()).isTrue();
assertThat(miningArg.getValue().getCoinbase())
.isEqualTo(Optional.of(Address.fromHexString(coinbaseStr)));
assertThat(miningArg.getValue().isStratumMiningEnabled()).isTrue();
}
@Test
public void miningOptionsRequiresServiceToBeEnabled() {
final Address requestedCoinbase = Address.fromHexString("0000011111222223333344444");
parseCommand(
"--miner-coinbase",
requestedCoinbase.toString(),
"--min-gas-price",
"42",
"--miner-extra-data",
"0x1122334455667788990011223344556677889900112233445566778899001122",
"--miner-stratum-enabled");
verifyOptionsConstraintLoggerCall(
"--miner-enabled",
"--miner-coinbase",
"--min-gas-price",
"--miner-extra-data",
"--miner-stratum-enabled");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.startsWith(
"Unable to mine with Stratum if mining is disabled. Either disable Stratum mining (remove --miner-stratum-enabled) or specify mining is enabled (--miner-enabled)");
}
@Test
public void miningParametersAreCaptured() throws Exception {
final Address requestedCoinbase = Address.fromHexString("0000011111222223333344444");
final String extraDataString =
"0x1122334455667788990011223344556677889900112233445566778899001122";
parseCommand(
"--miner-enabled",
"--miner-coinbase=" + requestedCoinbase.toString(),
"--min-gas-price=15",
"--miner-extra-data=" + extraDataString);
final ArgumentCaptor<MiningParameters> miningArg =
ArgumentCaptor.forClass(MiningParameters.class);
verify(mockControllerBuilder).miningParameters(miningArg.capture());
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(miningArg.getValue().getCoinbase()).isEqualTo(Optional.of(requestedCoinbase));
assertThat(miningArg.getValue().getMinTransactionGasPrice()).isEqualTo(Wei.of(15));
assertThat(miningArg.getValue().getExtraData()).isEqualTo(Bytes.fromHexString(extraDataString));
}
@Test
public void colorCanBeEnabledOrDisabledExplicitly() {
Stream.of(true, false)
.forEach(
bool -> {
parseCommand("--color-enabled", bool.toString());
assertThat(BesuCommand.getColorEnabled()).contains(bool);
});
}
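// The @Ignore-annotated pruning tests below are currently disabled; note they carry only
// @Ignore and no @Test, so JUnit 4 does not pick them up even as skipped tests. The explicit
// --pruning-enabled cases still run.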
@Ignore
public void pruningIsEnabledIfSyncModeIsFast() {
parseCommand("--sync-mode", "FAST");
verify(mockControllerBuilder).isPruningEnabled(true);
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Ignore
public void pruningIsDisabledIfSyncModeIsFull() {
parseCommand("--sync-mode", "FULL");
verify(mockControllerBuilder).isPruningEnabled(false);
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void pruningEnabledExplicitly() {
parseCommand("--pruning-enabled", "--sync-mode=FULL");
verify(mockControllerBuilder).isPruningEnabled(true);
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Ignore
public void pruningDisabledExplicitly() {
parseCommand("--pruning-enabled=false", "--sync-mode=FAST");
verify(mockControllerBuilder).isPruningEnabled(false);
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void pruningDisabledByDefault() {
parseCommand();
verify(mockControllerBuilder).isPruningEnabled(false);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void pruningParametersAreCaptured() throws Exception {
parseCommand(
"--pruning-enabled", "--pruning-blocks-retained=15", "--pruning-block-confirmations=4");
final ArgumentCaptor<PrunerConfiguration> pruningArg =
ArgumentCaptor.forClass(PrunerConfiguration.class);
verify(mockControllerBuilder).pruningConfiguration(pruningArg.capture());
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(pruningArg.getValue().getBlocksRetained()).isEqualTo(15);
assertThat(pruningArg.getValue().getBlockConfirmations()).isEqualTo(4);
}
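// Named-network tests: each supported --network value should resolve to its predefined
// EthNetworkConfig; the override tests further down check that network id and bootnodes can
// still be customised per network.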
@Test
public void devModeOptionMustBeUsed() throws Exception {
parseCommand("--network", "dev");
final ArgumentCaptor<EthNetworkConfig> networkArg =
ArgumentCaptor.forClass(EthNetworkConfig.class);
verify(mockControllerBuilderFactory).fromEthNetworkConfig(networkArg.capture(), any());
verify(mockControllerBuilder).build();
assertThat(networkArg.getValue()).isEqualTo(EthNetworkConfig.getNetworkConfig(DEV));
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rinkebyValuesAreUsed() throws Exception {
parseCommand("--network", "rinkeby");
final ArgumentCaptor<EthNetworkConfig> networkArg =
ArgumentCaptor.forClass(EthNetworkConfig.class);
verify(mockControllerBuilderFactory).fromEthNetworkConfig(networkArg.capture(), any());
verify(mockControllerBuilder).build();
assertThat(networkArg.getValue()).isEqualTo(EthNetworkConfig.getNetworkConfig(RINKEBY));
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void ropstenValuesAreUsed() throws Exception {
parseCommand("--network", "ropsten");
final ArgumentCaptor<EthNetworkConfig> networkArg =
ArgumentCaptor.forClass(EthNetworkConfig.class);
verify(mockControllerBuilderFactory).fromEthNetworkConfig(networkArg.capture(), any());
verify(mockControllerBuilder).build();
assertThat(networkArg.getValue()).isEqualTo(EthNetworkConfig.getNetworkConfig(ROPSTEN));
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void goerliValuesAreUsed() throws Exception {
parseCommand("--network", "goerli");
final ArgumentCaptor<EthNetworkConfig> networkArg =
ArgumentCaptor.forClass(EthNetworkConfig.class);
verify(mockControllerBuilderFactory).fromEthNetworkConfig(networkArg.capture(), any());
verify(mockControllerBuilder).build();
assertThat(networkArg.getValue()).isEqualTo(EthNetworkConfig.getNetworkConfig(GOERLI));
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void classicValuesAreUsed() throws Exception {
parseCommand("--network", "classic");
final ArgumentCaptor<EthNetworkConfig> networkArg =
ArgumentCaptor.forClass(EthNetworkConfig.class);
verify(mockControllerBuilderFactory).fromEthNetworkConfig(networkArg.capture(), any());
verify(mockControllerBuilder).build();
assertThat(networkArg.getValue()).isEqualTo(EthNetworkConfig.getNetworkConfig(CLASSIC));
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void kottiValuesAreUsed() throws Exception {
parseCommand("--network", "kotti");
final ArgumentCaptor<EthNetworkConfig> networkArg =
ArgumentCaptor.forClass(EthNetworkConfig.class);
verify(mockControllerBuilderFactory).fromEthNetworkConfig(networkArg.capture(), any());
verify(mockControllerBuilder).build();
assertThat(networkArg.getValue()).isEqualTo(EthNetworkConfig.getNetworkConfig(KOTTI));
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void mordorValuesAreUsed() throws Exception {
parseCommand("--network", "mordor");
final ArgumentCaptor<EthNetworkConfig> networkArg =
ArgumentCaptor.forClass(EthNetworkConfig.class);
verify(mockControllerBuilderFactory).fromEthNetworkConfig(networkArg.capture(), any());
verify(mockControllerBuilder).build();
assertThat(networkArg.getValue()).isEqualTo(EthNetworkConfig.getNetworkConfig(MORDOR));
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void rinkebyValuesCanBeOverridden() throws Exception {
networkValuesCanBeOverridden("rinkeby");
}
@Test
public void goerliValuesCanBeOverridden() throws Exception {
networkValuesCanBeOverridden("goerli");
}
@Test
public void ropstenValuesCanBeOverridden() throws Exception {
networkValuesCanBeOverridden("ropsten");
}
@Test
public void devValuesCanBeOverridden() throws Exception {
networkValuesCanBeOverridden("dev");
}
@Test
public void classicValuesCanBeOverridden() throws Exception {
networkValuesCanBeOverridden("classic");
}
@Test
public void kottiValuesCanBeOverridden() throws Exception {
networkValuesCanBeOverridden("kotti");
}
@Test
public void mordorValuesCanBeOverridden() throws Exception {
networkValuesCanBeOverridden("mordor");
}
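/**
 * Shared helper for the *ValuesCanBeOverridden tests above: parses the given named network with
 * explicit --network-id and --bootnodes overrides and asserts that the overrides take effect.
 */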
private void networkValuesCanBeOverridden(final String network) throws Exception {
parseCommand(
"--network",
network,
"--network-id",
"1234567",
"--bootnodes",
String.join(",", validENodeStrings));
final ArgumentCaptor<EthNetworkConfig> networkArg =
ArgumentCaptor.forClass(EthNetworkConfig.class);
verify(mockControllerBuilderFactory).fromEthNetworkConfig(networkArg.capture(), any());
verify(mockControllerBuilder).build();
assertThat(networkArg.getValue().getBootNodes())
.isEqualTo(
Stream.of(validENodeStrings).map(EnodeURL::fromString).collect(Collectors.toList()));
assertThat(networkArg.getValue().getNetworkId()).isEqualTo(1234567);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void fullCLIOptionsShown() {
parseCommand("--help");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).contains("--config-file");
assertThat(commandOutput.toString()).contains("--data-path");
assertThat(commandOutput.toString()).contains("--genesis-file");
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void mustUseEnclaveUriAndOptions() {
final URL configFile = this.getClass().getResource("/orion_publickey.pub");
parseCommand(
"--privacy-enabled",
"--privacy-url",
ENCLAVE_URI,
"--privacy-public-key-file",
configFile.getPath(),
"--min-gas-price",
"0");
final ArgumentCaptor<PrivacyParameters> enclaveArg =
ArgumentCaptor.forClass(PrivacyParameters.class);
verify(mockControllerBuilder).privacyParameters(enclaveArg.capture());
verify(mockControllerBuilder).build();
assertThat(enclaveArg.getValue().isEnabled()).isEqualTo(true);
assertThat(enclaveArg.getValue().getEnclaveUri()).isEqualTo(URI.create(ENCLAVE_URI));
assertThat(enclaveArg.getValue().getEnclavePublicKey()).isEqualTo(ENCLAVE_PUBLIC_KEY);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void privacyOptionsRequiresServiceToBeEnabled() {
final File file = new File("./specific/enclavePublicKey");
file.deleteOnExit();
parseCommand("--privacy-url", ENCLAVE_URI, "--privacy-public-key-file", file.toString());
verifyOptionsConstraintLoggerCall(
"--privacy-enabled", "--privacy-url", "--privacy-public-key-file");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void privacyWithoutPrivacyPublicKeyFails() {
parseCommand("--privacy-enabled", "--privacy-url", ENCLAVE_URI);
assertThat(commandErrorOutput.toString())
.startsWith("Please specify Enclave public key file path to enable privacy");
}
@Test
public void mustVerifyPrivacyIsDisabled() {
parseCommand();
final ArgumentCaptor<PrivacyParameters> enclaveArg =
ArgumentCaptor.forClass(PrivacyParameters.class);
verify(mockControllerBuilder).privacyParameters(enclaveArg.capture());
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(enclaveArg.getValue().isEnabled()).isEqualTo(false);
}
@Test
public void privacyMultiTenancyIsConfiguredWhenConfiguredWithNecessaryOptions() {
parseCommand(
"--privacy-enabled",
"--rpc-http-authentication-enabled",
"--privacy-multi-tenancy-enabled",
"--rpc-http-authentication-jwt-public-key-file",
"/non/existent/file",
"--min-gas-price",
"0");
final ArgumentCaptor<PrivacyParameters> privacyParametersArgumentCaptor =
ArgumentCaptor.forClass(PrivacyParameters.class);
verify(mockControllerBuilder).privacyParameters(privacyParametersArgumentCaptor.capture());
verify(mockControllerBuilder).build();
assertThat(privacyParametersArgumentCaptor.getValue().isMultiTenancyEnabled()).isTrue();
}
@Test
public void privacyMultiTenancyWithoutAuthenticationFails() {
parseCommand(
"--privacy-enabled",
"--privacy-multi-tenancy-enabled",
"--rpc-http-authentication-jwt-public-key-file",
"/non/existent/file");
assertThat(commandErrorOutput.toString())
.startsWith(
"Privacy multi-tenancy requires either http authentication to be enabled or WebSocket authentication to be enabled");
}
@Test
public void privacyMultiTenancyWithPrivacyPublicKeyFileFails() {
parseCommand(
"--privacy-enabled",
"--rpc-http-authentication-enabled",
"--privacy-multi-tenancy-enabled",
"--rpc-http-authentication-jwt-public-key-file",
"/non/existent/file",
"--privacy-public-key-file",
ENCLAVE_PUBLIC_KEY_PATH);
assertThat(commandErrorOutput.toString())
.startsWith("Privacy multi-tenancy and privacy public key cannot be used together");
}
@Test
public void onChainPrivacyGroupEnabledFlagDefaultValueIsFalse() {
parseCommand(
"--privacy-enabled",
"--privacy-public-key-file",
ENCLAVE_PUBLIC_KEY_PATH,
"--min-gas-price",
"0");
final ArgumentCaptor<PrivacyParameters> privacyParametersArgumentCaptor =
ArgumentCaptor.forClass(PrivacyParameters.class);
verify(mockControllerBuilder).privacyParameters(privacyParametersArgumentCaptor.capture());
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
final PrivacyParameters privacyParameters = privacyParametersArgumentCaptor.getValue();
assertThat(privacyParameters.isOnchainPrivacyGroupsEnabled()).isEqualTo(false);
}
@Test
public void onChainPrivacyGroupEnabledFlagValueIsSet() {
parseCommand(
"--privacy-enabled",
"--privacy-public-key-file",
ENCLAVE_PUBLIC_KEY_PATH,
"--privacy-onchain-groups-enabled",
"--min-gas-price",
"0");
final ArgumentCaptor<PrivacyParameters> privacyParametersArgumentCaptor =
ArgumentCaptor.forClass(PrivacyParameters.class);
verify(mockControllerBuilder).privacyParameters(privacyParametersArgumentCaptor.capture());
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
final PrivacyParameters privacyParameters = privacyParametersArgumentCaptor.getValue();
assertThat(privacyParameters.isOnchainPrivacyGroupsEnabled()).isEqualTo(true);
}
@Test
public void onchainPrivacyAndMultiTenancyCannotBeUsedTogether() {
parseCommand(
"--privacy-enabled",
"--privacy-onchain-groups-enabled",
"--privacy-multi-tenancy-enabled",
"--rpc-http-authentication-jwt-public-key-file",
"/non/existent/file",
"--rpc-http-authentication-enabled");
assertThat(commandErrorOutput.toString())
.startsWith("Privacy multi-tenancy and onchain privacy groups cannot be used together");
}
@Test
public void privacyMarkerTransactionSigningKeyFileRequiredIfMinGasPriceNonZero() {
parseCommand("--privacy-enabled", "--privacy-public-key-file", ENCLAVE_PUBLIC_KEY_PATH);
assertThat(commandErrorOutput.toString())
.startsWith(
"Not a free gas network. --privacy-marker-transaction-signing-key-file must be specified");
}
private Path createFakeGenesisFile(final JsonObject jsonGenesis) throws IOException {
final Path genesisFile = Files.createTempFile("genesisFile", "");
Files.write(genesisFile, encodeJsonGenesis(jsonGenesis).getBytes(UTF_8));
genesisFile.toFile().deleteOnExit();
return genesisFile;
}
private Path createTempFile(final String filename, final String contents) throws IOException {
final Path file = Files.createTempFile(filename, "");
Files.write(file, contents.getBytes(UTF_8));
file.toFile().deleteOnExit();
return file;
}
private Path createTempFile(final String filename, final byte[] contents) throws IOException {
final Path file = Files.createTempFile(filename, "");
Files.write(file, contents);
file.toFile().deleteOnExit();
return file;
}
private String encodeJsonGenesis(final JsonObject jsonGenesis) {
return jsonGenesis.encodePrettily();
}
private static String escapeTomlString(final String s) {
return StringEscapeUtils.escapeJava(s);
}
/**
* Check logger calls
*
   * <p>Here we check the calls to the logger rather than the content of the log line, since we
   * are not testing the logger itself but only the fact that we call it.
   *
   * @param mainOption the main option name
   * @param dependentOptions the names of the dependent options
*/
private void verifyOptionsConstraintLoggerCall(
final String mainOption, final String... dependentOptions) {
verify(mockLogger, atLeast(1))
.warn(
stringArgumentCaptor.capture(),
stringArgumentCaptor.capture(),
stringArgumentCaptor.capture());
assertThat(stringArgumentCaptor.getAllValues().get(0)).isEqualTo(DEPENDENCY_WARNING_MSG);
for (final String option : dependentOptions) {
assertThat(stringArgumentCaptor.getAllValues().get(1)).contains(option);
}
assertThat(stringArgumentCaptor.getAllValues().get(2)).isEqualTo(mainOption);
}
@Test
public void privacyWithFastSyncMustError() {
parseCommand("--sync-mode=FAST", "--privacy-enabled");
assertThat(commandErrorOutput.toString()).contains("Fast sync cannot be enabled with privacy.");
assertThat(commandOutput.toString()).isEmpty();
}
@Test
public void privacyWithPruningMustError() {
parseCommand("--pruning-enabled", "--privacy-enabled");
assertThat(commandErrorOutput.toString()).contains("Pruning cannot be enabled with privacy.");
assertThat(commandOutput.toString()).isEmpty();
}
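  // JUnit rule supplying a temporary directory; used by the static-nodes permissioning test below.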
@Rule public TemporaryFolder testFolder = new TemporaryFolder();
@Test
public void errorIsRaisedIfStaticNodesAreNotAllowed() throws IOException {
final File staticNodesFile = testFolder.newFile("static-nodes.json");
staticNodesFile.deleteOnExit();
final File permissioningConfig = testFolder.newFile("permissioning");
permissioningConfig.deleteOnExit();
final EnodeURL staticNodeURI =
EnodeURL.builder()
.nodeId(
"50203c6bfca6874370e71aecc8958529fd723feb05013dc1abca8fc1fff845c5259faba05852e9dfe5ce172a7d6e7c2a3a5eaa8b541c8af15ea5518bbff5f2fa")
.ipAddress("127.0.0.1")
.useDefaultPorts()
.build();
final EnodeURL allowedNode =
EnodeURL.builder()
.nodeId(
"50203c6bfca6874370e71aecc8958529fd723feb05013dc1abca8fc1fff845c5259faba05852e9dfe5ce172a7d6e7c2a3a5eaa8b541c8af15ea5518bbff5f2fa")
.useDefaultPorts()
.ipAddress("127.0.0.1")
.listeningPort(30304)
.build();
Files.write(
staticNodesFile.toPath(), ("[\"" + staticNodeURI.toString() + "\"]").getBytes(UTF_8));
Files.write(
permissioningConfig.toPath(),
("nodes-allowlist=[\"" + allowedNode.toString() + "\"]").getBytes(UTF_8));
parseCommand(
"--data-path=" + testFolder.getRoot().getPath(),
"--bootnodes",
"--permissions-nodes-config-file-enabled=true",
"--permissions-nodes-config-file=" + permissioningConfig.getPath());
assertThat(commandErrorOutput.toString())
.contains(staticNodeURI.toString(), "not in nodes-allowlist");
}
@Test
public void pendingTransactionRetentionPeriod() {
final int pendingTxRetentionHours = 999;
parseCommand("--tx-pool-retention-hours", String.valueOf(pendingTxRetentionHours));
verify(mockControllerBuilder)
.transactionPoolConfiguration(transactionPoolConfigCaptor.capture());
assertThat(transactionPoolConfigCaptor.getValue().getPendingTxRetentionPeriod())
.isEqualTo(pendingTxRetentionHours);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void transactionPoolPriceBump() {
final Percentage priceBump = Percentage.fromInt(13);
parseCommand("--tx-pool-price-bump", priceBump.toString());
verify(mockControllerBuilder)
.transactionPoolConfiguration(transactionPoolConfigCaptor.capture());
assertThat(transactionPoolConfigCaptor.getValue().getPriceBump()).isEqualTo(priceBump);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
  public void invalidTransactionPoolPriceBumpShouldFail() {
parseCommand("--tx-pool-price-bump", "101");
assertThat(commandErrorOutput.toString())
.contains(
"Invalid value for option '--tx-pool-price-bump'",
"should be a number between 0 and 100 inclusive");
}
@Test
public void transactionPoolTxFeeCap() {
final Wei txFeeCap = Wei.fromEth(2);
parseCommand("--rpc-tx-feecap", txFeeCap.toString());
verify(mockControllerBuilder)
.transactionPoolConfiguration(transactionPoolConfigCaptor.capture());
assertThat(transactionPoolConfigCaptor.getValue().getTxFeeCap()).isEqualTo(txFeeCap);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
  public void invalidTransactionPoolTxFeeCapShouldFail() {
parseCommand("--rpc-tx-feecap", "abcd");
assertThat(commandErrorOutput.toString())
.contains("Invalid value for option '--rpc-tx-feecap'", "cannot convert 'abcd' to Wei");
}
@Test
public void txMessageKeepAliveSecondsWithInvalidInputShouldFail() {
parseCommand("--Xincoming-tx-messages-keep-alive-seconds", "acbd");
Mockito.verifyZeroInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains(
"Invalid value for option '--Xincoming-tx-messages-keep-alive-seconds': 'acbd' is not an int");
}
@Test
public void tomlThatHasInvalidOptions() throws IOException {
final URL configFile = this.getClass().getResource("/complete_config.toml");
    // update the genesis file path, add a "similar" valid option, and append invalid options
final Path genesisFile = createFakeGenesisFile(GENESIS_VALID_JSON);
final String updatedConfig =
Resources.toString(configFile, UTF_8)
.replace("/opt/besu/genesis.json", escapeTomlString(genesisFile.toString()))
.replace("rpc-http-api", "rpc-http-apis")
+ System.lineSeparator()
+ "invalid_option=true"
+ System.lineSeparator()
+ "invalid_option2=true";
final Path toml = createTempFile("toml", updatedConfig.getBytes(UTF_8));
// Parse it.
parseCommand("--config-file", toml.toString());
assertThat(commandErrorOutput.toString())
.contains("Unknown options in TOML configuration file: invalid_option, invalid_option2");
}
@Test
public void targetGasLimitIsEnabledWhenSpecified() throws Exception {
parseCommand("--target-gas-limit=10000000");
@SuppressWarnings("unchecked")
final ArgumentCaptor<Optional<Long>> targetGasLimitArg =
ArgumentCaptor.forClass(Optional.class);
verify(mockControllerBuilder).targetGasLimit(targetGasLimitArg.capture());
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(targetGasLimitArg.getValue()).isEqualTo(Optional.of(10_000_000L));
}
@Test
public void targetGasLimitIsDisabledWhenNotSpecified() throws Exception {
parseCommand();
@SuppressWarnings("unchecked")
final ArgumentCaptor<Optional<Long>> targetGasLimitArg =
ArgumentCaptor.forClass(Optional.class);
verify(mockControllerBuilder).targetGasLimit(targetGasLimitArg.capture());
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(targetGasLimitArg.getValue()).isEqualTo(Optional.empty());
}
@Test
public void requiredBlocksSetWhenSpecified() {
final long blockNumber = 8675309L;
final String hash = "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef";
parseCommand("--required-block=" + blockNumber + "=" + hash);
@SuppressWarnings("unchecked")
final ArgumentCaptor<Map<Long, Hash>> requiredBlocksArg = ArgumentCaptor.forClass(Map.class);
verify(mockControllerBuilder).requiredBlocks(requiredBlocksArg.capture());
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(requiredBlocksArg.getValue()).containsOnlyKeys(blockNumber);
assertThat(requiredBlocksArg.getValue())
.containsEntry(blockNumber, Hash.fromHexStringLenient(hash));
}
@Test
public void requiredBlocksEmptyWhenNotSpecified() {
parseCommand();
@SuppressWarnings("unchecked")
final ArgumentCaptor<Map<Long, Hash>> requiredBlocksArg = ArgumentCaptor.forClass(Map.class);
verify(mockControllerBuilder).requiredBlocks(requiredBlocksArg.capture());
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(requiredBlocksArg.getValue()).isEmpty();
}
@Test
  public void requiredBlocksMultipleBlocksOneArg() {
final long block1 = 8675309L;
final long block2 = 5551212L;
final String hash1 = "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef";
final String hash2 = "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef";
parseCommand("--required-block=" + block1 + "=" + hash1 + "," + block2 + "=" + hash2);
@SuppressWarnings("unchecked")
final ArgumentCaptor<Map<Long, Hash>> requiredBlocksArg = ArgumentCaptor.forClass(Map.class);
verify(mockControllerBuilder).requiredBlocks(requiredBlocksArg.capture());
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(requiredBlocksArg.getValue()).containsOnlyKeys(block1, block2);
assertThat(requiredBlocksArg.getValue())
.containsEntry(block1, Hash.fromHexStringLenient(hash1));
assertThat(requiredBlocksArg.getValue())
.containsEntry(block2, Hash.fromHexStringLenient(hash2));
}
@Test
public void requiredBlocksMultipleBlocksTwoArgs() {
final long block1 = 8675309L;
final long block2 = 5551212L;
final String hash1 = "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef";
final String hash2 = "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef";
parseCommand(
"--required-block=" + block1 + "=" + hash1, "--required-block=" + block2 + "=" + hash2);
@SuppressWarnings("unchecked")
final ArgumentCaptor<Map<Long, Hash>> requiredBlocksArg = ArgumentCaptor.forClass(Map.class);
verify(mockControllerBuilder).requiredBlocks(requiredBlocksArg.capture());
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(requiredBlocksArg.getValue()).containsOnlyKeys(block1, block2);
assertThat(requiredBlocksArg.getValue())
.containsEntry(block1, Hash.fromHexStringLenient(hash1));
assertThat(requiredBlocksArg.getValue())
.containsEntry(block2, Hash.fromHexStringLenient(hash2));
}
@Test
public void httpAuthenticationPublicKeyIsConfigured() throws IOException {
final Path publicKey = Files.createTempFile("public_key", "");
parseCommand("--rpc-http-authentication-jwt-public-key-file", publicKey.toString());
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getAuthenticationPublicKeyFile().getPath())
.isEqualTo(publicKey.toString());
}
@Test
public void httpAuthenticationWithoutRequiredConfiguredOptionsMustFail() {
parseCommand("--rpc-http-enabled", "--rpc-http-authentication-enabled");
verifyNoInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains(
"Unable to authenticate JSON-RPC HTTP endpoint without a supplied credentials file or authentication public key file");
}
@Test
public void wsAuthenticationPublicKeyIsConfigured() throws IOException {
final Path publicKey = Files.createTempFile("public_key", "");
parseCommand("--rpc-ws-authentication-jwt-public-key-file", publicKey.toString());
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getAuthenticationPublicKeyFile().getPath())
.isEqualTo(publicKey.toString());
}
@Test
public void wsAuthenticationWithoutRequiredConfiguredOptionsMustFail() {
parseCommand("--rpc-ws-enabled", "--rpc-ws-authentication-enabled");
verifyNoInteractions(mockRunnerBuilder);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.contains(
"Unable to authenticate JSON-RPC WebSocket endpoint without a supplied credentials file or authentication public key file");
}
@Test
public void privHttpApisWithPrivacyDisabledLogsWarning() {
parseCommand("--privacy-enabled=false", "--rpc-http-api", "PRIV", "--rpc-http-enabled");
verify(mockRunnerBuilder).build();
verify(mockLogger)
.warn("Privacy is disabled. Cannot use EEA/PRIV API methods when not using Privacy.");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void privWsApisWithPrivacyDisabledLogsWarning() {
parseCommand("--privacy-enabled=false", "--rpc-ws-api", "PRIV", "--rpc-ws-enabled");
verify(mockRunnerBuilder).build();
verify(mockLogger)
.warn("Privacy is disabled. Cannot use EEA/PRIV API methods when not using Privacy.");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void eeaHttpApisWithPrivacyDisabledLogsWarning() {
parseCommand("--privacy-enabled=false", "--rpc-http-api", "EEA", "--rpc-http-enabled");
verify(mockRunnerBuilder).build();
verify(mockLogger)
.warn("Privacy is disabled. Cannot use EEA/PRIV API methods when not using Privacy.");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void eeaWsApisWithPrivacyDisabledLogsWarning() {
parseCommand("--privacy-enabled=false", "--rpc-ws-api", "EEA", "--rpc-ws-enabled");
verify(mockRunnerBuilder).build();
verify(mockLogger)
.warn("Privacy is disabled. Cannot use EEA/PRIV API methods when not using Privacy.");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void privEnclaveKeyFileDoesNotExist() {
parseCommand("--privacy-enabled=true", "--privacy-public-key-file", "/non/existent/file");
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).startsWith("Problem with privacy-public-key-file");
assertThat(commandErrorOutput.toString()).contains("No such file");
}
@Test
public void privEnclaveKeyFileInvalidContentTooShort() throws IOException {
final Path file = createTempFile("privacy.key", "lkjashdfiluhwelrk");
parseCommand("--privacy-enabled=true", "--privacy-public-key-file", file.toString());
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.startsWith("Contents of privacy-public-key-file invalid");
assertThat(commandErrorOutput.toString()).contains("needs to be 44 characters long");
}
@Test
public void privEnclaveKeyFileInvalidContentNotValidBase64() throws IOException {
final Path file = createTempFile("privacy.key", "l*jashdfillk9ashdfillkjashdfillkjashdfilrtg=");
parseCommand("--privacy-enabled=true", "--privacy-public-key-file", file.toString());
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString())
.startsWith("Contents of privacy-public-key-file invalid");
assertThat(commandErrorOutput.toString()).contains("Illegal base64 character");
}
@Test
public void logLevelHasNullAsDefaultValue() {
final TestBesuCommand command = parseCommand();
assertThat(command.getLogLevel()).isNull();
}
@Test
public void logLevelIsSetByLoggingOption() {
final TestBesuCommand command = parseCommand("--logging", "WARN");
assertThat(command.getLogLevel()).isEqualTo(Level.WARN);
}
@Test
public void assertThatEnablingExperimentalEIPsWorks() {
parseCommand("--Xeip1559-enabled=true");
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(ExperimentalEIPs.eip1559Enabled).isTrue();
}
@Test
public void assertThatDisablingExperimentalEIPsWorks() {
parseCommand("--Xeip1559-enabled=false");
assertThat(commandErrorOutput.toString()).isEmpty();
assertThat(ExperimentalEIPs.eip1559Enabled).isFalse();
}
@Test
public void assertThatDefaultHttpTimeoutSecondsWorks() {
parseCommand();
assertThat(commandErrorOutput.toString()).isEmpty();
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHttpTimeoutSec())
.isEqualTo(TimeoutOptions.defaultOptions().getTimeoutSeconds());
}
@Test
public void assertThatHttpTimeoutSecondsWorks() {
parseCommand("--Xhttp-timeout-seconds=513");
assertThat(commandErrorOutput.toString()).isEmpty();
verify(mockRunnerBuilder).jsonRpcConfiguration(jsonRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(jsonRpcConfigArgumentCaptor.getValue().getHttpTimeoutSec()).isEqualTo(513);
}
@Test
public void assertThatInvalidHttpTimeoutSecondsFails() {
parseCommand("--Xhttp-timeout-seconds=abc");
assertThat(commandErrorOutput.toString())
.contains("Invalid value for option", "--Xhttp-timeout-seconds", "abc", "is not a long");
}
@Test
public void assertThatDefaultWsTimeoutSecondsWorks() {
parseCommand();
assertThat(commandErrorOutput.toString()).isEmpty();
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getTimeoutSec())
.isEqualTo(TimeoutOptions.defaultOptions().getTimeoutSeconds());
}
@Test
public void assertThatWsTimeoutSecondsWorks() {
parseCommand("--Xws-timeout-seconds=11112018");
assertThat(commandErrorOutput.toString()).isEmpty();
verify(mockRunnerBuilder).webSocketConfiguration(wsRpcConfigArgumentCaptor.capture());
verify(mockRunnerBuilder).build();
assertThat(wsRpcConfigArgumentCaptor.getValue().getTimeoutSec()).isEqualTo(11112018);
}
@Test
public void assertThatInvalidWsTimeoutSecondsFails() {
parseCommand("--Xws-timeout-seconds=abc");
assertThat(commandErrorOutput.toString())
.contains("Invalid value for option", "--Xws-timeout-seconds", "abc", "is not a long");
}
}
| 1 | 23,478 | Two more tests - no ports specified and every possible port specified - p2pPort, graphQLHttpPort, rpcHttpPort, rpcWsPort, metricsPort, metricsPushPort, stratumPort | hyperledger-besu | java |
@@ -9,6 +9,7 @@
# Copyright (c) 2019-2020 Pierre Sassoulas <[email protected]>
# Copyright (c) 2019 Ashley Whetter <[email protected]>
# Copyright (c) 2019 Taewon D. Kim <[email protected]>
+# Copyright (c) 2020 Eli Fine <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING | 1 | # Copyright (c) 2010, 2012, 2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2012 Ry4an Brase <[email protected]>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016-2018, 2020 Claudiu Popa <[email protected]>
# Copyright (c) 2016 Derek Gustafson <[email protected]>
# Copyright (c) 2018 Scott Worley <[email protected]>
# Copyright (c) 2018 Sushobhit <[email protected]>
# Copyright (c) 2019-2020 Pierre Sassoulas <[email protected]>
# Copyright (c) 2019 Ashley Whetter <[email protected]>
# Copyright (c) 2019 Taewon D. Kim <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
from contextlib import redirect_stdout
from io import StringIO
from pathlib import Path
import pytest
from pylint.checkers import similar
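# Paths to the fixture files exercised by the duplicate-code ("similar") checker tests below.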
INPUT = Path(__file__).parent / ".." / "input"
SIMILAR1 = str(INPUT / "similar1")
SIMILAR2 = str(INPUT / "similar2")
MULTILINE = str(INPUT / "multiline-import")
HIDE_CODE_WITH_IMPORTS = str(INPUT / "hide_code_with_imports.py")
def test_ignore_comments():
output = StringIO()
with redirect_stdout(output), pytest.raises(SystemExit) as ex:
similar.Run(["--ignore-comments", SIMILAR1, SIMILAR2])
assert ex.value.code == 0
assert (
output.getvalue().strip()
== (
"""
10 similar lines in 2 files
==%s:0
==%s:0
import one
from two import two
three
four
five
six
seven
eight
nine
''' ten
TOTAL lines=60 duplicates=10 percent=16.67
"""
% (SIMILAR1, SIMILAR2)
).strip()
)
def test_ignore_docstrings():
output = StringIO()
with redirect_stdout(output), pytest.raises(SystemExit) as ex:
similar.Run(["--ignore-docstrings", SIMILAR1, SIMILAR2])
assert ex.value.code == 0
assert (
output.getvalue().strip()
== (
"""
8 similar lines in 2 files
==%s:6
==%s:6
seven
eight
nine
''' ten
ELEVEN
twelve '''
thirteen
fourteen
5 similar lines in 2 files
==%s:0
==%s:0
import one
from two import two
three
four
five
TOTAL lines=60 duplicates=13 percent=21.67
"""
% ((SIMILAR1, SIMILAR2) * 2)
).strip()
)
def test_ignore_imports():
output = StringIO()
with redirect_stdout(output), pytest.raises(SystemExit) as ex:
similar.Run(["--ignore-imports", SIMILAR1, SIMILAR2])
assert ex.value.code == 0
assert (
output.getvalue().strip()
== """
TOTAL lines=60 duplicates=0 percent=0.00
""".strip()
)
def test_multiline_imports():
output = StringIO()
with redirect_stdout(output), pytest.raises(SystemExit) as ex:
similar.Run([MULTILINE, MULTILINE])
assert ex.value.code == 0
assert (
output.getvalue().strip()
== (
"""
8 similar lines in 2 files
==%s:0
==%s:0
from foo import (
bar,
baz,
quux,
quuux,
quuuux,
quuuuux,
)
TOTAL lines=16 duplicates=8 percent=50.00
"""
% (MULTILINE, MULTILINE)
).strip()
)
def test_ignore_multiline_imports():
output = StringIO()
with redirect_stdout(output), pytest.raises(SystemExit) as ex:
similar.Run(["--ignore-imports", MULTILINE, MULTILINE])
assert ex.value.code == 0
assert (
output.getvalue().strip()
== """
TOTAL lines=16 duplicates=0 percent=0.00
""".strip()
)
def test_no_hide_code_with_imports():
output = StringIO()
with redirect_stdout(output), pytest.raises(SystemExit) as ex:
similar.Run(["--ignore-imports"] + 2 * [HIDE_CODE_WITH_IMPORTS])
assert ex.value.code == 0
assert "TOTAL lines=32 duplicates=16 percent=50.00" in output.getvalue()
def test_ignore_nothing():
output = StringIO()
with redirect_stdout(output), pytest.raises(SystemExit) as ex:
similar.Run([SIMILAR1, SIMILAR2])
assert ex.value.code == 0
assert (
output.getvalue().strip()
== (
"""
5 similar lines in 2 files
==%s:0
==%s:0
import one
from two import two
three
four
five
TOTAL lines=60 duplicates=5 percent=8.33
"""
% (SIMILAR1, SIMILAR2)
).strip()
)
def test_help():
output = StringIO()
with redirect_stdout(output):
try:
similar.Run(["--help"])
except SystemExit as ex:
assert ex.code == 0
else:
pytest.fail("not system exit")
def test_no_args():
output = StringIO()
with redirect_stdout(output):
try:
similar.Run([])
except SystemExit as ex:
assert ex.code == 1
else:
pytest.fail("not system exit")
| 1 | 12,377 | Don't worry about that next time, it's automated :) | PyCQA-pylint | py |
@@ -172,6 +172,7 @@ def pytest_configure(config):
# pylint: disable=unused-variable
if config.webengine:
import PyQt5.QtWebEngineWidgets
+ # pylint: enable=unused-variable
@pytest.fixture(scope='session', autouse=True) | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=unused-import,wildcard-import,unused-wildcard-import
"""The qutebrowser test suite conftest file."""
import os
import sys
import warnings
import pytest
import hypothesis
from PyQt5.QtCore import PYQT_VERSION
pytest.register_assert_rewrite('helpers')
from helpers import logfail
from helpers.logfail import fail_on_logging
from helpers.messagemock import message_mock
from helpers.fixtures import *
from qutebrowser.utils import qtutils, standarddir, usertypes, utils
from qutebrowser.misc import objects
import qutebrowser.app # To register commands
# Set hypothesis settings
hypothesis.settings.register_profile('default',
hypothesis.settings(deadline=600))
hypothesis.settings.load_profile('default')
def _apply_platform_markers(config, item):
"""Apply a skip marker to a given item."""
markers = [
('posix', not utils.is_posix, "Requires a POSIX os"),
('windows', not utils.is_windows, "Requires Windows"),
('linux', not utils.is_linux, "Requires Linux"),
('mac', not utils.is_mac, "Requires macOS"),
('not_mac', utils.is_mac, "Skipped on macOS"),
('not_frozen', getattr(sys, 'frozen', False),
"Can't be run when frozen"),
('frozen', not getattr(sys, 'frozen', False),
"Can only run when frozen"),
('ci', 'CI' not in os.environ, "Only runs on CI."),
('no_ci', 'CI' in os.environ, "Skipped on CI."),
('issue2478', utils.is_windows and config.webengine,
"Broken with QtWebEngine on Windows"),
]
for searched_marker, condition, default_reason in markers:
marker = item.get_marker(searched_marker)
if not marker or not condition:
continue
if 'reason' in marker.kwargs:
reason = '{}: {}'.format(default_reason, marker.kwargs['reason'])
del marker.kwargs['reason']
else:
reason = default_reason + '.'
skipif_marker = pytest.mark.skipif(condition, *marker.args,
reason=reason, **marker.kwargs)
item.add_marker(skipif_marker)
def pytest_collection_modifyitems(config, items):
"""Handle custom markers.
pytest hook called after collection has been performed.
Adds a marker named "gui" which can be used to filter gui tests from the
command line.
For example:
pytest -m "not gui" # run all tests except gui tests
pytest -m "gui" # run only gui tests
It also handles the platform specific markers by translating them to skipif
markers.
Args:
items: list of _pytest.main.Node items, where each item represents
a python test that will be executed.
Reference:
http://pytest.org/latest/plugins.html
"""
remaining_items = []
deselected_items = []
for item in items:
deselected = False
if 'qapp' in getattr(item, 'fixturenames', ()):
item.add_marker('gui')
if hasattr(item, 'module'):
module_path = os.path.relpath(
item.module.__file__,
os.path.commonprefix([__file__, item.module.__file__]))
module_root_dir = module_path.split(os.sep)[0]
assert module_root_dir in ['end2end', 'unit', 'helpers',
'test_conftest.py']
if module_root_dir == 'end2end':
item.add_marker(pytest.mark.end2end)
_apply_platform_markers(config, item)
if item.get_marker('xfail_norun'):
item.add_marker(pytest.mark.xfail(run=False))
if item.get_marker('js_prompt'):
if config.webengine:
item.add_marker(pytest.mark.skipif(
PYQT_VERSION <= 0x050700,
reason='JS prompts are not supported with PyQt 5.7'))
if deselected:
deselected_items.append(item)
else:
remaining_items.append(item)
config.hook.pytest_deselected(items=deselected_items)
items[:] = remaining_items
def pytest_ignore_collect(path):
"""Ignore BDD tests if we're unable to run them."""
skip_bdd = hasattr(sys, 'frozen')
rel_path = path.relto(os.path.dirname(__file__))
return rel_path == os.path.join('end2end', 'features') and skip_bdd
@pytest.fixture(scope='session')
def qapp(qapp):
"""Change the name of the QApplication instance."""
qapp.setApplicationName('qute_test')
return qapp
def pytest_addoption(parser):
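    """Add qutebrowser-specific command line options to pytest."""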
parser.addoption('--qute-delay', action='store', default=0, type=int,
help="Delay between qutebrowser commands.")
parser.addoption('--qute-profile-subprocs', action='store_true',
default=False, help="Run cProfile for subprocesses.")
parser.addoption('--qute-bdd-webengine', action='store_true',
help='Use QtWebEngine for BDD tests')
def pytest_configure(config):
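    """Set config.webengine from --qute-bdd-webengine or the QUTE_BDD_WEBENGINE env var."""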
webengine_arg = config.getoption('--qute-bdd-webengine')
webengine_env = os.environ.get('QUTE_BDD_WEBENGINE', '')
config.webengine = bool(webengine_arg or webengine_env)
# Fail early if QtWebEngine is not available
# pylint: disable=unused-variable
if config.webengine:
import PyQt5.QtWebEngineWidgets
@pytest.fixture(scope='session', autouse=True)
def check_display(request):
if (not request.config.getoption('--no-xvfb') and
'QUTE_BUILDBOT' in os.environ and
request.config.xvfb is not None):
raise Exception("Xvfb is running on buildbot!")
if utils.is_linux and not os.environ.get('DISPLAY', ''):
raise Exception("No display and no Xvfb available!")
@pytest.fixture(autouse=True)
def set_backend(monkeypatch, request):
"""Make sure the backend global is set."""
backend = (usertypes.Backend.QtWebEngine if request.config.webengine
else usertypes.Backend.QtWebKit)
monkeypatch.setattr(objects, 'backend', backend)
@pytest.fixture(autouse=True)
def apply_fake_os(monkeypatch, request):
fake_os = request.node.get_marker('fake_os')
if not fake_os:
return
name = fake_os.args[0]
mac = False
windows = False
linux = False
posix = False
if name == 'unknown':
pass
elif name == 'mac':
mac = True
posix = True
elif name == 'windows':
windows = True
elif name == 'linux':
linux = True
posix = True
else:
raise ValueError("Invalid fake_os {}".format(name))
monkeypatch.setattr('qutebrowser.utils.utils.is_mac', mac)
monkeypatch.setattr('qutebrowser.utils.utils.is_linux', linux)
monkeypatch.setattr('qutebrowser.utils.utils.is_windows', windows)
monkeypatch.setattr('qutebrowser.utils.utils.is_posix', posix)
@pytest.fixture(scope='session', autouse=True)
def check_yaml_c_exts():
"""Make sure PyYAML C extensions are available on Travis."""
if 'TRAVIS' in os.environ:
from yaml import CLoader # pylint: disable=unused-variable
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""Make test information available in fixtures.
See http://pytest.org/latest/example/simple.html#making-test-result-information-available-in-fixtures
"""
outcome = yield
rep = outcome.get_result()
setattr(item, "rep_" + rep.when, rep)
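A note on the "# pylint: enable=unused-variable" line added in the patch above: a standalone
"# pylint: disable=..." comment placed inside a function is block-scoped, so it stops applying at
the end of that function and needs no matching enable comment. A minimal, illustrative Python
sketch (the function names below are made up for this example, not taken from the codebase):

def configure(webengine):
    # pylint: disable=unused-import
    # The disable above only applies through the end of this function,
    # so no matching "enable" comment is required.
    if webengine:
        import PyQt5.QtWebEngineWidgets


def elsewhere():
    # Outside that block the disable no longer applies, so pylint checks
    # this function as usual; an unused import here would be reported again.
    import json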
| 1 | 19,422 | No need for this, as pylint already only turns things off for this function and it's needed for the entire function. | qutebrowser-qutebrowser | py |
@@ -103,10 +103,11 @@ const (
fluentTagDockerFirelensV2Format = "%s.%s"
// Environment variables are needed for firelens
- fluentNetworkHost = "FLUENT_HOST"
- fluentNetworkPort = "FLUENT_PORT"
- FluentNetworkPortValue = "24224"
- FluentAWSVPCHostValue = "127.0.0.1"
+ fluentNetworkHost = "FLUENT_HOST"
+ fluentNetworkPort = "FLUENT_PORT"
+ FluentNetworkPortValue = "24224"
+ AWSVPCHostValue = "127.0.0.1"
+ opentelemetryNetworkHost = "OPEN_TELEMETRY_HOST"
defaultMonitorExecAgentsInterval = 15 * time.Minute
| 1 | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Package engine contains the core logic for managing tasks
package engine
import (
"context"
"fmt"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/aws/amazon-ecs-agent/agent/containerresource"
"github.com/aws/amazon-ecs-agent/agent/containerresource/containerstatus"
"github.com/aws/amazon-ecs-agent/agent/logger"
"github.com/aws/amazon-ecs-agent/agent/logger/field"
"github.com/aws/amazon-ecs-agent/agent/api"
apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
apitask "github.com/aws/amazon-ecs-agent/agent/api/task"
apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status"
apierrors "github.com/aws/amazon-ecs-agent/agent/apierrors"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/containermetadata"
"github.com/aws/amazon-ecs-agent/agent/credentials"
"github.com/aws/amazon-ecs-agent/agent/data"
"github.com/aws/amazon-ecs-agent/agent/dockerclient"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi"
"github.com/aws/amazon-ecs-agent/agent/ecscni"
"github.com/aws/amazon-ecs-agent/agent/engine/dependencygraph"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerstate"
"github.com/aws/amazon-ecs-agent/agent/engine/execcmd"
"github.com/aws/amazon-ecs-agent/agent/eventstream"
"github.com/aws/amazon-ecs-agent/agent/metrics"
"github.com/aws/amazon-ecs-agent/agent/statechange"
"github.com/aws/amazon-ecs-agent/agent/taskresource"
"github.com/aws/amazon-ecs-agent/agent/taskresource/credentialspec"
"github.com/aws/amazon-ecs-agent/agent/taskresource/firelens"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/aws/amazon-ecs-agent/agent/utils/retry"
utilsync "github.com/aws/amazon-ecs-agent/agent/utils/sync"
"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
dockercontainer "github.com/docker/docker/api/types/container"
"github.com/cihub/seelog"
"github.com/docker/docker/api/types"
"github.com/pkg/errors"
)
const (
	// DockerEndpointEnvVariable is the environment variable that can override the Docker endpoint
DockerEndpointEnvVariable = "DOCKER_HOST"
// DockerDefaultEndpoint is the default value for the Docker endpoint
DockerDefaultEndpoint = "unix:///var/run/docker.sock"
labelPrefix = "com.amazonaws.ecs."
labelTaskARN = labelPrefix + "task-arn"
labelContainerName = labelPrefix + "container-name"
labelTaskDefinitionFamily = labelPrefix + "task-definition-family"
labelTaskDefinitionVersion = labelPrefix + "task-definition-version"
labelCluster = labelPrefix + "cluster"
minGetIPBridgeTimeout = time.Second
maxGetIPBridgeTimeout = 10 * time.Second
getIPBridgeRetryJitterMultiplier = 0.2
getIPBridgeRetryDelayMultiplier = 2
ipamCleanupTmeout = 5 * time.Second
minEngineConnectRetryDelay = 2 * time.Second
maxEngineConnectRetryDelay = 200 * time.Second
engineConnectRetryJitterMultiplier = 0.20
engineConnectRetryDelayMultiplier = 1.5
// logDriverTypeFirelens is the log driver type for containers that want to use the firelens container to send logs.
logDriverTypeFirelens = "awsfirelens"
logDriverTypeFluentd = "fluentd"
logDriverTag = "tag"
logDriverFluentdAddress = "fluentd-address"
dataLogDriverPathFirelensV1 = "/data/firelens/"
dataLogDriverPathFirelensV2 = "/data/telemetry/"
logDriverAsyncConnect = "fluentd-async-connect"
logDriverSubSecondPrecision = "fluentd-sub-second-precision"
logDriverBufferLimit = "fluentd-buffer-limit"
dataLogDriverSocketPath = "/socket/fluent.sock"
socketPathPrefix = "unix://"
	// fluentTagDockerFirelensV1Format is the format for the firelens v1 log tag, which is "containerName-firelens-taskID"
fluentTagDockerFirelensV1Format = "%s-firelens-%s"
	// fluentTagDockerFirelensV2Format is the format for the firelens v2 log tag, which is "taskID.containerName"
fluentTagDockerFirelensV2Format = "%s.%s"
// Environment variables are needed for firelens
fluentNetworkHost = "FLUENT_HOST"
fluentNetworkPort = "FLUENT_PORT"
FluentNetworkPortValue = "24224"
FluentAWSVPCHostValue = "127.0.0.1"
defaultMonitorExecAgentsInterval = 15 * time.Minute
defaultStopContainerBackoffMin = time.Second
defaultStopContainerBackoffMax = time.Second * 5
stopContainerBackoffJitter = 0.2
stopContainerBackoffMultiplier = 1.3
stopContainerMaxRetryCount = 5
)
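// newExponentialBackoff aliases retry.NewExponentialBackoff; keeping it in a package-level variable lets the backoff factory be substituted (for example, in tests).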
var newExponentialBackoff = retry.NewExponentialBackoff
// DockerTaskEngine is a state machine for managing a task and its containers
// in ECS.
//
// DockerTaskEngine implements an abstraction over the DockerGoClient so that
// it does not have to know about tasks, only containers.
// The DockerTaskEngine interacts with Docker to implement a TaskEngine.
type DockerTaskEngine struct {
// implements TaskEngine
cfg *config.Config
ctx context.Context
initialized bool
mustInitLock sync.Mutex
// state stores all tasks this task engine is aware of, including their
// current state and mappings to/from dockerId and name.
// This is used to checkpoint state to disk so tasks may survive agent
// failures or updates
state dockerstate.TaskEngineState
managedTasks map[string]*managedTask
taskStopGroup *utilsync.SequentialWaitGroup
events <-chan dockerapi.DockerContainerChangeEvent
stateChangeEvents chan statechange.Event
client dockerapi.DockerClient
dataClient data.Client
cniClient ecscni.CNIClient
containerChangeEventStream *eventstream.EventStream
stopEngine context.CancelFunc
// tasksLock is a mutex that the task engine must acquire before changing
// any task's state which it manages. Since this is a lock that encompasses
// all tasks, it must not acquire it for any significant duration
// The write mutex should be taken when adding and removing tasks from managedTasks.
tasksLock sync.RWMutex
credentialsManager credentials.Manager
_time ttime.Time
_timeOnce sync.Once
imageManager ImageManager
containerStatusToTransitionFunction map[containerstatus.ContainerStatus]transitionApplyFunc
metadataManager containermetadata.Manager
// taskSteadyStatePollInterval is the duration that a managed task waits
// once the task gets into steady state before polling the state of all of
// the task's containers to re-evaluate if the task is still in steady state
// This is set to defaultTaskSteadyStatePollInterval in production code.
// This can be used by tests that are looking to ensure that the steady state
// verification logic gets executed to set it to a low interval
taskSteadyStatePollInterval time.Duration
taskSteadyStatePollIntervalJitter time.Duration
resourceFields *taskresource.ResourceFields
// handleDelay is a function used to delay cleanup. Implementation is
// swappable for testing
handleDelay func(duration time.Duration)
monitorExecAgentsTicker *time.Ticker
execCmdMgr execcmd.Manager
monitorExecAgentsInterval time.Duration
stopContainerBackoffMin time.Duration
stopContainerBackoffMax time.Duration
namespaceHelper ecscni.NamespaceHelper
}
// NewDockerTaskEngine returns a created, but uninitialized, DockerTaskEngine.
// The distinction between created and initialized is that when created it may
// be serialized/deserialized, but it will not communicate with docker until it
// is also initialized.
func NewDockerTaskEngine(cfg *config.Config,
client dockerapi.DockerClient,
credentialsManager credentials.Manager,
containerChangeEventStream *eventstream.EventStream,
imageManager ImageManager,
state dockerstate.TaskEngineState,
metadataManager containermetadata.Manager,
resourceFields *taskresource.ResourceFields,
execCmdMgr execcmd.Manager) *DockerTaskEngine {
dockerTaskEngine := &DockerTaskEngine{
cfg: cfg,
client: client,
dataClient: data.NewNoopClient(),
state: state,
managedTasks: make(map[string]*managedTask),
taskStopGroup: utilsync.NewSequentialWaitGroup(),
stateChangeEvents: make(chan statechange.Event),
credentialsManager: credentialsManager,
containerChangeEventStream: containerChangeEventStream,
imageManager: imageManager,
cniClient: ecscni.NewClient(cfg.CNIPluginsPath),
metadataManager: metadataManager,
taskSteadyStatePollInterval: defaultTaskSteadyStatePollInterval,
taskSteadyStatePollIntervalJitter: defaultTaskSteadyStatePollIntervalJitter,
resourceFields: resourceFields,
handleDelay: time.Sleep,
execCmdMgr: execCmdMgr,
monitorExecAgentsInterval: defaultMonitorExecAgentsInterval,
stopContainerBackoffMin: defaultStopContainerBackoffMin,
stopContainerBackoffMax: defaultStopContainerBackoffMax,
namespaceHelper: ecscni.NewNamespaceHelper(client),
}
dockerTaskEngine.initializeContainerStatusToTransitionFunction()
return dockerTaskEngine
}
func (engine *DockerTaskEngine) initializeContainerStatusToTransitionFunction() {
containerStatusToTransitionFunction := map[containerstatus.ContainerStatus]transitionApplyFunc{
containerstatus.ContainerPulled: engine.pullContainer,
containerstatus.ContainerCreated: engine.createContainer,
containerstatus.ContainerRunning: engine.startContainer,
containerstatus.ContainerResourcesProvisioned: engine.provisionContainerResources,
containerstatus.ContainerStopped: engine.stopContainer,
}
engine.containerStatusToTransitionFunction = containerStatusToTransitionFunction
}
// ImagePullDeleteLock ensures that pulls and deletes do not run at the same time and pulls can be run at the same time for docker >= 1.11.1
// Pulls are serialized as a temporary workaround for a devicemapper issue. (see https://github.com/docker/docker/issues/9718)
// Deletes must not run at the same time as pulls to prevent deletion of images that are being used to launch new tasks.
var ImagePullDeleteLock sync.RWMutex
// UnmarshalJSON restores a previously marshaled task-engine state from json
func (engine *DockerTaskEngine) UnmarshalJSON(data []byte) error {
return engine.state.UnmarshalJSON(data)
}
// MarshalJSON marshals into state directly
func (engine *DockerTaskEngine) MarshalJSON() ([]byte, error) {
return engine.state.MarshalJSON()
}
// Init initializes a DockerTaskEngine such that it may communicate with docker
// and operate normally.
// This function must be called before any other function, except serializing and deserializing, can succeed without error.
func (engine *DockerTaskEngine) Init(ctx context.Context) error {
derivedCtx, cancel := context.WithCancel(ctx)
engine.stopEngine = cancel
engine.ctx = derivedCtx
// Open the event stream before we sync state so that e.g. if a container
// goes from running to stopped after we sync with it as "running" we still
// have the "went to stopped" event pending so we can be up to date.
err := engine.openEventstream(derivedCtx)
if err != nil {
return err
}
engine.synchronizeState()
// Now catch up and start processing new events per normal
go engine.handleDockerEvents(derivedCtx)
engine.initialized = true
go engine.startPeriodicExecAgentsMonitoring(derivedCtx)
return nil
}
func (engine *DockerTaskEngine) startPeriodicExecAgentsMonitoring(ctx context.Context) {
engine.monitorExecAgentsTicker = time.NewTicker(engine.monitorExecAgentsInterval)
for {
select {
case <-engine.monitorExecAgentsTicker.C:
go engine.monitorExecAgentProcesses(ctx)
case <-ctx.Done():
engine.monitorExecAgentsTicker.Stop()
return
}
}
}
func (engine *DockerTaskEngine) monitorExecAgentProcesses(ctx context.Context) {
// TODO: [ecs-exec]add jitter between containers to not overload docker with top calls
engine.tasksLock.RLock()
defer engine.tasksLock.RUnlock()
for _, mTask := range engine.managedTasks {
task := mTask.Task
if task.GetKnownStatus() != apitaskstatus.TaskRunning {
continue
}
for _, c := range task.Containers {
if execcmd.IsExecEnabledContainer(c) {
if ma, _ := c.GetManagedAgentByName(execcmd.ExecuteCommandAgentName); !ma.InitFailed {
go engine.monitorExecAgentRunning(ctx, mTask, c)
}
}
}
}
}
func (engine *DockerTaskEngine) monitorExecAgentRunning(ctx context.Context,
mTask *managedTask, c *apicontainer.Container) {
if !c.IsRunning() {
return
}
task := mTask.Task
dockerID, err := engine.getDockerID(task, c)
if err != nil {
seelog.Errorf("Task engine [%s]: Could not retrieve docker id for container", task.Arn)
return
}
// Sleeping here so that all the containers do not call inspect/start exec agent process
// at the same time.
// The max sleep is 50% of the monitor interval to allow enough buffer time
// to finish monitoring.
// This is inspired from containers streaming stats from Docker.
time.Sleep(retry.AddJitter(time.Nanosecond, engine.monitorExecAgentsInterval/2))
status, err := engine.execCmdMgr.RestartAgentIfStopped(ctx, engine.client, task, c, dockerID)
if err != nil {
seelog.Errorf("Task engine [%s]: Failed to restart ExecCommandAgent Process for container [%s]: %v", task.Arn, dockerID, err)
mTask.emitManagedAgentEvent(mTask.Task, c, execcmd.ExecuteCommandAgentName, "ExecuteCommandAgent cannot be restarted")
}
if status == execcmd.Restarted {
mTask.emitManagedAgentEvent(mTask.Task, c, execcmd.ExecuteCommandAgentName, "ExecuteCommandAgent restarted")
}
}
// MustInit blocks and retries until an engine can be initialized.
func (engine *DockerTaskEngine) MustInit(ctx context.Context) {
if engine.initialized {
return
}
engine.mustInitLock.Lock()
defer engine.mustInitLock.Unlock()
errorOnce := sync.Once{}
taskEngineConnectBackoff := retry.NewExponentialBackoff(minEngineConnectRetryDelay, maxEngineConnectRetryDelay,
engineConnectRetryJitterMultiplier, engineConnectRetryDelayMultiplier)
retry.RetryWithBackoff(taskEngineConnectBackoff, func() error {
if engine.initialized {
return nil
}
err := engine.Init(ctx)
if err != nil {
errorOnce.Do(func() {
seelog.Errorf("Task engine: could not connect to docker daemon: %v", err)
})
}
return err
})
}
// SetDataClient sets the saver that is used by the DockerTaskEngine.
func (engine *DockerTaskEngine) SetDataClient(client data.Client) {
engine.dataClient = client
}
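// Context returns the context associated with this task engine (set when Init is called).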
func (engine *DockerTaskEngine) Context() context.Context {
return engine.ctx
}
// Shutdown makes a best-effort attempt to cleanup after the task engine.
// This should not be relied on for anything more complicated than testing.
func (engine *DockerTaskEngine) Shutdown() {
engine.stopEngine()
engine.Disable()
}
// Disable prevents this engine from managing any additional tasks.
func (engine *DockerTaskEngine) Disable() {
engine.tasksLock.Lock()
}
// isTaskManaged checks whether a task with the given ARN is currently being managed by the engine
func (engine *DockerTaskEngine) isTaskManaged(arn string) bool {
engine.tasksLock.RLock()
defer engine.tasksLock.RUnlock()
_, ok := engine.managedTasks[arn]
return ok
}
// synchronizeState explicitly goes through each docker container stored in
// "state" and updates its KnownStatus appropriately, as well as queueing up
// events to push upstream. It also initializes some fields of task resources and ENI attachments
// that won't be populated from loading the state file.
func (engine *DockerTaskEngine) synchronizeState() {
engine.tasksLock.Lock()
defer engine.tasksLock.Unlock()
imageStates := engine.state.AllImageStates()
if len(imageStates) != 0 {
engine.imageManager.AddAllImageStates(imageStates)
}
eniAttachments := engine.state.AllENIAttachments()
for _, eniAttachment := range eniAttachments {
timeoutFunc := func() {
eniAttachment, ok := engine.state.ENIByMac(eniAttachment.MACAddress)
if !ok {
seelog.Warnf("Ignoring unmanaged ENI attachment with MAC address: %s", eniAttachment.MACAddress)
return
}
if !eniAttachment.IsSent() {
seelog.Warnf("Timed out waiting for ENI ack; removing ENI attachment record %s", eniAttachment.String())
engine.removeENIAttachmentData(eniAttachment.MACAddress)
engine.state.RemoveENIAttachment(eniAttachment.MACAddress)
}
}
err := eniAttachment.Initialize(timeoutFunc)
if err != nil {
// The only case where we get an error from Initialize is that the attachment has expired. In that case, remove the expired
// attachment from state.
seelog.Warnf("ENI attachment has expired. Removing it from state. %s", eniAttachment.String())
engine.removeENIAttachmentData(eniAttachment.MACAddress)
engine.state.RemoveENIAttachment(eniAttachment.MACAddress)
}
}
tasks := engine.state.AllTasks()
tasksToStart := engine.filterTasksToStartUnsafe(tasks)
for _, task := range tasks {
task.InitializeResources(engine.resourceFields)
engine.saveTaskData(task)
}
for _, task := range tasksToStart {
engine.startTask(task)
}
}
// filterTasksToStartUnsafe filters only the tasks that need to be started after
// the agent has been restarted. It also synchronizes states of all of the containers
// in tasks that need to be started.
func (engine *DockerTaskEngine) filterTasksToStartUnsafe(tasks []*apitask.Task) []*apitask.Task {
var tasksToStart []*apitask.Task
for _, task := range tasks {
conts, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
// task hasn't started processing, no need to check container status
tasksToStart = append(tasksToStart, task)
continue
}
for _, cont := range conts {
engine.synchronizeContainerStatus(cont, task)
engine.saveDockerContainerData(cont) // persist the container with the updated information.
}
tasksToStart = append(tasksToStart, task)
		// Put tasks that have been stopped by ACS but haven't actually stopped yet into the wait group
if task.GetDesiredStatus().Terminal() && task.GetStopSequenceNumber() != 0 {
engine.taskStopGroup.Add(task.GetStopSequenceNumber(), 1)
}
}
return tasksToStart
}
// updateContainerMetadata sets the container metadata from the docker inspect
func updateContainerMetadata(metadata *dockerapi.DockerContainerMetadata, container *apicontainer.Container, task *apitask.Task) {
container.SetCreatedAt(metadata.CreatedAt)
container.SetStartedAt(metadata.StartedAt)
container.SetFinishedAt(metadata.FinishedAt)
// Set the labels if it's not set
if len(metadata.Labels) != 0 && len(container.GetLabels()) == 0 {
container.SetLabels(metadata.Labels)
}
// Update volume for empty volume container
if metadata.Volumes != nil {
if container.IsInternal() {
task.UpdateMountPoints(container, metadata.Volumes)
} else {
container.SetVolumes(metadata.Volumes)
}
}
// Set Exitcode if it's not set
if metadata.ExitCode != nil {
container.SetKnownExitCode(metadata.ExitCode)
}
// Set port mappings
if len(metadata.PortBindings) != 0 && len(container.GetKnownPortBindings()) == 0 {
container.SetKnownPortBindings(metadata.PortBindings)
}
// update the container health information
if container.HealthStatusShouldBeReported() {
container.SetHealthStatus(metadata.Health)
}
container.SetNetworkMode(metadata.NetworkMode)
container.SetNetworkSettings(metadata.NetworkSettings)
}
// synchronizeContainerStatus checks and updates the container status with docker
func (engine *DockerTaskEngine) synchronizeContainerStatus(container *apicontainer.DockerContainer, task *apitask.Task) {
if container.DockerID == "" {
seelog.Debugf("Task engine [%s]: found container potentially created while we were down: %s",
task.Arn, container.DockerName)
// Figure out the dockerid
describedContainer, err := engine.client.InspectContainer(engine.ctx,
container.DockerName, dockerclient.InspectContainerTimeout)
if err != nil {
seelog.Warnf("Task engine [%s]: could not find matching container for expected name [%s]: %v",
task.Arn, container.DockerName, err)
} else {
// update the container metadata in case the container was created during agent restart
metadata := dockerapi.MetadataFromContainer(describedContainer)
updateContainerMetadata(&metadata, container.Container, task)
container.DockerID = describedContainer.ID
container.Container.SetKnownStatus(dockerapi.DockerStateToState(describedContainer.State))
// update mappings that need dockerid
engine.state.AddContainer(container, task)
err := engine.imageManager.RecordContainerReference(container.Container)
if err != nil {
seelog.Warnf("Task engine [%s]: unable to add container reference to image state: %v",
task.Arn, err)
}
}
return
}
currentState, metadata := engine.client.DescribeContainer(engine.ctx, container.DockerID)
if metadata.Error != nil {
currentState = containerstatus.ContainerStopped
// If this is a Docker API error
if metadata.Error.ErrorName() == dockerapi.CannotDescribeContainerErrorName {
seelog.Warnf("Task engine [%s]: could not describe previously known container [id=%s; name=%s]; assuming dead: %v",
task.Arn, container.DockerID, container.DockerName, metadata.Error)
if !container.Container.KnownTerminal() {
container.Container.ApplyingError = apierrors.NewNamedError(&ContainerVanishedError{})
err := engine.imageManager.RemoveContainerReferenceFromImageState(container.Container)
if err != nil {
seelog.Warnf("Task engine [%s]: could not remove container reference for image state %s: %v",
container.Container.Image, err)
}
}
} else {
// If this is a container state error
updateContainerMetadata(&metadata, container.Container, task)
container.Container.ApplyingError = apierrors.NewNamedError(metadata.Error)
}
} else {
// update the container metadata in case the container status/metadata changed during agent restart
updateContainerMetadata(&metadata, container.Container, task)
err := engine.imageManager.RecordContainerReference(container.Container)
if err != nil {
seelog.Warnf("Task engine [%s]: unable to add container reference to image state: %v",
task.Arn, err)
}
if engine.cfg.ContainerMetadataEnabled.Enabled() && !container.Container.IsMetadataFileUpdated() {
go engine.updateMetadataFile(task, container)
}
}
if currentState > container.Container.GetKnownStatus() {
// update the container known status
container.Container.SetKnownStatus(currentState)
}
// Update task ExecutionStoppedAt timestamp
task.RecordExecutionStoppedAt(container.Container)
}
// checkTaskState inspects the state of all containers within a task and writes
// their state to the managed task's container channel.
func (engine *DockerTaskEngine) checkTaskState(task *apitask.Task) {
defer metrics.MetricsEngineGlobal.RecordTaskEngineMetric("CHECK_TASK_STATE")()
for _, container := range task.Containers {
dockerID, err := engine.getDockerID(task, container)
if err != nil {
continue
}
status, metadata := engine.client.DescribeContainer(engine.ctx, dockerID)
engine.tasksLock.RLock()
managedTask, ok := engine.managedTasks[task.Arn]
engine.tasksLock.RUnlock()
if ok {
managedTask.emitDockerContainerChange(dockerContainerChange{
container: container,
event: dockerapi.DockerContainerChangeEvent{
Status: status,
DockerContainerMetadata: metadata,
},
})
}
}
}
// sweepTask deletes all the containers associated with a task
func (engine *DockerTaskEngine) sweepTask(task *apitask.Task) {
for _, cont := range task.Containers {
err := engine.removeContainer(task, cont)
if err != nil {
seelog.Infof("Task engine [%s]: unable to remove old container [%s]: %v",
task.Arn, cont.Name, err)
}
// Internal container (created by ecs-agent) state isn't recorded
if cont.IsInternal() {
continue
}
err = engine.imageManager.RemoveContainerReferenceFromImageState(cont)
if err != nil {
seelog.Errorf("Task engine [%s]: Unable to remove container [%s] reference from image state: %v",
task.Arn, cont.Name, err)
}
}
// Clean metadata directory for task
if engine.cfg.ContainerMetadataEnabled.Enabled() {
err := engine.metadataManager.Clean(task.Arn)
if err != nil {
seelog.Warnf("Task engine [%s]: clean task metadata failed: %v", task.Arn, err)
}
}
}
var removeAll = os.RemoveAll
func (engine *DockerTaskEngine) deleteTask(task *apitask.Task) {
for _, resource := range task.GetResources() {
err := resource.Cleanup()
if err != nil {
seelog.Warnf("Task engine [%s]: unable to cleanup resource %s: %v",
task.Arn, resource.GetName(), err)
} else {
seelog.Infof("Task engine [%s]: resource %s cleanup complete", task.Arn,
resource.GetName())
}
}
if execcmd.IsExecEnabledTask(task) {
// cleanup host exec agent log dirs
if tID, err := task.GetID(); err != nil {
seelog.Warnf("Task Engine[%s]: error getting task ID for ExecAgent logs cleanup: %v", task.Arn, err)
} else {
if err := removeAll(filepath.Join(execcmd.ECSAgentExecLogDir, tID)); err != nil {
seelog.Warnf("Task Engine[%s]: unable to remove ExecAgent host logs for task: %v", task.Arn, err)
}
}
}
// Now remove ourselves from the global state and cleanup channels
engine.tasksLock.Lock()
engine.state.RemoveTask(task)
taskENIs := task.GetTaskENIs()
for _, taskENI := range taskENIs {
// ENIs that exist only as logical associations on another interface do not have
// attachments that need to be removed.
if taskENI.IsStandardENI() {
seelog.Debugf("Task engine [%s]: removing eni %s from agent state",
task.Arn, taskENI.ID)
engine.removeENIAttachmentData(taskENI.MacAddress)
engine.state.RemoveENIAttachment(taskENI.MacAddress)
} else {
seelog.Debugf("Task engine [%s]: skipping removing logical eni %s from agent state",
task.Arn, taskENI.ID)
}
}
// Remove task and container data from database.
engine.removeTaskData(task)
seelog.Infof("Task engine [%s]: finished removing task data, removing task from managed tasks", task.Arn)
delete(engine.managedTasks, task.Arn)
engine.tasksLock.Unlock()
}
func (engine *DockerTaskEngine) emitTaskEvent(task *apitask.Task, reason string) {
event, err := api.NewTaskStateChangeEvent(task, reason)
if err != nil {
seelog.Infof("Task engine [%s]: unable to create task state change event: %v", task.Arn, err)
return
}
seelog.Infof("Task engine [%s]: Task engine: sending change event [%s]", task.Arn, event.String())
engine.stateChangeEvents <- event
}
// startTask creates a managedTask construct to track the task and then begins
// pushing it towards its desired state when allowed startTask is protected by
// the tasksLock lock of 'AddTask'. It should not be called from anywhere
// else and should exit quickly to allow AddTask to do more work.
func (engine *DockerTaskEngine) startTask(task *apitask.Task) {
// Create a channel that may be used to communicate with this task, survey
// which tasks need to be waited on for this one to start, and then spin off
// a goroutine to oversee this task
thisTask := engine.newManagedTask(task)
thisTask._time = engine.time()
go thisTask.overseeTask()
}
func (engine *DockerTaskEngine) time() ttime.Time {
engine._timeOnce.Do(func() {
if engine._time == nil {
engine._time = &ttime.DefaultTime{}
}
})
return engine._time
}
// openEventstream opens, but does not consume, the docker event stream
func (engine *DockerTaskEngine) openEventstream(ctx context.Context) error {
events, err := engine.client.ContainerEvents(ctx)
if err != nil {
return err
}
engine.events = events
return nil
}
// handleDockerEvents must be called after openEventstream; it processes each
// event that it reads from the docker eventstream
func (engine *DockerTaskEngine) handleDockerEvents(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case event := <-engine.events:
engine.handleDockerEvent(event)
}
}
}
// handleDockerEvent is the entrypoint for task modifications originating with
// events occurring through Docker, outside the task engine itself.
// handleDockerEvent is responsible for taking an event that correlates to a
// container and placing it in the context of the task to which that container
// belongs.
func (engine *DockerTaskEngine) handleDockerEvent(event dockerapi.DockerContainerChangeEvent) {
seelog.Debugf("Task engine: handling a docker event: %s", event.String())
task, ok := engine.state.TaskByID(event.DockerID)
if !ok {
seelog.Debugf("Task engine: event for container [%s] not managed, unable to map container id to task",
event.DockerID)
return
}
cont, ok := engine.state.ContainerByID(event.DockerID)
if !ok {
seelog.Debugf("Task engine: event for container [%s] not managed, unable to map container id to container",
event.DockerID)
return
}
// Container health status change does not affect the container status
// no need to process this in task manager
if event.Type == containerresource.ContainerHealthEvent {
if cont.Container.HealthStatusShouldBeReported() {
seelog.Debugf("Task engine: updating container [%s(%s)] health status: %v",
cont.Container.Name, cont.DockerID, event.DockerContainerMetadata.Health)
cont.Container.SetHealthStatus(event.DockerContainerMetadata.Health)
}
return
}
engine.tasksLock.RLock()
managedTask, ok := engine.managedTasks[task.Arn]
engine.tasksLock.RUnlock()
if !ok {
seelog.Criticalf("Task engine: could not find managed task [%s] corresponding to a docker event: %s",
task.Arn, event.String())
return
}
seelog.Debugf("Task engine [%s]: writing docker event to the task: %s",
task.Arn, event.String())
managedTask.emitDockerContainerChange(dockerContainerChange{container: cont.Container, event: event})
seelog.Debugf("Task engine [%s]: wrote docker event to the task: %s",
task.Arn, event.String())
}
// StateChangeEvents returns channels to read task and container state changes. These
// changes should be read as soon as possible; leaving them unread will block
// processing of the task referenced by the event.
func (engine *DockerTaskEngine) StateChangeEvents() chan statechange.Event {
return engine.stateChangeEvents
}
// AddTask starts tracking a task
func (engine *DockerTaskEngine) AddTask(task *apitask.Task) {
defer metrics.MetricsEngineGlobal.RecordTaskEngineMetric("ADD_TASK")()
err := task.PostUnmarshalTask(engine.cfg, engine.credentialsManager,
engine.resourceFields, engine.client, engine.ctx)
if err != nil {
seelog.Errorf("Task engine [%s]: unable to add task to the engine: %v", task.Arn, err)
task.SetKnownStatus(apitaskstatus.TaskStopped)
task.SetDesiredStatus(apitaskstatus.TaskStopped)
engine.emitTaskEvent(task, err.Error())
return
}
engine.tasksLock.Lock()
defer engine.tasksLock.Unlock()
existingTask, exists := engine.state.TaskByArn(task.Arn)
if !exists {
// This will update the container desired status
task.UpdateDesiredStatus()
// This will update any dependencies for awsvpc network mode before the task is started.
engine.updateTaskENIDependencies(task)
engine.state.AddTask(task)
if dependencygraph.ValidDependencies(task, engine.cfg) {
engine.startTask(task)
} else {
seelog.Errorf("Task engine [%s]: unable to progress task with circular dependencies", task.Arn)
task.SetKnownStatus(apitaskstatus.TaskStopped)
task.SetDesiredStatus(apitaskstatus.TaskStopped)
err := TaskDependencyError{task.Arn}
engine.emitTaskEvent(task, err.Error())
}
return
}
// Update task
engine.updateTaskUnsafe(existingTask, task)
}
// ListTasks returns the tasks currently managed by the DockerTaskEngine
func (engine *DockerTaskEngine) ListTasks() ([]*apitask.Task, error) {
return engine.state.AllTasks(), nil
}
// GetTaskByArn returns the task identified by that ARN
func (engine *DockerTaskEngine) GetTaskByArn(arn string) (*apitask.Task, bool) {
return engine.state.TaskByArn(arn)
}
func (engine *DockerTaskEngine) pullContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
switch container.Type {
case apicontainer.ContainerCNIPause, apicontainer.ContainerNamespacePause:
// pause images are managed at startup
return dockerapi.DockerContainerMetadata{}
}
if engine.imagePullRequired(engine.cfg.ImagePullBehavior, container, task.Arn) {
// Record the pullStoppedAt timestamp
defer func() {
timestamp := engine.time().Now()
task.SetPullStoppedAt(timestamp)
}()
seelog.Infof("Task engine [%s]: pulling image %s for container %s concurrently", task.Arn, container.Image, container.Name)
return engine.concurrentPull(task, container)
}
// No image pull is required; the cached image will be used.
// Add the container that uses the cached image to the pulled container state.
dockerContainer := &apicontainer.DockerContainer{
Container: container,
}
engine.state.AddPulledContainer(dockerContainer, task)
// No image pull is required, just update the container reference and use the cached image.
engine.updateContainerReference(false, container, task.Arn)
// Return the metadata without any error
return dockerapi.DockerContainerMetadata{Error: nil}
}
// imagePullRequired returns true if pulling image is required, or return false if local image cache
// should be used, by inspecting the agent pull behavior variable defined in config. The caller has
// to make sure the container passed in is not an internal container.
func (engine *DockerTaskEngine) imagePullRequired(imagePullBehavior config.ImagePullBehaviorType,
container *apicontainer.Container,
taskArn string) bool {
switch imagePullBehavior {
case config.ImagePullOnceBehavior:
// If this image has been pulled successfully before, don't pull the image,
// otherwise pull the image as usual, regardless whether the image exists or not
// (the image can be prepopulated with the AMI and never be pulled).
imageState, ok := engine.imageManager.GetImageStateFromImageName(container.Image)
if ok && imageState.GetPullSucceeded() {
seelog.Infof("Task engine [%s]: image %s for container %s has been pulled once, not pulling it again",
taskArn, container.Image, container.Name)
return false
}
return true
case config.ImagePullPreferCachedBehavior:
// If the behavior is prefer cached, don't pull if we found cached image
// by inspecting the image.
_, err := engine.client.InspectImage(container.Image)
if err != nil {
return true
}
seelog.Infof("Task engine [%s]: found cached image %s, use it directly for container %s",
taskArn, container.Image, container.Name)
return false
default:
// Need to pull the image for always and default agent pull behavior
return true
}
}
func (engine *DockerTaskEngine) concurrentPull(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
seelog.Debugf("Task engine [%s]: attempting to obtain ImagePullDeleteLock to pull image %s for container %s",
task.Arn, container.Image, container.Name)
ImagePullDeleteLock.RLock()
seelog.Debugf("Task engine [%s]: acquired ImagePullDeleteLock, start pulling image %s for container %s",
task.Arn, container.Image, container.Name)
defer seelog.Debugf("Task engine [%s]: released ImagePullDeleteLock after pulling image %s for container %s",
task.Arn, container.Image, container.Name)
defer ImagePullDeleteLock.RUnlock()
// Record the task pull_started_at timestamp
pullStart := engine.time().Now()
ok := task.SetPullStartedAt(pullStart)
if ok {
seelog.Infof("Task engine [%s]: recording timestamp for starting image pulltime: %s",
task.Arn, pullStart)
}
metadata := engine.pullAndUpdateContainerReference(task, container)
if metadata.Error == nil {
seelog.Infof("Task engine [%s]: finished pulling image %s for container %s in %s",
task.Arn, container.Image, container.Name, time.Since(pullStart).String())
} else {
seelog.Errorf("Task engine [%s]: failed to pull image %s for container %s: %v",
task.Arn, container.Image, container.Name, metadata.Error)
}
return metadata
}
func (engine *DockerTaskEngine) pullAndUpdateContainerReference(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
// If a task is blocked here for some time and its desired status is set to stopped
// before it starts pulling the image, then don't pull the image
if task.GetDesiredStatus() == apitaskstatus.TaskStopped {
seelog.Infof("Task engine [%s]: task's desired status is stopped, skipping pulling image %s for container %s",
task.Arn, container.Image, container.Name)
container.SetDesiredStatus(containerstatus.ContainerStopped)
return dockerapi.DockerContainerMetadata{Error: TaskStoppedBeforePullBeginError{task.Arn}}
}
// Set the credentials for pull from ECR if necessary
if container.ShouldPullWithExecutionRole() {
iamCredentials, ok, credentialsType := engine.getPullImageIAMCredentials(task, container)
if !ok {
seelog.Errorf("Task engine [%s]: unable to acquire ECR credentials for image %s for container %s with %sExecutionCredentials",
task.Arn, container.Image, container.Name, credentialsType)
return dockerapi.DockerContainerMetadata{
Error: dockerapi.CannotPullECRContainerError{
FromError: errors.New("engine ecr credentials: not found"),
},
}
}
seelog.Infof("Set RegistryAuthCredentials with %sExecutionCredentials for container [%s] of task [%s]", credentialsType, container.Name, task.Arn)
container.SetRegistryAuthCredentials(iamCredentials)
// Clean up the ECR pull credentials after pulling
defer container.SetRegistryAuthCredentials(credentials.IAMRoleCredentials{})
}
// Apply registry auth data from ASM if required
if container.ShouldPullWithASMAuth() {
if err := task.PopulateASMAuthData(container); err != nil {
seelog.Errorf("Task engine [%s]: unable to acquire Docker registry credentials for image %s for container %s",
task.Arn, container.Image, container.Name)
return dockerapi.DockerContainerMetadata{
Error: dockerapi.CannotPullContainerAuthError{
FromError: errors.New("engine docker private registry credentials: not found"),
},
}
}
defer container.SetASMDockerAuthConfig(types.AuthConfig{})
}
metadata := engine.client.PullImage(engine.ctx, container.Image, container.RegistryAuthentication, engine.cfg.ImagePullTimeout)
// Don't add internal images (created by ecs-agent) into image manager state
if container.IsInternal() {
return metadata
}
pullSucceeded := metadata.Error == nil
findCachedImage := false
if !pullSucceeded {
// If Agent failed to pull an image when
// 1. DependentContainersPullUpfront is enabled
// 2. ImagePullBehavior is not set to always
// search the image in local cached images
if engine.cfg.DependentContainersPullUpfront.Enabled() && engine.cfg.ImagePullBehavior != config.ImagePullAlwaysBehavior {
if _, err := engine.client.InspectImage(container.Image); err != nil {
seelog.Errorf("Task engine [%s]: failed to find cached image %s for container %s",
task.Arn, container.Image, container.Name)
// Stop the task if the container is an essential container,
// and the image is not available in either the remote or local cache
if container.IsEssential() {
task.SetDesiredStatus(apitaskstatus.TaskStopped)
engine.emitTaskEvent(task, fmt.Sprintf("%s: %s", metadata.Error.ErrorName(), metadata.Error.Error()))
}
return dockerapi.DockerContainerMetadata{Error: metadata.Error}
}
seelog.Infof("Task engine [%s]: found cached image %s, use it directly for container %s",
task.Arn, container.Image, container.Name)
findCachedImage = true
}
}
if pullSucceeded || findCachedImage {
dockerContainer := &apicontainer.DockerContainer{
Container: container,
}
engine.state.AddPulledContainer(dockerContainer, task)
}
engine.updateContainerReference(pullSucceeded, container, task.Arn)
return metadata
}
func (engine *DockerTaskEngine) getPullImageIAMCredentials(task *apitask.Task, container *apicontainer.Container) (credentials.IAMRoleCredentials, bool, string) {
if container.GetExecutionCredentialsID() != "" {
executionCredentials, ok := engine.credentialsManager.GetContainerCredentials(container.GetExecutionCredentialsID())
return executionCredentials.GetIAMRoleCredentials(), ok, "container"
} else {
executionCredentials, ok := engine.credentialsManager.GetTaskCredentials(task.GetExecutionCredentialsID())
return executionCredentials.GetIAMRoleCredentials(), ok, "task"
}
}
func (engine *DockerTaskEngine) updateContainerReference(pullSucceeded bool, container *apicontainer.Container, taskArn string) {
err := engine.imageManager.RecordContainerReference(container)
if err != nil {
seelog.Errorf("Task engine [%s]: unable to add container reference to image state: %v",
taskArn, err)
}
imageState, ok := engine.imageManager.GetImageStateFromImageName(container.Image)
if ok && pullSucceeded {
// Only need to update the pullSucceeded flag of the image state when it's not yet set to true.
if !imageState.GetPullSucceeded() {
imageState.SetPullSucceeded(true)
err = engine.dataClient.SaveImageState(imageState)
if err != nil {
seelog.Warnf("Task engine [%s]: unable to save image state: %v",
taskArn, err)
}
}
}
engine.state.AddImageState(imageState)
}
func (engine *DockerTaskEngine) createContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
seelog.Infof("Task engine [%s]: creating container: %s", task.Arn, container.Name)
client := engine.client
if container.DockerConfig.Version != nil {
client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version))
}
dockerContainerName := ""
containerMap, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
containerMap = make(map[string]*apicontainer.DockerContainer)
} else {
// looking for container that has docker name but not created
for _, v := range containerMap {
if v.Container.Name == container.Name {
dockerContainerName = v.DockerName
break
}
}
}
// Resolve HostConfig
// we have to do this in create, not start, because docker no longer handles
// merging create config with start hostconfig the same; e.g. memory limits
// get lost
dockerClientVersion, versionErr := client.APIVersion()
if versionErr != nil {
return dockerapi.DockerContainerMetadata{Error: CannotGetDockerClientVersionError{versionErr}}
}
hostConfig, hcerr := task.DockerHostConfig(container, containerMap, dockerClientVersion, engine.cfg)
if hcerr != nil {
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(hcerr)}
}
if container.AWSLogAuthExecutionRole() {
err := task.ApplyExecutionRoleLogsAuth(hostConfig, engine.credentialsManager, container)
if err != nil {
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)}
}
}
firelensConfig := container.GetFirelensConfig()
if firelensConfig != nil {
err := task.AddFirelensContainerBindMounts(firelensConfig, hostConfig, engine.cfg, container.Name)
if err != nil {
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)}
}
cerr := task.PopulateSecretLogOptionsToFirelensContainer(container)
if cerr != nil {
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(cerr)}
}
if firelensConfig.Type == firelens.FirelensConfigTypeFluentd {
// For the fluentd router, FLUENT_UID needs to be set to root (0) in order for the fluentd process to access
// the socket created by Docker.
container.MergeEnvironmentVariables(map[string]string{
"FLUENT_UID": "0",
})
}
}
// If the container is using a special log driver type "awsfirelens", it means the container wants to use
// the firelens container to send logs. In this case, override the log driver type to be fluentd
// and specify appropriate tag and fluentd-address, so that the logs are sent to and routed by the firelens container.
// Update the environment variables FLUENT_HOST and FLUENT_PORT depending on the supported network modes - bridge
// and awsvpc. For reference - https://docs.docker.com/config/containers/logging/fluentd/.
if hostConfig.LogConfig.Type == logDriverTypeFirelens {
firelensContainers := task.GetFirelensContainers()
firelensVersion := firelensContainers[0].FirelensConfig.Version
hostConfig.LogConfig = getFirelensLogConfig(task, container, firelensVersion, hostConfig, engine.cfg)
if firelensVersion != "v2" {
if task.IsNetworkModeAWSVPC() {
container.MergeEnvironmentVariables(map[string]string{
fluentNetworkHost: FluentAWSVPCHostValue,
fluentNetworkPort: FluentNetworkPortValue,
})
} else if container.GetNetworkModeFromHostConfig() == "" || container.GetNetworkModeFromHostConfig() == apitask.BridgeNetworkMode {
ipAddress, ok := getContainerHostIP(firelensContainers[0].GetNetworkSettings())
if !ok {
err := apierrors.DockerClientConfigError{Msg: "unable to get BridgeIP for task in bridge mode"}
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(&err)}
}
container.MergeEnvironmentVariables(map[string]string{
fluentNetworkHost: ipAddress,
fluentNetworkPort: FluentNetworkPortValue,
})
}
}
// TODO: for firelens v2, configure COLLECTOR_HOST after the design is finalized for control plane
}
// Apply the log driver secrets into the container's LogConfig and env secrets into container.Environment
hasSecretAsEnvOrLogDriver := func(s containerresource.Secret) bool {
return s.Type == apicontainer.SecretTypeEnv || s.Target == apicontainer.SecretTargetLogDriver
}
if container.HasSecret(hasSecretAsEnvOrLogDriver) {
err := task.PopulateSecrets(hostConfig, container)
if err != nil {
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)}
}
}
// Populate credentialspec resource
if container.RequiresCredentialSpec() {
seelog.Debugf("Obtained container %s with credentialspec resource requirement for task %s.", container.Name, task.Arn)
var credSpecResource *credentialspec.CredentialSpecResource
resource, ok := task.GetCredentialSpecResource()
if !ok || len(resource) <= 0 {
resMissingErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch task resource credentialspec"}
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(resMissingErr)}
}
credSpecResource = resource[0].(*credentialspec.CredentialSpecResource)
containerCredSpec, err := container.GetCredentialSpec()
if err == nil && containerCredSpec != "" {
// CredentialSpec mapping: input := credentialspec:file://test.json, output := credentialspec=file://test.json
desiredCredSpecInjection, err := credSpecResource.GetTargetMapping(containerCredSpec)
if err != nil || desiredCredSpecInjection == "" {
missingErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch valid credentialspec mapping"}
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(missingErr)}
}
// Inject containers' hostConfig.SecurityOpt with the credentialspec resource
seelog.Infof("Injecting container %s with credentialspec %s.", container.Name, desiredCredSpecInjection)
if len(hostConfig.SecurityOpt) == 0 {
hostConfig.SecurityOpt = []string{desiredCredSpecInjection}
} else {
for idx, opt := range hostConfig.SecurityOpt {
if strings.HasPrefix(opt, "credentialspec:") {
hostConfig.SecurityOpt[idx] = desiredCredSpecInjection
}
}
}
} else {
emptyErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch valid credentialspec: " + err.Error()}
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(emptyErr)}
}
}
if container.ShouldCreateWithEnvFiles() {
err := task.MergeEnvVarsFromEnvfiles(container)
if err != nil {
seelog.Errorf("Error populating environment variables from specified files into container %s", container.Name)
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)}
}
}
if execcmd.IsExecEnabledContainer(container) {
tID, err := task.GetID()
if err != nil {
herr := &apierrors.HostConfigError{Msg: err.Error()}
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(herr)}
}
err = engine.execCmdMgr.InitializeContainer(tID, container, hostConfig)
if err != nil {
seelog.Warnf("Exec Agent initialization: %v . Continuing to start container without enabling exec feature.", err)
// Emit a managed agent state change event if exec agent initialization fails
engine.tasksLock.RLock()
mTask, ok := engine.managedTasks[task.Arn]
engine.tasksLock.RUnlock()
if ok {
mTask.emitManagedAgentEvent(mTask.Task, container, execcmd.ExecuteCommandAgentName, fmt.Sprintf("ExecuteCommandAgent Initialization failed - %v", err))
} else {
seelog.Errorf("Task engine [%s]: Failed to update status of ExecCommandAgent Process for container [%s]: managed task not found", task.Arn, container.Name)
}
}
}
config, err := task.DockerConfig(container, dockerClientVersion)
if err != nil {
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)}
}
// Augment labels with some metadata from the agent. Explicitly do this last
// such that it will always override duplicates in the provided raw config
// data.
config.Labels[labelTaskARN] = task.Arn
config.Labels[labelContainerName] = container.Name
config.Labels[labelTaskDefinitionFamily] = task.Family
config.Labels[labelTaskDefinitionVersion] = task.Version
config.Labels[labelCluster] = engine.cfg.Cluster
if dockerContainerName == "" {
// only alphanumeric and hyphen characters are allowed
reInvalidChars := regexp.MustCompile("[^A-Za-z0-9-]+")
name := reInvalidChars.ReplaceAllString(container.Name, "")
dockerContainerName = "ecs-" + task.Family + "-" + task.Version + "-" + name + "-" + utils.RandHex()
// Pre-add the container in case we stop before the next, more useful,
// AddContainer call. This ensures we have a way to get the container if
// we die before 'createContainer' returns because we can inspect by
// name
engine.state.AddContainer(&apicontainer.DockerContainer{
DockerName: dockerContainerName,
Container: container,
}, task)
seelog.Infof("Task engine [%s]: created container name mapping for task: %s -> %s",
task.Arn, container.Name, dockerContainerName)
}
// Create metadata directory and file then populate it with common metadata of all containers of this task
// Afterwards add this directory to the container's mounts if file creation was successful
if engine.cfg.ContainerMetadataEnabled.Enabled() && !container.IsInternal() {
info, infoErr := engine.client.Info(engine.ctx, dockerclient.InfoTimeout)
if infoErr != nil {
seelog.Warnf("Task engine [%s]: unable to get docker info : %v",
task.Arn, infoErr)
}
mderr := engine.metadataManager.Create(config, hostConfig, task, container.Name, info.SecurityOptions)
if mderr != nil {
seelog.Warnf("Task engine [%s]: unable to create metadata for container %s: %v",
task.Arn, container.Name, mderr)
}
}
createContainerBegin := time.Now()
metadata := client.CreateContainer(engine.ctx, config, hostConfig,
dockerContainerName, engine.cfg.ContainerCreateTimeout)
if metadata.DockerID != "" {
seelog.Infof("Task engine [%s]: created docker container for task: %s -> %s",
task.Arn, container.Name, metadata.DockerID)
dockerContainer := &apicontainer.DockerContainer{DockerID: metadata.DockerID,
DockerName: dockerContainerName,
Container: container}
engine.state.AddContainer(dockerContainer, task)
engine.saveDockerContainerData(dockerContainer)
}
container.SetLabels(config.Labels)
seelog.Infof("Task engine [%s]: created docker container for task: %s -> %s, took %s",
task.Arn, container.Name, metadata.DockerID, time.Since(createContainerBegin))
container.SetRuntimeID(metadata.DockerID)
return metadata
}
func getFirelensLogConfig(task *apitask.Task, container *apicontainer.Container, firelensVersion string,
hostConfig *dockercontainer.HostConfig, cfg *config.Config) dockercontainer.LogConfig {
fields := strings.Split(task.Arn, "/")
taskID := fields[len(fields)-1]
var tag, dataLogDriverPath string
switch firelensVersion {
case "v2":
tag = fmt.Sprintf(fluentTagDockerFirelensV2Format, taskID, container.Name)
dataLogDriverPath = dataLogDriverPathFirelensV2
default:
tag = fmt.Sprintf(fluentTagDockerFirelensV1Format, container.Name, taskID)
dataLogDriverPath = dataLogDriverPathFirelensV1
}
fluentd := socketPathPrefix + filepath.Join(cfg.DataDirOnHost, dataLogDriverPath, taskID, dataLogDriverSocketPath)
logConfig := hostConfig.LogConfig
bufferLimit, bufferLimitExists := logConfig.Config[apitask.FirelensLogDriverBufferLimitOption]
logConfig.Type = logDriverTypeFluentd
logConfig.Config = make(map[string]string)
logConfig.Config[logDriverTag] = tag
logConfig.Config[logDriverFluentdAddress] = fluentd
logConfig.Config[logDriverAsyncConnect] = strconv.FormatBool(true)
logConfig.Config[logDriverSubSecondPrecision] = strconv.FormatBool(true)
if bufferLimitExists {
logConfig.Config[logDriverBufferLimit] = bufferLimit
}
seelog.Debugf("Applying firelens log config for container %s: %v", container.Name, logConfig)
return logConfig
}
func (engine *DockerTaskEngine) startContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
seelog.Infof("Task engine [%s]: starting container: %s (Runtime ID: %s)", task.Arn, container.Name, container.GetRuntimeID())
client := engine.client
if container.DockerConfig.Version != nil {
client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version))
}
dockerID, err := engine.getDockerID(task, container)
if err != nil {
return dockerapi.DockerContainerMetadata{
Error: dockerapi.CannotStartContainerError{
FromError: err,
},
}
}
startContainerBegin := time.Now()
dockerContainerMD := client.StartContainer(engine.ctx, dockerID, engine.cfg.ContainerStartTimeout)
if dockerContainerMD.Error != nil {
return dockerContainerMD
}
seelog.Infof("Task engine [%s]: started docker container for task: %s -> %s, took %s",
task.Arn, container.Name, dockerContainerMD.DockerID, time.Since(startContainerBegin))
// Get metadata through container inspection and available task information then write this to the metadata file
// Performs this in the background to avoid delaying container start
// TODO: Add a state to the apicontainer.Container for the status of the metadata file (Whether it needs update) and
// add logic to engine state restoration to do a metadata update for containers that are running after the agent was restarted
if engine.cfg.ContainerMetadataEnabled.Enabled() && !container.IsInternal() {
go func() {
err := engine.metadataManager.Update(engine.ctx, dockerID, task, container.Name)
if err != nil {
seelog.Warnf("Task engine [%s]: failed to update metadata file for container %s: %v",
task.Arn, container.Name, err)
return
}
container.SetMetadataFileUpdated()
seelog.Debugf("Task engine [%s]: updated metadata file for container %s",
task.Arn, container.Name)
}()
}
// If the container is a firelens container, the fluent host needs to be added to the task's environment variables.
// Of the supported network modes - bridge and awsvpc - awsvpc uses the host 127.0.0.1, but in bridge mode
// we need to wait for the IP to be present before the container using firelens can be created.
if container.GetFirelensConfig() != nil {
if !task.IsNetworkModeAWSVPC() && (container.GetNetworkModeFromHostConfig() == "" || container.GetNetworkModeFromHostConfig() == apitask.BridgeNetworkMode) {
_, gotContainerIP := getContainerHostIP(dockerContainerMD.NetworkSettings)
if !gotContainerIP {
getIPBridgeBackoff := retry.NewExponentialBackoff(minGetIPBridgeTimeout, maxGetIPBridgeTimeout, getIPBridgeRetryJitterMultiplier, getIPBridgeRetryDelayMultiplier)
contextWithTimeout, cancel := context.WithTimeout(engine.ctx, time.Minute)
defer cancel()
err := retry.RetryWithBackoffCtx(contextWithTimeout, getIPBridgeBackoff, func() error {
inspectOutput, err := engine.client.InspectContainer(engine.ctx, dockerContainerMD.DockerID,
dockerclient.InspectContainerTimeout)
if err != nil {
return err
}
_, gotIPBridge := getContainerHostIP(inspectOutput.NetworkSettings)
if gotIPBridge {
dockerContainerMD.NetworkSettings = inspectOutput.NetworkSettings
return nil
} else {
return errors.New("Bridge IP not available to use for firelens")
}
})
if err != nil {
return dockerapi.DockerContainerMetadata{
Error: dockerapi.CannotStartContainerError{FromError: err},
}
}
}
}
}
if execcmd.IsExecEnabledContainer(container) {
if ma, _ := container.GetManagedAgentByName(execcmd.ExecuteCommandAgentName); !ma.InitFailed {
reason := "ExecuteCommandAgent started"
if err := engine.execCmdMgr.StartAgent(engine.ctx, engine.client, task, container, dockerID); err != nil {
reason = err.Error()
seelog.Errorf("Task engine [%s]: Failed to start ExecCommandAgent Process for container [%s]: %v", task.Arn, container.Name, err)
}
engine.tasksLock.RLock()
mTask, ok := engine.managedTasks[task.Arn]
engine.tasksLock.RUnlock()
// whether we started or failed to start, we'll want to emit a state change event
// redundant state change events like RUNNING->RUNNING are allowed
if ok {
mTask.emitManagedAgentEvent(mTask.Task, container, execcmd.ExecuteCommandAgentName, reason)
} else {
seelog.Errorf("Task engine [%s]: Failed to update status of ExecCommandAgent Process for container [%s]: managed task not found", task.Arn, container.Name)
}
}
}
// On Windows, we need to invoke CNI plugins for all containers
// invokePluginsForContainer will return nil for other platforms
if dockerContainerMD.Error == nil && task.IsNetworkModeAWSVPC() && !container.IsInternal() {
err := engine.invokePluginsForContainer(task, container)
if err != nil {
return dockerapi.DockerContainerMetadata{
Error: ContainerNetworkingError{
fromError: errors.Wrapf(err, "startContainer: cni plugin invocation failed"),
},
}
}
}
return dockerContainerMD
}
func (engine *DockerTaskEngine) provisionContainerResources(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
seelog.Infof("Task engine [%s]: setting up container resources for container [%s]",
task.Arn, container.Name)
containerInspectOutput, err := engine.inspectContainer(task, container)
if err != nil {
return dockerapi.DockerContainerMetadata{
Error: ContainerNetworkingError{
fromError: errors.Wrap(err,
"container resource provisioning: cannot setup task network namespace due to error inspecting pause container"),
},
}
}
task.SetPausePIDInVolumeResources(strconv.Itoa(containerInspectOutput.State.Pid))
cniConfig, err := engine.buildCNIConfigFromTaskContainer(task, containerInspectOutput, true)
if err != nil {
return dockerapi.DockerContainerMetadata{
Error: ContainerNetworkingError{
fromError: errors.Wrap(err,
"container resource provisioning: unable to build cni configuration"),
},
}
}
// Invoke the libcni to config the network namespace for the container
result, err := engine.cniClient.SetupNS(engine.ctx, cniConfig, cniSetupTimeout)
if err != nil {
seelog.Errorf("Task engine [%s]: unable to configure pause container namespace: %v",
task.Arn, err)
return dockerapi.DockerContainerMetadata{
DockerID: cniConfig.ContainerID,
Error: ContainerNetworkingError{errors.Wrap(err,
"container resource provisioning: failed to setup network namespace")},
}
}
// This is the IP of the task assigned on the bridge for IAM Task roles
taskIP := result.IPs[0].Address.IP.String()
seelog.Infof("Task engine [%s]: associated with ip address '%s'", task.Arn, taskIP)
engine.state.AddTaskIPAddress(taskIP, task.Arn)
task.SetLocalIPAddress(taskIP)
engine.saveTaskData(task)
// Invoke additional commands required to configure the task namespace routing.
err = engine.namespaceHelper.ConfigureTaskNamespaceRouting(engine.ctx, task.GetPrimaryENI(), cniConfig, result)
if err != nil {
seelog.Errorf("Task engine [%s]: unable to configure pause container namespace: %v",
task.Arn, err)
return dockerapi.DockerContainerMetadata{
DockerID: cniConfig.ContainerID,
Error: ContainerNetworkingError{errors.Wrapf(err,
"container resource provisioning: failed to setup network namespace")},
}
}
return dockerapi.DockerContainerMetadata{
DockerID: cniConfig.ContainerID,
}
}
// checkTearDownPauseContainer idempotently tears down the pause container network when the pause container's known
// or desired status is stopped.
func (engine *DockerTaskEngine) checkTearDownPauseContainer(task *apitask.Task) {
if !task.IsNetworkModeAWSVPC() {
return
}
for _, container := range task.Containers {
// Clean up the pause container network namespace before stopping the container
if container.Type == apicontainer.ContainerCNIPause {
// Clean up if the pause container has stopped or will stop
if container.KnownTerminal() || container.DesiredTerminal() {
err := engine.cleanupPauseContainerNetwork(task, container)
if err != nil {
seelog.Errorf("Task engine [%s]: unable to cleanup pause container network namespace: %v", task.Arn, err)
}
}
return
}
}
}
// cleanupPauseContainerNetwork will clean up the network namespace of pause container
func (engine *DockerTaskEngine) cleanupPauseContainerNetwork(task *apitask.Task, container *apicontainer.Container) error {
// This operation is idempotent
if container.IsContainerTornDown() {
return nil
}
delay := time.Duration(engine.cfg.ENIPauseContainerCleanupDelaySeconds) * time.Second
if engine.handleDelay != nil && delay > 0 {
seelog.Infof("Task engine [%s]: waiting %s before cleaning up pause container.", task.Arn, delay)
engine.handleDelay(delay)
}
containerInspectOutput, err := engine.inspectContainer(task, container)
if err != nil {
return errors.Wrap(err, "engine: cannot cleanup task network namespace due to error inspecting pause container")
}
seelog.Infof("Task engine [%s]: cleaning up the network namespace", task.Arn)
cniConfig, err := engine.buildCNIConfigFromTaskContainer(task, containerInspectOutput, false)
if err != nil {
return errors.Wrapf(err,
"engine: failed cleanup task network namespace, task: %s", task.String())
}
err = engine.cniClient.CleanupNS(engine.ctx, cniConfig, cniCleanupTimeout)
if err != nil {
return err
}
container.SetContainerTornDown(true)
seelog.Infof("Task engine [%s]: cleaned pause container network namespace", task.Arn)
return nil
}
// buildCNIConfigFromTaskContainer builds a CNI config for the task and container.
func (engine *DockerTaskEngine) buildCNIConfigFromTaskContainer(
task *apitask.Task,
containerInspectOutput *types.ContainerJSON,
includeIPAMConfig bool) (*ecscni.Config, error) {
cniConfig := &ecscni.Config{
BlockInstanceMetadata: engine.cfg.AWSVPCBlockInstanceMetdata.Enabled(),
MinSupportedCNIVersion: config.DefaultMinSupportedCNIVersion,
InstanceENIDNSServerList: engine.cfg.InstanceENIDNSServerList,
}
if engine.cfg.OverrideAWSVPCLocalIPv4Address != nil &&
len(engine.cfg.OverrideAWSVPCLocalIPv4Address.IP) != 0 &&
len(engine.cfg.OverrideAWSVPCLocalIPv4Address.Mask) != 0 {
cniConfig.IPAMV4Address = engine.cfg.OverrideAWSVPCLocalIPv4Address
}
if len(engine.cfg.AWSVPCAdditionalLocalRoutes) != 0 {
cniConfig.AdditionalLocalRoutes = engine.cfg.AWSVPCAdditionalLocalRoutes
}
cniConfig.ContainerPID = strconv.Itoa(containerInspectOutput.State.Pid)
cniConfig.ContainerID = containerInspectOutput.ID
cniConfig.ContainerNetNS = ""
// For pause containers, NetNS would be none
// For other containers, NetNS would be of format container:<pause_container_ID>
if containerInspectOutput.HostConfig.NetworkMode.IsNone() {
cniConfig.ContainerNetNS = containerInspectOutput.HostConfig.NetworkMode.NetworkName()
} else if containerInspectOutput.HostConfig.NetworkMode.IsContainer() {
cniConfig.ContainerNetNS = fmt.Sprintf("container:%s", containerInspectOutput.HostConfig.NetworkMode.ConnectedContainer())
} else {
return nil, errors.New("engine: failed to build cni configuration from the task due to invalid container network namespace")
}
cniConfig, err := task.BuildCNIConfig(includeIPAMConfig, cniConfig)
if err != nil {
return nil, errors.Wrapf(err, "engine: failed to build cni configuration from task")
}
return cniConfig, nil
}
func (engine *DockerTaskEngine) inspectContainer(task *apitask.Task, container *apicontainer.Container) (*types.ContainerJSON, error) {
dockerID, err := engine.getDockerID(task, container)
if err != nil {
return nil, err
}
return engine.client.InspectContainer(engine.ctx, dockerID, dockerclient.InspectContainerTimeout)
}
func (engine *DockerTaskEngine) stopContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
seelog.Infof("Task engine [%s]: stopping container [%s]", task.Arn, container.Name)
dockerID, err := engine.getDockerID(task, container)
if err != nil {
return dockerapi.DockerContainerMetadata{
Error: dockerapi.CannotStopContainerError{
FromError: err,
},
}
}
// Clean up the pause container network namespace before stopping the container
if container.Type == apicontainer.ContainerCNIPause {
err := engine.cleanupPauseContainerNetwork(task, container)
if err != nil {
seelog.Errorf("Task engine [%s]: unable to cleanup pause container network namespace: %v",
task.Arn, err)
}
}
apiTimeoutStopContainer := container.GetStopTimeout()
if apiTimeoutStopContainer <= 0 {
apiTimeoutStopContainer = engine.cfg.DockerStopTimeout
}
return engine.stopDockerContainer(dockerID, container.Name, apiTimeoutStopContainer)
}
// stopDockerContainer attempts to stop the container, retrying only in case of time out errors.
// If the maximum number of retries is reached, the container is marked as stopped. This is because docker sometimes
// deadlocks when trying to stop a container but the actual container process is stopped.
// for more information, see: https://github.com/moby/moby/issues/41587
func (engine *DockerTaskEngine) stopDockerContainer(dockerID, containerName string, apiTimeoutStopContainer time.Duration) dockerapi.DockerContainerMetadata {
var md dockerapi.DockerContainerMetadata
backoff := newExponentialBackoff(engine.stopContainerBackoffMin, engine.stopContainerBackoffMax, stopContainerBackoffJitter, stopContainerBackoffMultiplier)
for i := 0; i < stopContainerMaxRetryCount; i++ {
md = engine.client.StopContainer(engine.ctx, dockerID, apiTimeoutStopContainer)
if md.Error == nil {
return md
}
cannotStopContainerError, ok := md.Error.(cannotStopContainerError)
if ok && !cannotStopContainerError.IsRetriableError() {
return md
}
if i < stopContainerMaxRetryCount-1 {
retryIn := backoff.Duration()
logger.Warn(fmt.Sprintf("Error stopping container, retrying in %v", retryIn), logger.Fields{
field.Container: containerName,
field.RuntimeID: dockerID,
field.Error: md.Error,
"attempt": i + 1,
})
time.Sleep(retryIn)
}
}
return md
}
func (engine *DockerTaskEngine) removeContainer(task *apitask.Task, container *apicontainer.Container) error {
seelog.Infof("Task engine [%s]: removing container: %s", task.Arn, container.Name)
dockerID, err := engine.getDockerID(task, container)
if err != nil {
return err
}
return engine.client.RemoveContainer(engine.ctx, dockerID, dockerclient.RemoveContainerTimeout)
}
// updateTaskUnsafe determines if a new transition needs to be applied to the
// referenced task, and if needed applies it. It should not be called anywhere
// but from 'AddTask' and is protected by the tasksLock lock there.
func (engine *DockerTaskEngine) updateTaskUnsafe(task *apitask.Task, update *apitask.Task) {
managedTask, ok := engine.managedTasks[task.Arn]
if !ok {
seelog.Criticalf("Task engine [%s]: ACS message for a task we thought we managed, but don't! Aborting.",
task.Arn)
return
}
// Keep the lock because sequence numbers cannot be correct unless they are
// also read in the order AddTask was called
// This does block the engine's ability to ingest any new events (including
// stops for past tasks, ack!), but this is necessary for correctness
updateDesiredStatus := update.GetDesiredStatus()
seelog.Debugf("Task engine [%s]: putting update on the acs channel: [%s] with seqnum [%d]",
task.Arn, updateDesiredStatus.String(), update.StopSequenceNumber)
managedTask.emitACSTransition(acsTransition{
desiredStatus: updateDesiredStatus,
seqnum: update.StopSequenceNumber,
})
seelog.Debugf("Task engine [%s]: update taken off the acs channel: [%s] with seqnum [%d]",
task.Arn, updateDesiredStatus.String(), update.StopSequenceNumber)
}
// transitionContainer calls applyContainerState, and then notifies the managed
// task of the change. transitionContainer is called by progressTask and
// by handleStoppedToRunningContainerTransition.
func (engine *DockerTaskEngine) transitionContainer(task *apitask.Task, container *apicontainer.Container, to containerstatus.ContainerStatus) {
// Let docker events operate async so that we can continue to handle ACS / other requests
// This is safe because 'applyContainerState' will not mutate the task
metadata := engine.applyContainerState(task, container, to)
engine.tasksLock.RLock()
managedTask, ok := engine.managedTasks[task.Arn]
engine.tasksLock.RUnlock()
if ok {
managedTask.emitDockerContainerChange(dockerContainerChange{
container: container,
event: dockerapi.DockerContainerChangeEvent{
Status: to,
DockerContainerMetadata: metadata,
},
})
}
}
// applyContainerState moves the container to the given state by calling the
// function defined in the transitionFunctionMap for the state
func (engine *DockerTaskEngine) applyContainerState(task *apitask.Task, container *apicontainer.Container, nextState containerstatus.ContainerStatus) dockerapi.DockerContainerMetadata {
transitionFunction, ok := engine.transitionFunctionMap()[nextState]
if !ok {
seelog.Criticalf("Task engine [%s]: unsupported desired state transition for container [%s]: %s",
task.Arn, container.Name, nextState.String())
return dockerapi.DockerContainerMetadata{Error: &impossibleTransitionError{nextState}}
}
metadata := transitionFunction(task, container)
if metadata.Error != nil {
seelog.Infof("Task engine [%s]: error transitioning container [%s (Runtime ID: %s)] to [%s]: %v",
task.Arn, container.Name, container.GetRuntimeID(), nextState.String(), metadata.Error)
} else {
seelog.Debugf("Task engine [%s]: transitioned container [%s (Runtime ID: %s)] to [%s]",
task.Arn, container.Name, container.GetRuntimeID(), nextState.String())
}
return metadata
}
// transitionFunctionMap provides the logic for the simple state machine of the
// DockerTaskEngine. Each desired state maps to a function that can be called
// to try and move the task to that desired state.
func (engine *DockerTaskEngine) transitionFunctionMap() map[containerstatus.ContainerStatus]transitionApplyFunc {
return engine.containerStatusToTransitionFunction
}
type transitionApplyFunc (func(*apitask.Task, *apicontainer.Container) dockerapi.DockerContainerMetadata)
// State is a function primarily meant for testing usage; it is explicitly not
// part of the TaskEngine interface and should not be relied upon.
// It returns an internal representation of the state of this DockerTaskEngine.
func (engine *DockerTaskEngine) State() dockerstate.TaskEngineState {
return engine.state
}
// Version returns the underlying docker version.
func (engine *DockerTaskEngine) Version() (string, error) {
return engine.client.Version(engine.ctx, dockerclient.VersionTimeout)
}
func (engine *DockerTaskEngine) updateMetadataFile(task *apitask.Task, cont *apicontainer.DockerContainer) {
err := engine.metadataManager.Update(engine.ctx, cont.DockerID, task, cont.Container.Name)
if err != nil {
seelog.Errorf("Task engine [%s]: failed to update metadata file for container %s: %v",
task.Arn, cont.Container.Name, err)
} else {
cont.Container.SetMetadataFileUpdated()
seelog.Debugf("Task engine [%s]: updated metadata file for container %s",
task.Arn, cont.Container.Name)
}
}
func getContainerHostIP(networkSettings *types.NetworkSettings) (string, bool) {
if networkSettings == nil {
return "", false
} else if networkSettings.IPAddress != "" {
return networkSettings.IPAddress, true
} else if len(networkSettings.Networks) > 0 {
for mode, network := range networkSettings.Networks {
if mode == apitask.BridgeNetworkMode && network.IPAddress != "" {
return network.IPAddress, true
}
}
}
return "", false
}
func (engine *DockerTaskEngine) getDockerID(task *apitask.Task, container *apicontainer.Container) (string, error) {
runtimeID := container.GetRuntimeID()
if runtimeID != "" {
return runtimeID, nil
}
containerMap, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
return "", errors.Errorf("container name=%s belongs to unrecognized task taskArn=%s", container.Name, task.Arn)
}
dockerContainer, ok := containerMap[container.Name]
if !ok {
return "", errors.Errorf("container name=%s not recognized by agent", container.Name)
}
if dockerContainer.DockerID == "" {
return dockerContainer.DockerName, nil
}
return dockerContainer.DockerID, nil
}
| 1 | 26,678 | Are these values expected to change over time? Why not inject these values in the ECS backend instead of having the agent do it? | aws-amazon-ecs-agent | go |
@@ -31,6 +31,17 @@ func run(app *cli.Context, cfg *cmds.Server) error {
return err
}
+ nodeName := app.String("node-name")
+ if nodeName == "" {
+ h, err := os.Hostname()
+ if err != nil {
+ return err
+ }
+ nodeName = h
+ }
+
+ os.Setenv("NODE_NAME", nodeName)
+
var serverConfig server.Config
serverConfig.DisableAgent = true
serverConfig.ControlConfig.DataDir = dataDir | 1 | package etcdsnapshot
import (
"context"
"errors"
"os"
"path/filepath"
"github.com/erikdubbelboer/gspt"
"github.com/rancher/k3s/pkg/cli/cmds"
"github.com/rancher/k3s/pkg/cluster"
"github.com/rancher/k3s/pkg/daemons/config"
"github.com/rancher/k3s/pkg/etcd"
"github.com/rancher/k3s/pkg/server"
"github.com/rancher/wrangler/pkg/signals"
"github.com/urfave/cli"
)
func Run(app *cli.Context) error {
if err := cmds.InitLogging(); err != nil {
return err
}
return run(app, &cmds.ServerConfig)
}
func run(app *cli.Context, cfg *cmds.Server) error {
gspt.SetProcTitle(os.Args[0])
dataDir, err := server.ResolveDataDir(cfg.DataDir)
if err != nil {
return err
}
var serverConfig server.Config
serverConfig.DisableAgent = true
serverConfig.ControlConfig.DataDir = dataDir
serverConfig.ControlConfig.EtcdSnapshotName = cfg.EtcdSnapshotName
serverConfig.ControlConfig.EtcdSnapshotDir = cfg.EtcdSnapshotDir
serverConfig.ControlConfig.EtcdSnapshotRetention = 0 // disable retention check
serverConfig.ControlConfig.EtcdS3 = cfg.EtcdS3
serverConfig.ControlConfig.EtcdS3Endpoint = cfg.EtcdS3Endpoint
serverConfig.ControlConfig.EtcdS3EndpointCA = cfg.EtcdS3EndpointCA
serverConfig.ControlConfig.EtcdS3SkipSSLVerify = cfg.EtcdS3SkipSSLVerify
serverConfig.ControlConfig.EtcdS3AccessKey = cfg.EtcdS3AccessKey
serverConfig.ControlConfig.EtcdS3SecretKey = cfg.EtcdS3SecretKey
serverConfig.ControlConfig.EtcdS3BucketName = cfg.EtcdS3BucketName
serverConfig.ControlConfig.EtcdS3Region = cfg.EtcdS3Region
serverConfig.ControlConfig.EtcdS3Folder = cfg.EtcdS3Folder
serverConfig.ControlConfig.Runtime = &config.ControlRuntime{}
serverConfig.ControlConfig.Runtime.ETCDServerCA = filepath.Join(dataDir, "tls", "etcd", "server-ca.crt")
serverConfig.ControlConfig.Runtime.ClientETCDCert = filepath.Join(dataDir, "tls", "etcd", "client.crt")
serverConfig.ControlConfig.Runtime.ClientETCDKey = filepath.Join(dataDir, "tls", "etcd", "client.key")
ctx := signals.SetupSignalHandler(context.Background())
initialized, err := etcd.NewETCD().IsInitialized(ctx, &serverConfig.ControlConfig)
if err != nil {
return err
}
if !initialized {
return errors.New("managed etcd database has not been initialized")
}
cluster := cluster.New(&serverConfig.ControlConfig)
if err := cluster.Bootstrap(ctx); err != nil {
return err
}
return cluster.Snapshot(ctx, &serverConfig.ControlConfig)
}
| 1 | 9,446 | As best I can tell, you are setting this here in order to make it available in `(e *ETCD) Snapshot(ctx context.Context, config *config.Control) error`. It feels wrong to pass state like this through an env var. Could you instead add a `nodeName` property to the ETCD struct? | k3s-io-k3s | go |
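A minimal sketch of the alternative the reviewer is describing - carrying the node name as a field on the ETCD struct instead of exporting NODE_NAME into the process environment. The field and constructor shapes below are illustrative assumptions, not the actual k3s API:

```go
package etcd

import "context"

// ETCD is a trimmed-down stand-in for the real struct in pkg/etcd; the
// nodeName field is the addition being proposed.
type ETCD struct {
	nodeName string
}

// NewETCD accepts the resolved node name once, so callers no longer need to
// export NODE_NAME into the process environment.
func NewETCD(nodeName string) *ETCD {
	return &ETCD{nodeName: nodeName}
}

// Snapshot reads the node name from the receiver instead of os.Getenv("NODE_NAME").
func (e *ETCD) Snapshot(ctx context.Context) error {
	_ = e.nodeName // e.g. used when composing the snapshot file name
	return nil
}
```

Under this shape, the snapshot command's run() would pass the resolved node name straight into NewETCD (or a setter) rather than calling os.Setenv, which keeps the state explicit and easier to test.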
@@ -38,6 +38,8 @@ var skipTests = map[string]string{
"covariance_missing_column_2": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)",
"drop_before_rename": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)",
"drop_referenced": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)",
+ "drop_non_existent": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)",
+ "keep_non_existent": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)",
"yield": "yield requires special test case (https://github.com/influxdata/flux/issues/535)",
}
| 1 | package tests_test
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/influxdata/flux"
_ "github.com/influxdata/flux/functions/inputs" // Import the built-in inputs
_ "github.com/influxdata/flux/functions/outputs" // Import the built-in outputs
_ "github.com/influxdata/flux/functions/tests" // Import the built-in functions
_ "github.com/influxdata/flux/functions/transformations" // Import the built-in functions
"github.com/influxdata/flux/lang"
_ "github.com/influxdata/flux/options" // Import the built-in options
"github.com/influxdata/flux/querytest"
)
func init() {
flux.RegisterBuiltIn("loadTest", loadTestBuiltin)
flux.FinalizeBuiltIns()
}
var loadTestBuiltin = `
// loadData is a function that's referenced in all the transformation tests.
// it's registered here so that we can register a different loadData function for
// each platform/binary.
testLoadStorage = (csv) => fromCSV(csv:csv)
testLoadMem = (csv) => fromCSV(csv: csv)`
var skipTests = map[string]string{
"string_max": "error: invalid use of function: *functions.MaxSelector has no implementation for type string (https://github.com/influxdata/platform/issues/224)",
"null_as_value": "null not supported as value in influxql (https://github.com/influxdata/platform/issues/353)",
"string_interp": "string interpolation not working as expected in flux (https://github.com/influxdata/platform/issues/404)",
"to": "to functions are not supported in the testing framework (https://github.com/influxdata/flux/issues/77)",
"covariance_missing_column_1": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)",
"covariance_missing_column_2": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)",
"drop_before_rename": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)",
"drop_referenced": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)",
"yield": "yield requires special test case (https://github.com/influxdata/flux/issues/535)",
}
var querier = querytest.NewQuerier()
type AssertionError interface {
Assertion() bool
}
func withEachFluxFile(t testing.TB, fn func(prefix, caseName string)) {
dir, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
path := filepath.Join(dir, "testdata")
os.Chdir(path)
fluxFiles, err := filepath.Glob("*.flux")
if err != nil {
t.Fatalf("error searching for Flux files: %s", err)
}
for _, fluxFile := range fluxFiles {
ext := filepath.Ext(fluxFile)
prefix := fluxFile[0 : len(fluxFile)-len(ext)]
_, caseName := filepath.Split(prefix)
fn(prefix, caseName)
}
}
func Test_QueryEndToEnd(t *testing.T) {
withEachFluxFile(t, func(prefix, caseName string) {
reason, skip := skipTests[caseName]
fluxName := caseName + ".flux"
t.Run(fluxName, func(t *testing.T) {
if skip {
t.Skip(reason)
}
testFlux(t, querier, prefix, ".flux")
})
})
}
func Benchmark_QueryEndToEnd(b *testing.B) {
withEachFluxFile(b, func(prefix, caseName string) {
reason, skip := skipTests[caseName]
fluxName := caseName + ".flux"
b.Run(fluxName, func(b *testing.B) {
if skip {
b.Skip(reason)
}
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testFlux(b, querier, prefix, ".flux")
}
})
})
}
func testFlux(t testing.TB, querier *querytest.Querier, prefix, queryExt string) {
q, err := ioutil.ReadFile(prefix + queryExt)
if err != nil {
t.Fatal(err)
}
if err != nil {
t.Fatal(err)
}
c := lang.FluxCompiler{
Query: string(q),
}
r, err := querier.C.Query(context.Background(), c)
if err != nil {
t.Fatalf("test error %s", err)
}
defer r.Done()
result, ok := <-r.Ready()
if !ok {
t.Fatalf("TEST error retrieving query result: %s", r.Err())
}
for _, v := range result {
err := v.Tables().Do(func(tbl flux.Table) error {
return nil
})
if err != nil {
if assertionErr, ok := err.(AssertionError); ok {
t.Error(assertionErr)
} else {
t.Fatal(err)
}
}
}
}
| 1 | 9,372 | These seem to support the new test framework below, but the `.in.csv` files are still committed. | influxdata-flux | go |
@@ -227,8 +227,11 @@ namespace Nethermind.Blockchain.Receipts
for (int i = 0; i < txReceipts.Length; i++)
{
- var txHash = block.Transactions[i].Hash;
- _transactionDb.Set(txHash, block.Hash.Bytes);
+ if (txReceipts[i].Removed == false)
+ {
+ var txHash = block.Transactions[i].Hash;
+ _transactionDb.Set(txHash, block.Hash.Bytes);
+ }
}
if (blockNumber < MigratedBlockNumber) | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.IO;
using Nethermind.Core;
using Nethermind.Core.Caching;
using Nethermind.Core.Crypto;
using Nethermind.Core.Extensions;
using Nethermind.Core.Specs;
using Nethermind.Db;
using Nethermind.Serialization.Rlp;
#pragma warning disable 618
namespace Nethermind.Blockchain.Receipts
{
public class PersistentReceiptStorage : IReceiptStorage
{
private readonly IColumnsDb<ReceiptsColumns> _database;
private readonly ISpecProvider _specProvider;
private readonly IReceiptsRecovery _receiptsRecovery;
private long? _lowestInsertedReceiptBlock;
private readonly IDbWithSpan _blocksDb;
private readonly IDb _transactionDb;
private static readonly Keccak MigrationBlockNumberKey = Keccak.Compute(nameof(MigratedBlockNumber));
private long _migratedBlockNumber;
private static readonly ReceiptStorageDecoder StorageDecoder = ReceiptStorageDecoder.Instance;
private const int CacheSize = 64;
private readonly ICache<Keccak, TxReceipt[]> _receiptsCache = new LruCache<Keccak, TxReceipt[]>(CacheSize, CacheSize, "receipts");
public PersistentReceiptStorage(IColumnsDb<ReceiptsColumns> receiptsDb, ISpecProvider specProvider, IReceiptsRecovery receiptsRecovery)
{
long Get(Keccak key, long defaultValue) => _database.Get(key)?.ToLongFromBigEndianByteArrayWithoutLeadingZeros() ?? defaultValue;
_database = receiptsDb ?? throw new ArgumentNullException(nameof(receiptsDb));
_specProvider = specProvider ?? throw new ArgumentNullException(nameof(specProvider));
_receiptsRecovery = receiptsRecovery ?? throw new ArgumentNullException(nameof(receiptsRecovery));
_blocksDb = _database.GetColumnDb(ReceiptsColumns.Blocks);
_transactionDb = _database.GetColumnDb(ReceiptsColumns.Transactions);
byte[] lowestBytes = _database.Get(Keccak.Zero);
_lowestInsertedReceiptBlock = lowestBytes == null ? (long?) null : new RlpStream(lowestBytes).DecodeLong();
_migratedBlockNumber = Get(MigrationBlockNumberKey, long.MaxValue);
}
public Keccak FindBlockHash(Keccak txHash)
{
var blockHashData = _transactionDb.Get(txHash);
return blockHashData == null ? FindReceiptObsolete(txHash)?.BlockHash : new Keccak(blockHashData);
}
// Find receipt stored with old - obsolete format.
private TxReceipt FindReceiptObsolete(Keccak hash)
{
var receiptData = _database.GetSpan(hash);
try
{
return DeserializeReceiptObsolete(hash, receiptData);
}
finally
{
_database.DangerousReleaseMemory(receiptData);
}
}
private static TxReceipt DeserializeReceiptObsolete(Keccak hash, Span<byte> receiptData)
{
if (!receiptData.IsNullOrEmpty())
{
var context = new Rlp.ValueDecoderContext(receiptData);
try
{
var receipt = StorageDecoder.Decode(ref context, RlpBehaviors.Storage);
receipt.TxHash = hash;
return receipt;
}
catch (RlpException)
{
context.Position = 0;
var receipt = StorageDecoder.Decode(ref context);
receipt.TxHash = hash;
return receipt;
}
}
return null;
}
public TxReceipt[] Get(Block block)
{
if (block.ReceiptsRoot == Keccak.EmptyTreeHash)
{
return Array.Empty<TxReceipt>();
}
if (_receiptsCache.TryGet(block.Hash, out var receipts))
{
return receipts;
}
var receiptsData = _blocksDb.GetSpan(block.Hash);
try
{
bool shouldCache = true;
if (!receiptsData.IsNullOrEmpty())
{
receipts = DecodeArray(receiptsData);
}
else
{
// didn't bring performance uplift that was expected
// var data = _database.MultiGet(block.Transactions.Select(t => t.Hash));
// return data.Select(kvp => DeserializeObsolete(new Keccak(kvp.Key), kvp.Value)).ToArray();
receipts = new TxReceipt[block.Transactions.Length];
for (int i = 0; i < block.Transactions.Length; i++)
{
receipts[i] = FindReceiptObsolete(block.Transactions[i].Hash);
shouldCache &= receipts[i] != null;
}
}
shouldCache &= receipts.Length > 0;
if (shouldCache)
{
_receiptsCache.Set(block.Hash, receipts);
}
return receipts;
}
finally
{
_blocksDb.DangerousReleaseMemory(receiptsData);
}
}
private static TxReceipt[] DecodeArray(in Span<byte> receiptsData)
{
var decoderContext = new Rlp.ValueDecoderContext(receiptsData);
try
{
return StorageDecoder.DecodeArray(ref decoderContext, RlpBehaviors.Storage);
}
catch (RlpException)
{
decoderContext.Position = 0;
return StorageDecoder.DecodeArray(ref decoderContext);
}
}
public TxReceipt[] Get(Keccak blockHash)
{
if (_receiptsCache.TryGet(blockHash, out var receipts))
{
return receipts;
}
var receiptsData = _blocksDb.GetSpan(blockHash);
try
{
if (receiptsData.IsNullOrEmpty())
{
return Array.Empty<TxReceipt>();
}
else
{
receipts = DecodeArray(receiptsData);
_receiptsCache.Set(blockHash, receipts);
return receipts;
}
}
finally
{
_blocksDb.DangerousReleaseMemory(receiptsData);
}
}
public bool CanGetReceiptsByHash(long blockNumber) => blockNumber >= MigratedBlockNumber;
public bool TryGetReceiptsIterator(long blockNumber, Keccak blockHash, out ReceiptsIterator iterator)
{
if (_receiptsCache.TryGet(blockHash, out var receipts))
{
iterator = new ReceiptsIterator(receipts);
return true;
}
var result = CanGetReceiptsByHash(blockNumber);
var receiptsData = _blocksDb.GetSpan(blockHash);
iterator = result ? new ReceiptsIterator(receiptsData, _blocksDb) : new ReceiptsIterator();
return result;
}
public void Insert(Block block, params TxReceipt[] txReceipts)
{
txReceipts ??= Array.Empty<TxReceipt>();
if (block.Transactions.Length != txReceipts.Length)
{
throw new InvalidDataException(
$"Block {block.ToString(Block.Format.FullHashAndNumber)} has different numbers " +
$"of transactions {block.Transactions.Length} and receipts {txReceipts.Length}.");
}
_receiptsRecovery.TryRecover(block, txReceipts);
var blockNumber = block.Number;
var spec = _specProvider.GetSpec(blockNumber);
RlpBehaviors behaviors = spec.IsEip658Enabled ? RlpBehaviors.Eip658Receipts | RlpBehaviors.Storage : RlpBehaviors.Storage;
_blocksDb.Set(block.Hash, StorageDecoder.Encode(txReceipts, behaviors).Bytes);
for (int i = 0; i < txReceipts.Length; i++)
{
var txHash = block.Transactions[i].Hash;
_transactionDb.Set(txHash, block.Hash.Bytes);
}
if (blockNumber < MigratedBlockNumber)
{
MigratedBlockNumber = blockNumber;
}
_receiptsCache.Set(block.Hash, txReceipts);
ReceiptsInserted?.Invoke(this, new ReceiptsEventArgs(block.Header, txReceipts));
}
public long? LowestInsertedReceiptBlockNumber
{
get => _lowestInsertedReceiptBlock;
set
{
_lowestInsertedReceiptBlock = value;
if (value.HasValue)
{
_database.Set(Keccak.Zero, Rlp.Encode(value.Value).Bytes);
}
}
}
public long MigratedBlockNumber
{
get => _migratedBlockNumber;
set
{
_migratedBlockNumber = value;
_database.Set(MigrationBlockNumberKey, MigratedBlockNumber.ToBigEndianByteArrayWithoutLeadingZeros());
}
}
internal void ClearCache()
{
_receiptsCache.Clear();
}
public event EventHandler<ReceiptsEventArgs> ReceiptsInserted;
}
}
| 1 | 25,716 | !txReceipts[i].Removed and We can check it for first item outside of for loop, we don't expect mixed batches | NethermindEth-nethermind | .cs |
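The review note above proposes checking `Removed` once, on the first receipt and outside the loop, since a batch is not expected to mix removed and non-removed receipts. A minimal C# sketch of that idea, reusing the identifiers from the file in this row; the helper name is hypothetical and this is not the actual Nethermind change.

```csharp
// Hypothetical helper: hoist the Removed check out of the per-receipt loop,
// assuming a batch is uniformly removed or kept (no mixed batches expected).
private void IndexTransactions(Block block, TxReceipt[] txReceipts)
{
    if (txReceipts.Length > 0 && txReceipts[0].Removed)
    {
        return; // nothing to index for a removed batch
    }

    for (int i = 0; i < txReceipts.Length; i++)
    {
        _transactionDb.Set(block.Transactions[i].Hash, block.Hash.Bytes);
    }
}
```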
@@ -40,7 +40,7 @@ const (
prodEnvFlag = "prod"
deployFlag = "deploy"
resourcesFlag = "resources"
- githubURLFlag = "github-url"
+ repoURLFlag = "url"
githubAccessTokenFlag = "github-access-token"
gitBranchFlag = "git-branch"
envsFlag = "environments" | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"fmt"
"strings"
"github.com/aws/copilot-cli/internal/pkg/manifest"
"github.com/aws/copilot-cli/internal/pkg/template"
)
// Long flag names.
const (
// Common flags.
nameFlag = "name"
appFlag = "app"
envFlag = "env"
workloadFlag = "workload"
svcTypeFlag = "svc-type"
jobTypeFlag = "job-type"
typeFlag = "type"
profileFlag = "profile"
yesFlag = "yes"
jsonFlag = "json"
allFlag = "all"
// Command specific flags.
dockerFileFlag = "dockerfile"
imageTagFlag = "tag"
resourceTagsFlag = "resource-tags"
stackOutputDirFlag = "output-dir"
limitFlag = "limit"
followFlag = "follow"
sinceFlag = "since"
startTimeFlag = "start-time"
endTimeFlag = "end-time"
tasksFlag = "tasks"
prodEnvFlag = "prod"
deployFlag = "deploy"
resourcesFlag = "resources"
githubURLFlag = "github-url"
githubAccessTokenFlag = "github-access-token"
gitBranchFlag = "git-branch"
envsFlag = "environments"
domainNameFlag = "domain"
localFlag = "local"
deleteSecretFlag = "delete-secret"
svcPortFlag = "port"
storageTypeFlag = "storage-type"
storagePartitionKeyFlag = "partition-key"
storageSortKeyFlag = "sort-key"
storageNoSortFlag = "no-sort"
storageLSIConfigFlag = "lsi"
storageNoLSIFlag = "no-lsi"
taskGroupNameFlag = "task-group-name"
countFlag = "count"
cpuFlag = "cpu"
memoryFlag = "memory"
imageFlag = "image"
taskRoleFlag = "task-role"
executionRoleFlag = "execution-role"
subnetsFlag = "subnets"
securityGroupsFlag = "security-groups"
envVarsFlag = "env-vars"
commandFlag = "command"
taskDefaultFlag = "default"
vpcIDFlag = "import-vpc-id"
publicSubnetsFlag = "import-public-subnets"
privateSubnetsFlag = "import-private-subnets"
vpcCIDRFlag = "override-vpc-cidr"
publicSubnetCIDRsFlag = "override-public-cidrs"
privateSubnetCIDRsFlag = "override-private-cidrs"
defaultConfigFlag = "default-config"
accessKeyIDFlag = "aws-access-key-id"
secretAccessKeyFlag = "aws-secret-access-key"
sessionTokenFlag = "aws-session-token"
regionFlag = "region"
retriesFlag = "retries"
timeoutFlag = "timeout"
scheduleFlag = "schedule"
)
// Short flag names.
// A short flag only exists if the flag or flag set is mandatory by the command.
const (
nameFlagShort = "n"
appFlagShort = "a"
envFlagShort = "e"
typeFlagShort = "t"
workloadFlagShort = "w"
dockerFileFlagShort = "d"
imageFlagShort = "i"
githubURLFlagShort = "u"
githubAccessTokenFlagShort = "t"
gitBranchFlagShort = "b"
envsFlagShort = "e"
scheduleFlagShort = "s"
)
// Descriptions for flags.
var (
svcTypeFlagDescription = fmt.Sprintf(`Type of service to create. Must be one of:
%s`, strings.Join(template.QuoteSliceFunc(manifest.ServiceTypes), ", "))
imageFlagDescription = fmt.Sprintf(`The location of an existing Docker image.
Mutually exclusive with -%s, --%s`, dockerFileFlagShort, dockerFileFlag)
dockerFileFlagDescription = fmt.Sprintf(`Path to the Dockerfile.
Mutually exclusive with -%s, --%s`, imageFlagShort, imageFlag)
storageTypeFlagDescription = fmt.Sprintf(`Type of storage to add. Must be one of:
%s`, strings.Join(template.QuoteSliceFunc(storageTypes), ", "))
jobTypeFlagDescription = fmt.Sprintf(`Type of job to create. Must be one of:
%s`, strings.Join(template.QuoteSliceFunc(manifest.JobTypes), ", "))
wkldTypeFlagDescription = fmt.Sprintf(`Type of job or svc to create. Must be one of:
%s`, strings.Join(template.QuoteSliceFunc(manifest.WorkloadTypes), ", "))
subnetsFlagDescription = fmt.Sprintf(`Optional. The subnet IDs for the task to use. Can be specified multiple times.
Cannot be specified with '%s', '%s' or '%s'.`, appFlag, envFlag, taskDefaultFlag)
securityGroupsFlagDescription = fmt.Sprintf(`Optional. The security group IDs for the task to use. Can be specified multiple times.
Cannot be specified with '%s' or '%s'.`, appFlag, envFlag)
taskDefaultFlagDescription = fmt.Sprintf(`Optional. Run tasks in default cluster and default subnets.
Cannot be specified with '%s', '%s' or '%s'.`, appFlag, envFlag, subnetsFlag)
taskEnvFlagDescription = fmt.Sprintf(`Optional. Name of the environment.
Cannot be specified with '%s', '%s' or '%s'`, taskDefaultFlag, subnetsFlag, securityGroupsFlag)
taskAppFlagDescription = fmt.Sprintf(`Optional. Name of the application.
Cannot be specified with '%s', '%s' or '%s'`, taskDefaultFlag, subnetsFlag, securityGroupsFlag)
)
const (
appFlagDescription = "Name of the application."
envFlagDescription = "Name of the environment."
svcFlagDescription = "Name of the service."
jobFlagDescription = "Name of the job."
workloadFlagDescription = "Name of the service or job."
pipelineFlagDescription = "Name of the pipeline."
profileFlagDescription = "Name of the profile."
yesFlagDescription = "Skips confirmation prompt."
jsonFlagDescription = "Optional. Outputs in JSON format."
imageTagFlagDescription = `Optional. The container image tag.`
resourceTagsFlagDescription = `Optional. Labels with a key and value separated with commas.
Allows you to categorize resources.`
stackOutputDirFlagDescription = "Optional. Writes the stack template and template configuration to a directory."
prodEnvFlagDescription = "If the environment contains production services."
limitFlagDescription = `Optional. The maximum number of log events returned. Default is 10
unless any time filtering flags are set.`
followFlagDescription = "Optional. Specifies if the logs should be streamed."
sinceFlagDescription = `Optional. Only return logs newer than a relative duration like 5s, 2m, or 3h.
Defaults to all logs. Only one of start-time / since may be used.`
startTimeFlagDescription = `Optional. Only return logs after a specific date (RFC3339).
Defaults to all logs. Only one of start-time / since may be used.`
endTimeFlagDescription = `Optional. Only return logs before a specific date (RFC3339).
Defaults to all logs. Only one of end-time / follow may be used.`
tasksLogsFlagDescription = "Optional. Only return logs from specific task IDs."
deployTestFlagDescription = `Deploy your service or job to a "test" environment.`
githubURLFlagDescription = "GitHub repository URL for your service."
githubAccessTokenFlagDescription = "GitHub personal access token for your repository."
gitBranchFlagDescription = "Branch used to trigger your pipeline."
pipelineEnvsFlagDescription = "Environments to add to the pipeline."
domainNameFlagDescription = "Optional. Your existing custom domain name."
envResourcesFlagDescription = "Optional. Show the resources in your environment."
svcResourcesFlagDescription = "Optional. Show the resources in your service."
pipelineResourcesFlagDescription = "Optional. Show the resources in your pipeline."
localSvcFlagDescription = "Only show services in the workspace."
localJobFlagDescription = "Only show jobs in the workspace."
deleteSecretFlagDescription = "Deletes AWS Secrets Manager secret associated with a pipeline source repository."
svcPortFlagDescription = "Optional. The port on which your service listens."
storageFlagDescription = "Name of the storage resource to create."
storageWorkloadFlagDescription = "Name of the service or job to associate with storage."
storagePartitionKeyFlagDescription = `Partition key for the DDB table.
Must be of the format '<keyName>:<dataType>'.`
storageSortKeyFlagDescription = `Optional. Sort key for the DDB table.
Must be of the format '<keyName>:<dataType>'.`
storageNoSortFlagDescription = "Optional. Skip configuring sort keys."
storageNoLSIFlagDescription = `Optional. Don't ask about configuring alternate sort keys.`
storageLSIConfigFlagDescription = `Optional. Attribute to use as an alternate sort key. May be specified up to 5 times.
Must be of the format '<keyName>:<dataType>'.`
countFlagDescription = "Optional. The number of tasks to set up."
cpuFlagDescription = "Optional. The number of CPU units to reserve for each task."
memoryFlagDescription = "Optional. The amount of memory to reserve in MiB for each task."
taskRoleFlagDescription = "Optional. The ARN of the role for the task to use."
executionRoleFlagDescription = "Optional. The ARN of the role that grants the container agent permission to make AWS API calls."
envVarsFlagDescription = "Optional. Environment variables specified by key=value separated with commas."
commandFlagDescription = `Optional. The command that is passed to "docker run" to override the default command.`
taskGroupFlagDescription = `Optional. The group name of the task.
Tasks with the same group name share the same set of resources.
(default directory name)`
taskImageTagFlagDescription = `Optional. The container image tag in addition to "latest".`
vpcIDFlagDescription = "Optional. Use an existing VPC ID."
publicSubnetsFlagDescription = "Optional. Use existing public subnet IDs."
privateSubnetsFlagDescription = "Optional. Use existing private subnet IDs."
vpcCIDRFlagDescription = "Optional. Global CIDR to use for VPC (default 10.0.0.0/16)."
publicSubnetCIDRsFlagDescription = "Optional. CIDR to use for public subnets (default 10.0.0.0/24,10.0.1.0/24)."
privateSubnetCIDRsFlagDescription = "Optional. CIDR to use for private subnets (default 10.0.2.0/24,10.0.3.0/24)."
defaultConfigFlagDescription = "Optional. Skip prompting and use default environment configuration."
accessKeyIDFlagDescription = "Optional. An AWS access key."
secretAccessKeyFlagDescription = "Optional. An AWS secret access key."
sessionTokenFlagDescription = "Optional. An AWS session token for temporary credentials."
envRegionTokenFlagDescription = "Optional. An AWS region where the environment will be created."
retriesFlagDescription = "Optional. The number of times to try restarting the job on a failure."
timeoutFlagDescription = `Optional. The total execution time for the task, including retries.
Accepts valid Go duration strings. For example: "2h", "1h30m", "900s".`
scheduleFlagDescription = `The schedule on which to run this job.
Accepts cron expressions of the format (M H DoM M DoW) and schedule definition strings.
For example: "0 * * * *", "@daily", "@weekly", "@every 1h30m".
AWS Schedule Expressions of the form "rate(10 minutes)" or "cron(0 12 L * ? 2021)"
are also accepted.`
upgradeAllEnvsDescription = "Optional. Upgrade all environments."
)
| 1 | 15,970 | We can't unfortunately change any of our old flags :( it'd be a breaking change. Instead, we can keep both `githubURLFlag` and `repoURLFlag` and make the description of `githubURLFlagDescription` as "(Deprecated) Use --url instead. GitHub respository URL for your service." It would be even better if we can make `githubURLFlag` as hidden but keep in the command. This way old commands will still work with `--github-url` but it won't appear anymore in the help menu. | aws-copilot-cli | go |
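The review comment above asks to keep `--github-url` working for backward compatibility, describe it as deprecated in favor of `--url`, and hide it from the help menu rather than renaming it outright. A minimal cobra/pflag sketch of that wiring: `repoURLFlag` comes from this row's patch and the `githubURLFlag*` constants from the file above, while the helper name, the command, and the bound field are assumptions.

```go
package cli

import "github.com/spf13/cobra"

// bindRepoURLFlags is a hypothetical helper: both flag names write to the same
// field, and the old name is marked deprecated in its description and hidden.
func bindRepoURLFlags(cmd *cobra.Command, repoURL *string) {
	cmd.Flags().StringVar(repoURL, repoURLFlag, "", "Repository URL for your service.")
	cmd.Flags().StringVarP(repoURL, githubURLFlag, githubURLFlagShort, "",
		"(Deprecated) Use --url instead. "+githubURLFlagDescription)
	// MarkHidden keeps --github-url parseable but drops it from --help output.
	_ = cmd.Flags().MarkHidden(githubURLFlag)
}
```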
@@ -61,7 +61,10 @@ Puppet::Functions.create_function(:run_plan, Puppet::Functions::InternalFunction
return result
end
+
# Could not find plan
- raise ArgumentError, "Function #{self.class.name}(): Unknown plan: '#{plan_name}'"
+ raise Puppet::ParseErrorWithIssue.from_issue_and_stack(
+ Puppet::Pops::Issues.issue(:UNKNOWN_PLAN) { Bolt::Error.unknown_plan(plan_name) }
+ )
end
 end | 1 | # Runs the `plan` referenced by its name, passing the given arguments to it as a hash of name-to-value mappings.
# A plan is autoloaded from under <root>/plans if not already defined.
#
# @example defining and running a plan
# plan myplan($x) {
# # do things with tasks
# notice "plan done with param x = ${x}"
# }
# run_plan('myplan', { x => 'testing' })
#
require 'bolt/error'
Puppet::Functions.create_function(:run_plan, Puppet::Functions::InternalFunction) do
dispatch :run_plan do
scope_param
param 'String', :plan_name
optional_param 'Hash', :named_args
end
def run_plan(scope, plan_name, named_args = {})
unless Puppet[:tasks]
raise Puppet::ParseErrorWithIssue.from_issue_and_stack(
Puppet::Pops::Issues::TASK_OPERATION_NOT_SUPPORTED_WHEN_COMPILING, operation: 'run_plan'
)
end
executor = Puppet.lookup(:bolt_executor) { nil }
unless executor && Puppet.features.bolt?
raise Puppet::ParseErrorWithIssue.from_issue_and_stack(
Puppet::Pops::Issues::TASK_MISSING_BOLT, action: _('run a plan')
)
end
params = named_args.reject { |k, _| k.start_with?('_') }
loaders = closure_scope.compiler.loaders
# The perspective of the environment is wanted here (for now) to not have to
# require modules to have dependencies defined in meta data.
loader = loaders.private_environment_loader
if loader && (func = loader.load(:plan, plan_name))
# TODO: Add profiling around this
if (run_as = named_args['_run_as'])
old_run_as = executor.run_as
executor.run_as = run_as
end
begin
result = func.class.dispatcher.dispatchers[0].call_by_name_with_scope(scope, params, true)
rescue Puppet::PreformattedError => err
if named_args['_catch_errors'] &&
err.respond_to?(:cause) && err.cause && err.cause.is_a?(Bolt::Error)
result = err.cause.to_puppet_error
else
raise err
end
ensure
if run_as
executor.run_as = old_run_as
end
end
return result
end
# Could not find plan
raise ArgumentError, "Function #{self.class.name}(): Unknown plan: '#{plan_name}'"
end
end
| 1 | 7,829 | This provides file and line number if applicable. | puppetlabs-bolt | rb |
@@ -10,6 +10,7 @@ import math
from matplotlib import pyplot as plt
import os
import pandas as pd
+import numpy as np
import random
import re
import shutil | 1 | # This file is generated automatically through:
# d2lbook build lib
# Don't edit it directly
# Defined in file: ./chapter_preface/index.md
import collections
from collections import defaultdict
from IPython import display
import math
from matplotlib import pyplot as plt
import os
import pandas as pd
import random
import re
import shutil
import sys
import tarfile
import time
import requests
import zipfile
import hashlib
d2l = sys.modules[__name__]
# Defined in file: ./chapter_preface/index.md
import numpy as np
import torch
import torchvision
from torch import nn
from torch.nn import functional as F
from torch.utils import data
from torchvision import transforms
# Defined in file: ./chapter_preliminaries/ndarray.md
numpy = lambda a: a.detach().numpy()
size = lambda a: a.numel()
reshape = lambda a, *args: a.reshape(*args)
ones = torch.ones
zeros = torch.zeros
# Defined in file: ./chapter_preliminaries/pandas.md
def mkdir_if_not_exist(path): #@save
if not isinstance(path, str):
path = os.path.join(*path)
if not os.path.exists(path):
os.makedirs(path)
# Defined in file: ./chapter_preliminaries/calculus.md
def use_svg_display(): #@save
"""Use the svg format to display a plot in Jupyter."""
display.set_matplotlib_formats('svg')
# Defined in file: ./chapter_preliminaries/calculus.md
def set_figsize(figsize=(3.5, 2.5)): #@save
"""Set the figure size for matplotlib."""
use_svg_display()
d2l.plt.rcParams['figure.figsize'] = figsize
# Defined in file: ./chapter_preliminaries/calculus.md
def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
"""Set the axes for matplotlib."""
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
axes.set_xscale(xscale)
axes.set_yscale(yscale)
axes.set_xlim(xlim)
axes.set_ylim(ylim)
if legend:
axes.legend(legend)
axes.grid()
# Defined in file: ./chapter_preliminaries/calculus.md
def plot(X, Y=None, xlabel=None, ylabel=None, legend=None, xlim=None,
ylim=None, xscale='linear', yscale='linear',
fmts=('-', 'm--', 'g-.', 'r:'), figsize=(3.5, 2.5), axes=None):
"""Plot data instances."""
if legend is None:
legend = []
set_figsize(figsize)
axes = axes if axes else d2l.plt.gca()
# Return True if `X` (ndarray or list) has 1 axis
def has_one_axis(X):
return (hasattr(X, "ndim") and X.ndim == 1 or isinstance(X, list)
and not hasattr(X[0], "__len__"))
if has_one_axis(X):
X = [X]
if Y is None:
X, Y = [[]] * len(X), X
elif has_one_axis(Y):
Y = [Y]
if len(X) != len(Y):
X = X * len(Y)
axes.cla()
for x, y, fmt in zip(X, Y, fmts):
if len(x):
axes.plot(x, y, fmt)
else:
axes.plot(y, fmt)
set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
# Defined in file: ./chapter_linear-networks/linear-regression.md
class Timer: #@save
"""Record multiple running times."""
def __init__(self):
self.times = []
self.start()
def start(self):
"""Start the timer."""
self.tik = time.time()
def stop(self):
"""Stop the timer and record the time in a list."""
self.times.append(time.time() - self.tik)
return self.times[-1]
def avg(self):
"""Return the average time."""
return sum(self.times) / len(self.times)
def sum(self):
"""Return the sum of time."""
return sum(self.times)
def cumsum(self):
"""Return the accumulated time."""
return np.array(self.times).cumsum().tolist()
# Defined in file: ./chapter_linear-networks/linear-regression-scratch.md
def synthetic_data(w, b, num_examples): #@save
"""Generate y = Xw + b + noise."""
X = torch.zeros(size=(num_examples, len(w))).normal_()
y = torch.matmul(X, w) + b
y += torch.zeros(size=y.shape).normal_(std=0.01)
return X, y
# Defined in file: ./chapter_linear-networks/linear-regression-scratch.md
def linreg(X, w, b): #@save
return torch.matmul(X, w) + b
# Defined in file: ./chapter_linear-networks/linear-regression-scratch.md
def squared_loss(y_hat, y): #@save
return (y_hat - d2l.reshape(y, y_hat.shape)) ** 2 / 2
# Defined in file: ./chapter_linear-networks/linear-regression-scratch.md
def sgd(params, lr, batch_size): #@save
for param in params:
param.data.sub_(lr*param.grad/batch_size)
param.grad.data.zero_()
# Defined in file: ./chapter_linear-networks/linear-regression-concise.md
def load_array(data_arrays, batch_size, is_train=True): #@save
"""Construct a PyTorch data loader."""
dataset = data.TensorDataset(*data_arrays)
return data.DataLoader(dataset, batch_size, shuffle=is_train)
# Defined in file: ./chapter_linear-networks/image-classification-dataset.md
def get_fashion_mnist_labels(labels): #@save
text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
return [text_labels[int(i)] for i in labels]
# Defined in file: ./chapter_linear-networks/image-classification-dataset.md
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5): #@save
"""Plot a list of images."""
figsize = (num_cols * scale, num_rows * scale)
_, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)
axes = axes.flatten()
for i, (ax, img) in enumerate(zip(axes, imgs)):
ax.imshow(d2l.numpy(img))
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
if titles:
ax.set_title(titles[i])
return axes
# Defined in file: ./chapter_linear-networks/image-classification-dataset.md
def get_dataloader_workers(): #@save
"""Use 4 processes to read the data."""
return 4
# Defined in file: ./chapter_linear-networks/image-classification-dataset.md
def load_data_fashion_mnist(batch_size, resize=None): #@save
"""Download the Fashion-MNIST dataset and then load into memory."""
trans = [transforms.ToTensor()]
if resize:
trans.insert(0, transforms.Resize(resize))
trans = transforms.Compose(trans)
mnist_train = torchvision.datasets.FashionMNIST(
root="../data", train=True, transform=trans, download=True)
mnist_test = torchvision.datasets.FashionMNIST(
root="../data", train=False, transform=trans, download=True)
return (data.DataLoader(mnist_train, batch_size, shuffle=True,
num_workers=get_dataloader_workers()),
data.DataLoader(mnist_test, batch_size, shuffle=False,
num_workers=get_dataloader_workers()))
# Defined in file: ./chapter_linear-networks/softmax-regression-scratch.md
def accuracy(y_hat, y): #@save
if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
y_hat = y_hat.argmax(axis=1)
return float((y_hat.type(y.dtype) == y).sum())
# Defined in file: ./chapter_linear-networks/softmax-regression-scratch.md
def evaluate_accuracy(net, data_iter): #@save
metric = Accumulator(2) # num_corrected_examples, num_examples
for _, (X, y) in enumerate(data_iter):
metric.add(accuracy(net(X), y), sum(y.shape))
return metric[0] / metric[1]
# Defined in file: ./chapter_linear-networks/softmax-regression-scratch.md
class Accumulator: #@save
"""Sum a list of numbers over time."""
def __init__(self, n):
self.data = [0.0] * n
def add(self, *args):
self.data = [a+float(b) for a, b in zip(self.data, args)]
def reset(self):
self.data = [0.0] * len(self.data)
def __getitem__(self, idx):
return self.data[idx]
# Defined in file: ./chapter_linear-networks/softmax-regression-scratch.md
def train_epoch_ch3(net, train_iter, loss, updater): #@save
metric = Accumulator(3) # train_loss_sum, train_acc_sum, num_examples
for X, y in train_iter:
# Compute gradients and update parameters
y_hat = net(X)
l = loss(y_hat, y)
if isinstance(updater, torch.optim.Optimizer):
updater.zero_grad()
l.backward()
updater.step()
metric.add(float(l)*len(y), accuracy(y_hat, y), y.size().numel())
else:
l.sum().backward()
updater(X.shape[0])
metric.add(float(l.sum()), accuracy(y_hat, y), y.size().numel())
# Return training loss and training accuracy
return metric[0]/metric[2], metric[1]/metric[2]
# Defined in file: ./chapter_linear-networks/softmax-regression-scratch.md
class Animator: #@save
def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
ylim=None, xscale='linear', yscale='linear', fmts=None,
nrows=1, ncols=1, figsize=(3.5, 2.5)):
"""Incrementally plot multiple lines."""
if legend is None: legend = []
d2l.use_svg_display()
self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize)
if nrows * ncols == 1: self.axes = [self.axes, ]
# Use a lambda to capture arguments
self.config_axes = lambda: d2l.set_axes(
self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
self.X, self.Y, self.fmts = None, None, fmts
def add(self, x, y):
"""Add multiple data points into the figure."""
if not hasattr(y, "__len__"): y = [y]
n = len(y)
if not hasattr(x, "__len__"): x = [x] * n
if not self.X: self.X = [[] for _ in range(n)]
if not self.Y: self.Y = [[] for _ in range(n)]
if not self.fmts: self.fmts = ['-'] * n
for i, (a, b) in enumerate(zip(x, y)):
if a is not None and b is not None:
self.X[i].append(a)
self.Y[i].append(b)
self.axes[0].cla()
for x, y, fmt in zip(self.X, self.Y, self.fmts):
self.axes[0].plot(x, y, fmt)
self.config_axes()
display.display(self.fig)
display.clear_output(wait=True)
# Defined in file: ./chapter_linear-networks/softmax-regression-scratch.md
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater): #@save
animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
legend=['train loss', 'train acc', 'test acc'])
for epoch in range(num_epochs):
train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
test_acc = evaluate_accuracy(net, test_iter)
animator.add(epoch+1, train_metrics+(test_acc,))
train_loss, train_acc = train_metrics
assert train_loss < 0.5, train_loss
assert train_acc <= 1 and train_acc > 0.7, train_acc
assert test_acc <= 1 and test_acc > 0.7, test_acc
# Defined in file: ./chapter_linear-networks/softmax-regression-scratch.md
def predict_ch3(net, test_iter, n=6): #@save
for X, y in test_iter:
break
trues = d2l.get_fashion_mnist_labels(y)
preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1))
titles = [true+'\n' + pred for true, pred in zip(trues, preds)]
d2l.show_images(X[0:n].reshape(n, 28, 28), 1, n, titles=titles[0:n])
# Defined in file: ./chapter_multilayer-perceptrons/underfit-overfit.md
def evaluate_loss(net, data_iter, loss): #@save
"""Evaluate the loss of a model on the given dataset."""
metric = d2l.Accumulator(2) # sum_loss, num_examples
for X, y in data_iter:
l = loss(net(X), y)
metric.add(l.sum(), l.numel())
return metric[0] / metric[1]
# Defined in file: ./chapter_multilayer-perceptrons/kaggle-house-price.md
DATA_HUB = dict() #@save
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/' #@save
# Defined in file: ./chapter_multilayer-perceptrons/kaggle-house-price.md
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/' #@save
# Defined in file: ./chapter_multilayer-perceptrons/kaggle-house-price.md
def download(name, cache_dir=os.path.join('..', 'data')): #@save
"""Download a file inserted into DATA_HUB, return the local filename."""
assert name in DATA_HUB, f"{name} does not exist in {DATA_HUB}"
url, sha1_hash = DATA_HUB[name]
d2l.mkdir_if_not_exist(cache_dir)
fname = os.path.join(cache_dir, url.split('/')[-1])
if os.path.exists(fname):
sha1 = hashlib.sha1()
with open(fname, 'rb') as f:
while True:
data = f.read(1048576)
if not data: break
sha1.update(data)
if sha1.hexdigest() == sha1_hash:
return fname # Hit cache
print(f'Downloading {fname} from {url}...')
r = requests.get(url, stream=True, verify=True)
with open(fname, 'wb') as f:
f.write(r.content)
return fname
# Defined in file: ./chapter_multilayer-perceptrons/kaggle-house-price.md
def download_extract(name, folder=None): #@save
"""Download and extract a zip/tar file."""
fname = download(name)
base_dir = os.path.dirname(fname)
data_dir, ext = os.path.splitext(fname)
if ext == '.zip':
fp = zipfile.ZipFile(fname, 'r')
elif ext in ('.tar', '.gz'):
fp = tarfile.open(fname, 'r')
else:
assert False, 'Only zip/tar files can be extracted'
fp.extractall(base_dir)
return os.path.join(base_dir, folder) if folder else data_dir
# Defined in file: ./chapter_multilayer-perceptrons/kaggle-house-price.md
def download_all(): #@save
"""Download all files in the DATA_HUB"""
for name in DATA_HUB:
download(name)
# Defined in file: ./chapter_multilayer-perceptrons/kaggle-house-price.md
DATA_HUB['kaggle_house_train'] = ( #@save
DATA_URL + 'kaggle_house_pred_train.csv',
'585e9cc93e70b39160e7921475f9bcd7d31219ce')
# Defined in file: ./chapter_multilayer-perceptrons/kaggle-house-price.md
DATA_HUB['kaggle_house_test'] = ( #@save
DATA_URL + 'kaggle_house_pred_test.csv',
'fa19780a7b011d9b009e8bff8e99922a8ee2eb90')
# Defined in file: ./chapter_deep-learning-computation/use-gpu.md
def try_gpu(i=0): #@save
"""Return gpu(i) if exists, otherwise return cpu()."""
if torch.cuda.device_count() >= i + 1:
return torch.device(f'cuda:{i}')
return torch.device('cpu')
# Defined in file: ./chapter_deep-learning-computation/use-gpu.md
def try_all_gpus(): #@save
"""Return all available GPUs, or [cpu(),] if no GPU exists."""
ctxes = [torch.device(f'cuda:{i}')
for i in range(torch.cuda.device_count())]
return ctxes if ctxes else [torch.device('cpu')]
# Defined in file: ./chapter_convolutional-neural-networks/conv-layer.md
def corr2d(X, K): #@save
"""Compute 2D cross-correlation."""
h, w = K.shape
Y = torch.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
Y[i, j] = (X[i: i + h, j: j + w] * K).sum()
return Y
# Defined in file: ./chapter_convolutional-neural-networks/lenet.md
def evaluate_accuracy_gpu(net, data_iter, device=None): #@save
if not device:
device = next(iter(net.parameters())).device
metric = d2l.Accumulator(2) # num_corrected_examples, num_examples
for X, y in data_iter:
X, y = X.to(device), y.to(device)
metric.add(d2l.accuracy(net(X), y), sum(y.shape))
return metric[0] / metric[1]
# Defined in file: ./chapter_convolutional-neural-networks/lenet.md
def train_ch6(net, train_iter, test_iter, num_epochs, lr,
device=d2l.try_gpu()):
"""Train and evaluate a model with CPU or GPU."""
def init_weights(m):
if type(m) == nn.Linear or type(m) == nn.Conv2d:
torch.nn.init.xavier_uniform_(m.weight)
net.apply(init_weights)
print('training on', device)
net.to(device)
optimizer = torch.optim.SGD(net.parameters(), lr=lr)
loss = nn.CrossEntropyLoss()
animator = d2l.Animator(xlabel='epoch', xlim=[0, num_epochs],
legend=['train loss', 'train acc', 'test acc'])
timer = d2l.Timer()
for epoch in range(num_epochs):
metric = d2l.Accumulator(3) # train_loss, train_acc, num_examples
for i, (X, y) in enumerate(train_iter):
timer.start()
net.train()
optimizer.zero_grad()
X, y = X.to(device), y.to(device)
y_hat = net(X)
l = loss(y_hat, y)
l.backward()
optimizer.step()
with torch.no_grad():
metric.add(l*X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])
timer.stop()
train_loss, train_acc = metric[0]/metric[2], metric[1]/metric[2]
if (i+1) % 50 == 0:
animator.add(epoch + i/len(train_iter),
(train_loss, train_acc, None))
test_acc = evaluate_accuracy_gpu(net, test_iter)
animator.add(epoch+1, (None, None, test_acc))
print('loss %.3f, train acc %.3f, test acc %.3f' % (
train_loss, train_acc, test_acc))
print('%.1f examples/sec on %s' % (
metric[2]*num_epochs/timer.sum(), device))
# Defined in file: ./chapter_convolutional-modern/resnet.md
class Residual(nn.Module): #@save
def __init__(self, input_channels, num_channels,
use_1x1conv=False, strides=1):
super().__init__()
self.conv1 = nn.Conv2d(input_channels, num_channels,
kernel_size=3, padding=1, stride=strides)
self.conv2 = nn.Conv2d(num_channels, num_channels,
kernel_size=3, padding=1)
if use_1x1conv:
self.conv3 = nn.Conv2d(input_channels, num_channels,
kernel_size=1, stride=strides)
else:
self.conv3 = None
self.bn1 = nn.BatchNorm2d(num_channels)
self.bn2 = nn.BatchNorm2d(num_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, X):
Y = F.relu(self.bn1(self.conv1(X)))
Y = self.bn2(self.conv2(Y))
if self.conv3:
X = self.conv3(X)
Y += X
return F.relu(Y)
# Defined in file: ./chapter_recurrent-neural-networks/text-preprocessing.md
d2l.DATA_HUB['time_machine'] = (d2l.DATA_URL + 'timemachine.txt',
'090b5e7e70c295757f55df93cb0a180b9691891a')
# Defined in file: ./chapter_recurrent-neural-networks/text-preprocessing.md
def read_time_machine(): #@save
"""Load the time machine book into a list of sentences."""
with open(d2l.download('time_machine'), 'r') as f:
lines = f.readlines()
return [re.sub('[^A-Za-z]+', ' ', line.strip().lower())
for line in lines]
# Defined in file: ./chapter_recurrent-neural-networks/text-preprocessing.md
def tokenize(lines, token='word'): #@save
"""Split sentences into word or char tokens."""
if token == 'word':
return [line.split(' ') for line in lines]
elif token == 'char':
return [list(line) for line in lines]
else:
print('ERROR: unknown token type '+token)
# Defined in file: ./chapter_recurrent-neural-networks/text-preprocessing.md
class Vocab: #@save
def __init__(self, tokens, min_freq=0, reserved_tokens=None):
if reserved_tokens is None:
reserved_tokens = []
# Sort according to frequencies
counter = count_corpus(tokens)
self.token_freqs = sorted(counter.items(), key=lambda x: x[0])
self.token_freqs.sort(key=lambda x: x[1], reverse=True)
self.unk, uniq_tokens = 0, ['<unk>'] + reserved_tokens
uniq_tokens += [token for token, freq in self.token_freqs
if freq >= min_freq and token not in uniq_tokens]
self.idx_to_token, self.token_to_idx = [], dict()
for token in uniq_tokens:
self.idx_to_token.append(token)
self.token_to_idx[token] = len(self.idx_to_token) - 1
def __len__(self):
return len(self.idx_to_token)
def __getitem__(self, tokens):
if not isinstance(tokens, (list, tuple)):
return self.token_to_idx.get(tokens, self.unk)
return [self.__getitem__(token) for token in tokens]
def to_tokens(self, indices):
if not isinstance(indices, (list, tuple)):
return self.idx_to_token[indices]
return [self.idx_to_token[index] for index in indices]
# Defined in file: ./chapter_recurrent-neural-networks/text-preprocessing.md
def count_corpus(sentences): #@save
# Flatten a list of token lists into a list of tokens
tokens = [tk for line in sentences for tk in line]
return collections.Counter(tokens)
# Defined in file: ./chapter_recurrent-neural-networks/text-preprocessing.md
def load_corpus_time_machine(max_tokens=-1): #@save
lines = read_time_machine()
tokens = tokenize(lines, 'char')
vocab = Vocab(tokens)
corpus = [vocab[tk] for line in tokens for tk in line]
if max_tokens > 0:
corpus = corpus[:max_tokens]
return corpus, vocab
# Defined in file: ./chapter_recurrent-neural-networks/language-models-and-dataset.md
def seq_data_iter_random(corpus, batch_size, num_steps):
# Offset the iterator over the data for uniform starts
corpus = corpus[random.randint(0, num_steps):]
# Subtract 1 extra since we need to account for label
num_examples = ((len(corpus) - 1) // num_steps)
example_indices = list(range(0, num_examples * num_steps, num_steps))
random.shuffle(example_indices)
def data(pos):
# This returns a sequence of the length num_steps starting from pos
return corpus[pos: pos + num_steps]
# Discard half empty batches
num_batches = num_examples // batch_size
for i in range(0, batch_size * num_batches, batch_size):
        # batch_size random examples are read on each iteration
batch_indices = example_indices[i:(i+batch_size)]
X = [data(j) for j in batch_indices]
Y = [data(j + 1) for j in batch_indices]
yield torch.Tensor(X), torch.Tensor(Y)
# Defined in file: ./chapter_recurrent-neural-networks/language-models-and-dataset.md
def seq_data_iter_consecutive(corpus, batch_size, num_steps):
# Offset for the iterator over the data for uniform starts
offset = random.randint(0, num_steps)
# Slice out data - ignore num_steps and just wrap around
num_indices = ((len(corpus) - offset - 1) // batch_size) * batch_size
Xs = torch.Tensor(corpus[offset:offset+num_indices])
Ys = torch.Tensor(corpus[offset+1:offset+1+num_indices])
Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)
num_batches = Xs.shape[1] // num_steps
for i in range(0, num_batches * num_steps, num_steps):
X = Xs[:, i:(i+num_steps)]
Y = Ys[:, i:(i+num_steps)]
yield X, Y
# Defined in file: ./chapter_recurrent-neural-networks/language-models-and-dataset.md
class SeqDataLoader:
"""A iterator to load sequence data."""
def __init__(self, batch_size, num_steps, use_random_iter, max_tokens):
if use_random_iter:
self.data_iter_fn = d2l.seq_data_iter_random
else:
self.data_iter_fn = d2l.seq_data_iter_consecutive
self.corpus, self.vocab = d2l.load_corpus_time_machine(max_tokens)
self.batch_size, self.num_steps = batch_size, num_steps
def __iter__(self):
return self.data_iter_fn(self.corpus, self.batch_size, self.num_steps)
# Defined in file: ./chapter_recurrent-neural-networks/language-models-and-dataset.md
def load_data_time_machine(batch_size, num_steps, use_random_iter=False,
max_tokens=10000):
data_iter = SeqDataLoader(
batch_size, num_steps, use_random_iter, max_tokens)
return data_iter, data_iter.vocab
# Defined in file: ./chapter_recurrent-modern/machine-translation-and-dataset.md
d2l.DATA_HUB['fra-eng'] = (d2l.DATA_URL + 'fra-eng.zip',
'94646ad1522d915e7b0f9296181140edcf86a4f5')
# Defined in file: ./chapter_recurrent-modern/machine-translation-and-dataset.md
def read_data_nmt():
data_dir = d2l.download_extract('fra-eng')
with open(os.path.join(data_dir, 'fra.txt'), 'r') as f:
return f.read()
# Defined in file: ./chapter_recurrent-modern/machine-translation-and-dataset.md
def preprocess_nmt(text):
def no_space(char, prev_char):
return char in set(',.!') and prev_char != ' '
text = text.replace('\u202f', ' ').replace('\xa0', ' ').lower()
out = [' ' + char if i > 0 and no_space(char, text[i-1]) else char
for i, char in enumerate(text)]
return ''.join(out)
# Defined in file: ./chapter_recurrent-modern/machine-translation-and-dataset.md
def tokenize_nmt(text, num_examples=None):
source, target = [], []
for i, line in enumerate(text.split('\n')):
if num_examples and i > num_examples:
break
parts = line.split('\t')
if len(parts) == 2:
source.append(parts[0].split(' '))
target.append(parts[1].split(' '))
return source, target
# Defined in file: ./chapter_recurrent-modern/machine-translation-and-dataset.md
def truncate_pad(line, num_steps, padding_token):
if len(line) > num_steps:
return line[:num_steps] # Trim
return line + [padding_token] * (num_steps - len(line)) # Pad
# Defined in file: ./chapter_recurrent-modern/machine-translation-and-dataset.md
def build_array(lines, vocab, num_steps, is_source):
lines = [vocab[l] for l in lines]
if not is_source:
lines = [[vocab['<bos>']] + l + [vocab['<eos>']] for l in lines]
array = torch.tensor([truncate_pad(
l, num_steps, vocab['<pad>']) for l in lines])
valid_len = (array != vocab['<pad>']).sum(dim=1)
return array, valid_len
# Defined in file: ./chapter_recurrent-modern/machine-translation-and-dataset.md
def load_data_nmt(batch_size, num_steps, num_examples=1000):
text = preprocess_nmt(read_data_nmt())
source, target = tokenize_nmt(text, num_examples)
src_vocab = d2l.Vocab(source, min_freq=3,
reserved_tokens=['<pad>', '<bos>', '<eos>'])
tgt_vocab = d2l.Vocab(target, min_freq=3,
reserved_tokens=['<pad>', '<bos>', '<eos>'])
src_array, src_valid_len = build_array(
source, src_vocab, num_steps, True)
tgt_array, tgt_valid_len = build_array(
target, tgt_vocab, num_steps, False)
data_arrays = (src_array, src_valid_len, tgt_array, tgt_valid_len)
data_iter = d2l.load_array(data_arrays, batch_size)
return src_vocab, tgt_vocab, data_iter
# Defined in file: ./chapter_recurrent-modern/encoder-decoder.md
class Encoder(nn.Module):
"""The base encoder interface for the encoder-decoder architecture."""
def __init__(self, **kwargs):
super(Encoder, self).__init__(**kwargs)
def forward(self, X, *args):
raise NotImplementedError
# Defined in file: ./chapter_recurrent-modern/encoder-decoder.md
class Decoder(nn.Module):
"""The base decoder interface for the encoder-decoder architecture."""
def __init__(self, **kwargs):
super(Decoder, self).__init__(**kwargs)
def init_state(self, enc_outputs, *args):
raise NotImplementedError
def forward(self, X, state):
raise NotImplementedError
# Defined in file: ./chapter_recurrent-modern/encoder-decoder.md
class EncoderDecoder(nn.Module):
"""The base class for the encoder-decoder architecture."""
def __init__(self, encoder, decoder, **kwargs):
super(EncoderDecoder, self).__init__(**kwargs)
self.encoder = encoder
self.decoder = decoder
def forward(self, enc_X, dec_X, *args):
enc_outputs = self.encoder(enc_X, *args)
dec_state = self.decoder.init_state(enc_outputs, *args)
return self.decoder(dec_X, dec_state)
# Defined in file: ./chapter_attention-mechanisms/attention.md
def masked_softmax(X, valid_len):
# X: 3-D tensor, valid_len: 1-D or 2-D tensor
if valid_len is None:
return nn.functional.softmax(X, dim=-1)
else:
shape = X.shape
if valid_len.dim() == 1:
valid_len = torch.repeat_interleave(valid_len, repeats=shape[1], dim=0)
else:
valid_len = valid_len.reshape(-1)
# Fill masked elements with a large negative, whose exp is 0
X = X.reshape(-1, shape[-1])
for count, row in enumerate(X):
row[int(valid_len[count]):]=-1e6
return nn.functional.softmax(X.reshape(shape), dim=-1)
# Defined in file: ./chapter_attention-mechanisms/attention.md
class DotProductAttention(nn.Module):
def __init__(self, dropout, **kwargs):
super(DotProductAttention, self).__init__(**kwargs)
self.dropout = nn.Dropout(dropout)
# query: (batch_size, #queries, d)
# key: (batch_size, #kv_pairs, d)
# value: (batch_size, #kv_pairs, dim_v)
# valid_len: either (batch_size, ) or (batch_size, xx)
def forward(self, query, key, value, valid_len=None):
d = query.shape[-1]
# set transpose_b=True to swap the last two dimensions of key
scores = torch.bmm(query, key.transpose(1,2)) / math.sqrt(d)
attention_weights = self.dropout(masked_softmax(scores, valid_len))
return torch.bmm(attention_weights, value)
# Defined in file: ./chapter_attention-mechanisms/attention.md
class MLPAttention(nn.Module):
def __init__(self, key_size, query_size, units, dropout, **kwargs):
super(MLPAttention, self).__init__(**kwargs)
self.W_k = nn.Linear(key_size, units, bias=False)
self.W_q = nn.Linear(query_size, units, bias=False)
self.v = nn.Linear(units, 1, bias=False)
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value, valid_len):
query, key = self.W_k(query), self.W_q(key)
        # expand query to (batch_size, #queries, 1, units), and key to
        # (batch_size, 1, #kv_pairs, units), then add them with broadcasting
features = query.unsqueeze(2) + key.unsqueeze(1)
scores = self.v(features).squeeze(-1)
attention_weights = self.dropout(masked_softmax(scores, valid_len))
return torch.bmm(attention_weights, value)
| 1 | 18,999 | This is already imported. Please check the file. | d2l-ai-d2l-en | py |
@@ -1676,6 +1676,11 @@ public class WindowWidget extends UIWidget implements SessionChangeListener,
if (aSelection.isActionAvailable(action)) {
aSelection.execute(action);
}
+ if (GeckoSession.SelectionActionDelegate.ACTION_COPY.equals(action) &&
+ aSelection.isActionAvailable(GeckoSession.SelectionActionDelegate.ACTION_UNSELECT)) {
+ // Don't keep the text selected after it's copied.
+ aSelection.execute(GeckoSession.SelectionActionDelegate.ACTION_UNSELECT);
+ }
}
@Override | 1 | /* -*- Mode: Java; c-basic-offset: 4; tab-width: 4; indent-tabs-mode: nil; -*-
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package org.mozilla.vrbrowser.ui.widgets;
import android.content.Context;
import android.graphics.Canvas;
import android.graphics.Matrix;
import android.graphics.PointF;
import android.graphics.Rect;
import android.graphics.SurfaceTexture;
import android.util.Log;
import android.util.Pair;
import android.view.KeyEvent;
import android.view.MotionEvent;
import android.view.Surface;
import android.view.View;
import android.view.inputmethod.EditorInfo;
import android.view.inputmethod.InputConnection;
import androidx.annotation.IntDef;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.annotation.StringRes;
import androidx.annotation.UiThread;
import org.jetbrains.annotations.NotNull;
import org.mozilla.geckoview.GeckoResult;
import org.mozilla.geckoview.GeckoSession;
import org.mozilla.geckoview.PanZoomController;
import org.mozilla.vrbrowser.R;
import org.mozilla.vrbrowser.VRBrowserApplication;
import org.mozilla.vrbrowser.browser.HistoryStore;
import org.mozilla.vrbrowser.browser.PromptDelegate;
import org.mozilla.vrbrowser.browser.SessionChangeListener;
import org.mozilla.vrbrowser.browser.SettingsStore;
import org.mozilla.vrbrowser.browser.VideoAvailabilityListener;
import org.mozilla.vrbrowser.browser.engine.Session;
import org.mozilla.vrbrowser.browser.engine.SessionStore;
import org.mozilla.vrbrowser.telemetry.TelemetryWrapper;
import org.mozilla.vrbrowser.ui.adapters.Bookmark;
import org.mozilla.vrbrowser.ui.callbacks.BookmarksCallback;
import org.mozilla.vrbrowser.ui.callbacks.HistoryCallback;
import org.mozilla.vrbrowser.ui.callbacks.LibraryItemContextMenuClickCallback;
import org.mozilla.vrbrowser.ui.views.BookmarksView;
import org.mozilla.vrbrowser.ui.views.HistoryView;
import org.mozilla.vrbrowser.ui.widgets.dialogs.BaseAppDialogWidget;
import org.mozilla.vrbrowser.ui.widgets.dialogs.ClearCacheDialogWidget;
import org.mozilla.vrbrowser.ui.widgets.dialogs.MessageDialogWidget;
import org.mozilla.vrbrowser.ui.widgets.dialogs.SelectionActionWidget;
import org.mozilla.vrbrowser.ui.widgets.menus.ContextMenuWidget;
import org.mozilla.vrbrowser.ui.widgets.menus.LibraryMenuWidget;
import org.mozilla.vrbrowser.ui.widgets.prompts.AlertPromptWidget;
import org.mozilla.vrbrowser.ui.widgets.prompts.ConfirmPromptWidget;
import org.mozilla.vrbrowser.ui.widgets.prompts.PromptWidget;
import org.mozilla.vrbrowser.ui.widgets.settings.SettingsWidget;
import org.mozilla.vrbrowser.utils.ConnectivityReceiver;
import org.mozilla.vrbrowser.utils.SystemUtils;
import org.mozilla.vrbrowser.utils.ViewUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.GregorianCalendar;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Executor;
import java.util.function.Consumer;
import mozilla.components.concept.storage.PageObservation;
import mozilla.components.concept.storage.PageVisit;
import mozilla.components.concept.storage.RedirectSource;
import mozilla.components.concept.storage.VisitInfo;
import mozilla.components.concept.storage.VisitType;
import static org.mozilla.vrbrowser.utils.ServoUtils.isInstanceOfServoSession;
public class WindowWidget extends UIWidget implements SessionChangeListener,
GeckoSession.ContentDelegate, GeckoSession.NavigationDelegate, VideoAvailabilityListener,
GeckoSession.HistoryDelegate, GeckoSession.ProgressDelegate, GeckoSession.SelectionActionDelegate {
public interface HistoryViewDelegate {
default void onHistoryViewShown(WindowWidget aWindow) {}
default void onHistoryViewHidden(WindowWidget aWindow) {}
}
public interface BookmarksViewDelegate {
default void onBookmarksShown(WindowWidget aWindow) {}
default void onBookmarksHidden(WindowWidget aWindow) {}
}
@IntDef(value = { SESSION_RELEASE_DISPLAY, SESSION_DO_NOT_RELEASE_DISPLAY})
public @interface OldSessionDisplayAction {}
public static final int SESSION_RELEASE_DISPLAY = 0;
public static final int SESSION_DO_NOT_RELEASE_DISPLAY = 1;
private Surface mSurface;
private int mWidth;
private int mHeight;
private int mHandle;
private WidgetPlacement mWidgetPlacement;
private TopBarWidget mTopBar;
private TitleBarWidget mTitleBar;
private WidgetManagerDelegate mWidgetManager;
private AlertPromptWidget mAlertPrompt;
private ConfirmPromptWidget mConfirmPrompt;
private NoInternetWidget mNoInternetToast;
private MessageDialogWidget mAppDialog;
private ClearCacheDialogWidget mClearCacheDialog;
private ContextMenuWidget mContextMenu;
private SelectionActionWidget mSelectionMenu;
private LibraryMenuWidget mLibraryItemContextMenu;
private int mWidthBackup;
private int mHeightBackup;
private int mBorderWidth;
private Runnable mFirstDrawCallback;
private boolean mIsInVRVideoMode;
private View mView;
private Session mSession;
private int mWindowId;
private BookmarksView mBookmarksView;
private HistoryView mHistoryView;
private ArrayList<BookmarksViewDelegate> mBookmarksViewListeners;
private ArrayList<HistoryViewDelegate> mHistoryViewListeners;
private Windows.WindowPlacement mWindowPlacement = Windows.WindowPlacement.FRONT;
private Windows.WindowPlacement mWindowPlacementBeforeFullscreen = Windows.WindowPlacement.FRONT;
private float mMaxWindowScale = 3;
private boolean mIsRestored = false;
private CopyOnWriteArrayList<WindowListener> mListeners;
boolean mActive = false;
boolean mHovered = false;
boolean mClickedAfterFocus = false;
boolean mIsBookmarksVisible = false;
boolean mIsHistoryVisible = false;
private WidgetPlacement mPlacementBeforeFullscreen;
private WidgetPlacement mPlacementBeforeResize;
private boolean mIsResizing;
private boolean mIsFullScreen;
private boolean mAfterFirstPaint;
private boolean mCaptureOnPageStop;
private PromptDelegate mPromptDelegate;
private Executor mUIThreadExecutor;
public interface WindowListener {
default void onFocusRequest(@NonNull WindowWidget aWindow) {}
default void onBorderChanged(@NonNull WindowWidget aWindow) {}
default void onSessionChanged(@NonNull Session aOldSession, @NonNull Session aSession) {}
default void onFullScreen(@NonNull WindowWidget aWindow, boolean aFullScreen) {}
default void onVideoAvailabilityChanged(@NonNull WindowWidget aWindow) {}
}
public WindowWidget(Context aContext, int windowId, boolean privateMode) {
super(aContext);
mWindowId = windowId;
mSession = SessionStore.get().createSession(privateMode);
initialize(aContext);
}
public WindowWidget(Context aContext, int windowId, Session aSession) {
super(aContext);
mWindowId = windowId;
mSession = aSession;
initialize(aContext);
}
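// Common initialization shared by both constructors: wires up the session listeners,
// creates the bookmarks/history views, the top/title bars and the prompt delegate,
// and sets up the initial widget placement.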
private void initialize(Context aContext) {
mWidgetManager = (WidgetManagerDelegate) aContext;
mBorderWidth = SettingsStore.getInstance(aContext).getTransparentBorderWidth();
mUIThreadExecutor = ((VRBrowserApplication)getContext().getApplicationContext()).getExecutors().mainThread();
mListeners = new CopyOnWriteArrayList<>();
setupListeners(mSession);
mBookmarksView = new BookmarksView(aContext);
mBookmarksView.addBookmarksListener(mBookmarksListener);
mBookmarksViewListeners = new ArrayList<>();
mHistoryView = new HistoryView(aContext);
mHistoryView.addHistoryListener(mHistoryListener);
mHistoryViewListeners = new ArrayList<>();
mHandle = ((WidgetManagerDelegate)aContext).newWidgetHandle();
mWidgetPlacement = new WidgetPlacement(aContext);
mPlacementBeforeFullscreen = new WidgetPlacement(aContext);
mPlacementBeforeResize = new WidgetPlacement(aContext);
mIsResizing = false;
mIsFullScreen = false;
initializeWidgetPlacement(mWidgetPlacement);
if (mSession.isPrivateMode()) {
mWidgetPlacement.clearColor = ViewUtils.ARGBtoRGBA(getContext().getColor(R.color.window_private_clear_color));
} else {
mWidgetPlacement.clearColor = ViewUtils.ARGBtoRGBA(getContext().getColor(R.color.window_blank_clear_color));
}
mTopBar = new TopBarWidget(aContext);
mTopBar.attachToWindow(this);
mTitleBar = new TitleBarWidget(aContext);
mTitleBar.attachToWindow(this);
mPromptDelegate = new PromptDelegate(getContext());
mPromptDelegate.attachToWindow(this);
setFocusable(true);
TelemetryWrapper.openWindowEvent(mWindowId);
if (mSession.getGeckoSession() != null) {
onCurrentSessionChange(null, mSession.getGeckoSession());
}
}
@Override
protected void initializeWidgetPlacement(WidgetPlacement aPlacement) {
int windowWidth = SettingsStore.getInstance(getContext()).getWindowWidth();
aPlacement.width = windowWidth + mBorderWidth * 2;
aPlacement.height = SettingsStore.getInstance(getContext()).getWindowHeight() + mBorderWidth * 2;
aPlacement.worldWidth = WidgetPlacement.floatDimension(getContext(), R.dimen.window_world_width) *
(float)windowWidth / (float)SettingsStore.WINDOW_WIDTH_DEFAULT;
aPlacement.density = 1.0f;
aPlacement.visible = true;
aPlacement.cylinder = true;
aPlacement.textureScale = 1.0f;
aPlacement.name = "Window";
// Check Windows.placeWindow method for remaining placement set-up
}
public void setPopUpDelegate(@Nullable PromptDelegate.PopUpDelegate delegate) {
mPromptDelegate.setPopupDelegate(delegate);
}
void setupListeners(Session aSession) {
aSession.addSessionChangeListener(this);
aSession.addContentListener(this);
aSession.addVideoAvailabilityListener(this);
aSession.addNavigationListener(this);
aSession.addProgressListener(this);
aSession.setHistoryDelegate(this);
aSession.addSelectionActionListener(this);
mWidgetManager.addConnectivityListener(mConnectivityDelegate);
}
void cleanListeners(Session aSession) {
aSession.removeSessionChangeListener(this);
aSession.removeContentListener(this);
aSession.removeVideoAvailabilityListener(this);
aSession.removeNavigationListener(this);
aSession.removeProgressListener(this);
aSession.setHistoryDelegate(null);
aSession.removeSelectionActionListener(this);
mWidgetManager.removeConnectivityListener(mConnectivityDelegate);
}
@Override
public void show(@ShowFlags int aShowFlags) {
if (!mWidgetPlacement.visible) {
mWidgetPlacement.visible = true;
}
mWidgetManager.updateWidget(this);
setFocusableInTouchMode(false);
if (aShowFlags == REQUEST_FOCUS) {
requestFocusFromTouch();
} else {
clearFocus();
}
mSession.setActive(true);
}
@Override
public void hide(@HideFlags int aHideFlag) {
if (mWidgetPlacement.visible) {
mWidgetPlacement.visible = false;
}
mWidgetManager.updateWidget(this);
clearFocus();
mSession.setActive(false);
}
@Override
protected void onDismiss() {
if (isBookmarksVisible()) {
hideBookmarks();
} else if (isHistoryVisible()) {
hideHistory();
} else {
if (mSession.canGoBack()) {
mSession.goBack();
}
}
}
@Override
public void onPause() {
super.onPause();
mSession.setActive(false);
}
@Override
public void onResume() {
super.onResume();
if (isVisible() || mIsInVRVideoMode) {
mSession.setActive(true);
}
}
public void close() {
TelemetryWrapper.closeWindowEvent(mWindowId);
hideContextMenus();
releaseWidget();
mBookmarksView.onDestroy();
mHistoryView.onDestroy();
SessionStore.get().destroySession(mSession);
if (mTopBar != null) {
mWidgetManager.removeWidget(mTopBar);
}
if (mTitleBar != null) {
mWidgetManager.removeWidget(mTitleBar);
}
mListeners.clear();
}
private ConnectivityReceiver.Delegate mConnectivityDelegate = connected -> {
if (mActive) {
if (mNoInternetToast == null) {
mNoInternetToast = new NoInternetWidget(getContext());
mNoInternetToast.mWidgetPlacement.parentHandle = getHandle();
mNoInternetToast.mWidgetPlacement.parentAnchorY = 0.0f;
mNoInternetToast.mWidgetPlacement.translationY = WidgetPlacement.unitFromMeters(getContext(), R.dimen.base_app_dialog_y_distance);
}
if (!connected && !mNoInternetToast.isVisible()) {
mNoInternetToast.show(REQUEST_FOCUS);
} else if (connected && mNoInternetToast.isVisible()) {
mNoInternetToast.hide(REMOVE_WIDGET);
}
}
};
public void loadHomeIfNotRestored() {
if (!mIsRestored) {
loadHome();
}
}
public void loadHome() {
if (mSession.isPrivateMode()) {
mSession.loadPrivateBrowsingPage();
} else {
mSession.loadUri(SettingsStore.getInstance(getContext()).getHomepage());
}
}
protected void setRestored(boolean restored) {
mIsRestored = restored;
}
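// Swaps the composited browser surface for an Android View (bookmarks or history).
// When switching surfaces, a UISurfaceTextureRenderer is created so the view can be
// drawn into the widget's surface, the world is dimmed and a back handler is pushed.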
private void setView(View view, boolean switchSurface) {
if (switchSurface) {
pauseCompositor();
}
mView = view;
removeView(view);
mView.setVisibility(VISIBLE);
addView(mView);
if (switchSurface) {
mWidgetPlacement.density = getContext().getResources().getDisplayMetrics().density;
if (mTexture != null && mSurface != null && mRenderer == null) {
// Create the UI Renderer for the current surface.
// Surface must be released when switching back to WebView surface or the browser
// will not render it correctly. See release code in unsetView().
mRenderer = new UISurfaceTextureRenderer(mSurface, mWidgetPlacement.textureWidth(), mWidgetPlacement.textureHeight());
}
mWidgetManager.updateWidget(this);
mWidgetManager.pushWorldBrightness(this, WidgetManagerDelegate.DEFAULT_DIM_BRIGHTNESS);
mWidgetManager.pushBackHandler(mBackHandler);
setWillNotDraw(false);
postInvalidate();
}
}
private void unsetView(View view, boolean switchSurface) {
if (mView != null && mView == view) {
mView = null;
removeView(view);
view.setVisibility(GONE);
if (switchSurface) {
setWillNotDraw(true);
if (mTexture != null) {
// Surface must be recreated here when not using layers.
// When using layers the new Surface is received via the setSurface() method.
if (mRenderer != null) {
mRenderer.release();
mRenderer = null;
}
mSurface = new Surface(mTexture);
}
mWidgetPlacement.density = 1.0f;
mWidgetManager.updateWidget(this);
mWidgetManager.popWorldBrightness(this);
mWidgetManager.popBackHandler(mBackHandler);
}
}
}
public boolean isBookmarksVisible() {
return (mView != null && mView == mBookmarksView);
}
public boolean isHistoryVisible() {
return (mView != null && mView == mHistoryView);
}
public int getWindowWidth() {
return mWidgetPlacement.width;
}
public int getWindowHeight() {
return mWidgetPlacement.height;
}
public void addBookmarksViewListener(@NonNull BookmarksViewDelegate listener) {
mBookmarksViewListeners.add(listener);
}
public void removeBookmarksViewListener(@NonNull BookmarksViewDelegate listener) {
mBookmarksViewListeners.remove(listener);
}
public void addHistoryViewListener(@NonNull HistoryViewDelegate listener) {
mHistoryViewListeners.add(listener);
}
public void removeHistoryViewListener(@NonNull HistoryViewDelegate listener) {
mHistoryViewListeners.remove(listener);
}
public void switchBookmarks() {
if (isHistoryVisible()) {
hideHistory(false);
showBookmarks(false);
} else if (isBookmarksVisible()) {
hideBookmarks();
} else {
showBookmarks();
}
}
public void showBookmarks() {
showBookmarks(true);
}
public void showBookmarks(boolean switchSurface) {
if (mView == null) {
setView(mBookmarksView, switchSurface);
mBookmarksView.onShow();
for (BookmarksViewDelegate listener : mBookmarksViewListeners) {
listener.onBookmarksShown(this);
}
mIsBookmarksVisible = true;
}
updateTitleBar();
}
public void hideBookmarks() {
hideBookmarks(true);
}
public void hideBookmarks(boolean switchSurface) {
if (mView != null) {
unsetView(mBookmarksView, switchSurface);
for (BookmarksViewDelegate listener : mBookmarksViewListeners) {
listener.onBookmarksHidden(this);
}
mIsBookmarksVisible = false;
}
}
public void switchHistory() {
if (isBookmarksVisible()) {
hideBookmarks(false);
showHistory(false);
} else if (isHistoryVisible()) {
hideHistory();
} else {
showHistory();
}
}
private void hideLibraryPanels() {
if (isBookmarksVisible()) {
hideBookmarks();
} else if (isHistoryVisible()) {
hideHistory();
}
}
public void showHistory() {
showHistory(true);
}
public void showHistory(boolean switchSurface) {
if (mView == null) {
setView(mHistoryView, switchSurface);
mHistoryView.onShow();
for (HistoryViewDelegate listener : mHistoryViewListeners) {
listener.onHistoryViewShown(this);
}
mIsHistoryVisible = true;
}
}
public void hideHistory() {
hideHistory(true);
}
public void hideHistory(boolean switchSurface) {
if (mView != null) {
unsetView(mHistoryView, switchSurface);
for (HistoryViewDelegate listener : mHistoryViewListeners) {
listener.onHistoryViewHidden(this);
}
mIsHistoryVisible = false;
}
}
public void pauseCompositor() {
if (mSession == null) {
return;
}
mSession.surfaceDestroyed();
}
public void resumeCompositor() {
if (mSession == null) {
return;
}
if (mSurface == null) {
return;
}
callSurfaceChanged();
}
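// Resizes the widget to the video dimensions while a VR video is playing, backing up
// the current size (and optionally dropping the transparent border) so that
// disableVRVideoMode() can restore it afterwards.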
public void enableVRVideoMode(int aVideoWidth, int aVideoHeight, boolean aResetBorder) {
if (!mIsInVRVideoMode) {
mWidthBackup = mWidth;
mHeightBackup = mHeight;
mIsInVRVideoMode = true;
}
boolean borderChanged = aResetBorder && mBorderWidth > 0;
if (aVideoWidth == mWidth && aVideoHeight == mHeight && !borderChanged) {
return;
}
if (aResetBorder) {
mBorderWidth = 0;
}
mWidgetPlacement.width = aVideoWidth + mBorderWidth * 2;
mWidgetPlacement.height = aVideoHeight + mBorderWidth * 2;
mWidgetManager.updateWidget(this);
}
public void disableVRVideoMode() {
if (!mIsInVRVideoMode || mWidthBackup == 0 || mHeightBackup == 0) {
return;
}
mIsInVRVideoMode = false;
int border = SettingsStore.getInstance(getContext()).getTransparentBorderWidth();
if (mWidthBackup == mWidth && mHeightBackup == mHeight && border == mBorderWidth) {
return;
}
mBorderWidth = border;
mWidgetPlacement.width = mWidthBackup;
mWidgetPlacement.height = mHeightBackup;
mWidgetManager.updateWidget(this);
}
public void setWindowPlacement(@NonNull Windows.WindowPlacement aPlacement) {
if (mActive) {
TelemetryWrapper.activePlacementEvent(mWindowPlacement.getValue(), false);
}
mWindowPlacement = aPlacement;
if (mActive) {
TelemetryWrapper.activePlacementEvent(mWindowPlacement.getValue(), true);
}
}
public @NonNull Windows.WindowPlacement getmWindowPlacementBeforeFullscreen() {
return mWindowPlacementBeforeFullscreen;
}
public @NonNull Windows.WindowPlacement getWindowPlacement() {
return mWindowPlacement;
}
@Override
public void resizeByMultiplier(float aspect, float multiplier) {
Pair<Float, Float> targetSize = getSizeForScale(multiplier, aspect);
handleResizeEvent(targetSize.first, targetSize.second);
}
public float getCurrentScale() {
float currentAspect = getCurrentAspect();
float currentWorldHeight = mWidgetPlacement.worldWidth / currentAspect;
float currentArea = mWidgetPlacement.worldWidth * currentWorldHeight;
float defaultWidth = WidgetPlacement.floatDimension(getContext(), R.dimen.window_world_width);
float defaultHeight = defaultWidth / SettingsStore.getInstance(getContext()).getWindowAspect();
float defaultArea = defaultWidth * defaultHeight;
return currentArea / defaultArea;
}
public float getCurrentAspect() {
return (float) mWidgetPlacement.width / (float) mWidgetPlacement.height;
}
public int getBorderWidth() {
return mBorderWidth;
}
public void setActiveWindow(boolean active) {
mActive = active;
if (active) {
SessionStore.get().setActiveSession(mSession);
GeckoSession session = mSession.getGeckoSession();
if (session != null) {
session.getTextInput().setView(this);
}
mSession.updateLastUse();
} else {
updateTitleBar();
}
hideContextMenus();
TelemetryWrapper.activePlacementEvent(mWindowPlacement.getValue(), mActive);
updateBorder();
}
private void updateTitleBar() {
if (isBookmarksVisible()) {
updateTitleBarUrl(getResources().getString(R.string.url_bookmarks_title));
} else if (isHistoryVisible()) {
updateTitleBarUrl(getResources().getString(R.string.url_history_title));
} else {
updateTitleBarUrl(mSession.getCurrentUri());
}
}
private void updateTitleBarUrl(String url) {
if (mTitleBar != null && url != null) {
mTitleBar.setIsInsecure(!mSession.isSecure());
if (url.startsWith("data") && mSession.isPrivateMode()) {
mTitleBar.setInsecureVisibility(GONE);
mTitleBar.setURL(getResources().getString(R.string.private_browsing_title));
} else if (url.equals(mSession.getHomeUri())) {
mTitleBar.setInsecureVisibility(GONE);
mTitleBar.setURL(getResources().getString(R.string.url_home_title, getResources().getString(R.string.app_name)));
} else if (url.equals(getResources().getString(R.string.url_bookmarks_title)) ||
url.equals(getResources().getString(R.string.url_history_title))) {
mTitleBar.setInsecureVisibility(GONE);
mTitleBar.setURL(url);
} else if (url.equals(getResources().getString(R.string.about_blank))) {
mTitleBar.setInsecureVisibility(GONE);
mTitleBar.setURL("");
} else {
mTitleBar.setURL(url);
}
}
}
public Session getSession() {
return mSession;
}
public TopBarWidget getTopBar() {
return mTopBar;
}
public void setTopBar(TopBarWidget aWidget) {
if (mTopBar != aWidget) {
mTopBar = aWidget;
mTopBar.attachToWindow(this);
}
}
public TitleBarWidget getTitleBar() {
return mTitleBar;
}
@Override
public void setSurfaceTexture(SurfaceTexture aTexture, final int aWidth, final int aHeight, Runnable aFirstDrawCallback) {
mFirstDrawCallback = aFirstDrawCallback;
if (mView != null) {
super.setSurfaceTexture(aTexture, aWidth, aHeight, aFirstDrawCallback);
} else {
GeckoSession session = mSession.getGeckoSession();
if (session == null) {
return;
}
if (aTexture == null) {
setWillNotDraw(true);
return;
}
mWidth = aWidth;
mHeight = aHeight;
mTexture = aTexture;
aTexture.setDefaultBufferSize(aWidth, aHeight);
mSurface = new Surface(aTexture);
callSurfaceChanged();
}
}
@Override
public void setSurface(Surface aSurface, final int aWidth, final int aHeight, Runnable aFirstDrawCallback) {
if (mView != null) {
super.setSurface(aSurface, aWidth, aHeight, aFirstDrawCallback);
} else {
mWidth = aWidth;
mHeight = aHeight;
mSurface = aSurface;
mFirstDrawCallback = aFirstDrawCallback;
if (mSurface != null) {
callSurfaceChanged();
} else {
mSession.surfaceDestroyed();
}
}
}
private void callSurfaceChanged() {
if (mSession != null) {
mSession.surfaceChanged(mSurface, mBorderWidth, mBorderWidth, mWidth - mBorderWidth * 2, mHeight - mBorderWidth * 2);
mSession.updateLastUse();
}
}
@Override
public void resizeSurface(final int aWidth, final int aHeight) {
if (mView != null) {
super.resizeSurface(aWidth, aHeight);
}
mWidth = aWidth;
mHeight = aHeight;
if (mTexture != null) {
mTexture.setDefaultBufferSize(aWidth, aHeight);
}
if (mSurface != null && mView == null) {
callSurfaceChanged();
}
}
@Override
public int getHandle() {
return mHandle;
}
@Override
public WidgetPlacement getPlacement() {
return mWidgetPlacement;
}
@Override
public void handleTouchEvent(MotionEvent aEvent) {
if (aEvent.getAction() == MotionEvent.ACTION_DOWN) {
if (!mActive) {
mClickedAfterFocus = true;
updateBorder();
// Focus this window
for (WindowListener listener: mListeners) {
listener.onFocusRequest(this);
}
// Return to discard first click after focus
return;
}
} else if (aEvent.getAction() == MotionEvent.ACTION_UP || aEvent.getAction() == MotionEvent.ACTION_CANCEL) {
mClickedAfterFocus = false;
updateBorder();
}
if (!mActive) {
// Do not send touch events to unfocused windows.
return;
}
if (mView != null) {
super.handleTouchEvent(aEvent);
} else {
if (aEvent.getActionMasked() == MotionEvent.ACTION_DOWN) {
requestFocus();
requestFocusFromTouch();
}
GeckoSession session = mSession.getGeckoSession();
if (session != null) {
session.getPanZoomController().onTouchEvent(aEvent);
}
}
}
@Override
public void handleHoverEvent(MotionEvent aEvent) {
if (aEvent.getAction() == MotionEvent.ACTION_HOVER_ENTER) {
mHovered = true;
updateBorder();
} else if (aEvent.getAction() == MotionEvent.ACTION_HOVER_EXIT) {
mHovered = false;
updateBorder();
}
if (!mActive) {
// Do not send hover events to unfocused windows.
return;
}
if (mView != null) {
super.handleHoverEvent(aEvent);
} else {
GeckoSession session = mSession.getGeckoSession();
if (session != null) {
session.getPanZoomController().onMotionEvent(aEvent);
}
}
}
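// Tints the window border for visual feedback: a hover color when an unfocused
// window is pointed at, and a click color right after it is clicked to gain focus.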
protected void updateBorder() {
int color = 0;
if (!mActive && !mClickedAfterFocus && mHovered) {
color = ViewUtils.ARGBtoRGBA(getContext().getColor(R.color.window_border_hover));
} else if (mClickedAfterFocus) {
color = ViewUtils.ARGBtoRGBA(getContext().getColor(R.color.window_border_click));
}
if (mWidgetPlacement.borderColor != color) {
mWidgetPlacement.borderColor = color;
mWidgetManager.updateWidget(this);
for (WindowListener listener: mListeners) {
listener.onBorderChanged(this);
}
}
}
public void saveBeforeFullscreenPlacement() {
mWindowPlacementBeforeFullscreen = mWindowPlacement;
mPlacementBeforeFullscreen.copyFrom(mWidgetPlacement);
}
public void restoreBeforeFullscreenPlacement() {
mWindowPlacement = mWindowPlacementBeforeFullscreen;
mWidgetPlacement.copyFrom(mPlacementBeforeFullscreen);
}
public WidgetPlacement getBeforeFullscreenPlacement() {
return mPlacementBeforeFullscreen;
}
public void saveBeforeResizePlacement() {
mPlacementBeforeResize.copyFrom(mWidgetPlacement);
}
public void restoreBeforeResizePlacement() {
mWidgetPlacement.copyFrom(mPlacementBeforeResize);
}
public WidgetPlacement getBeforeResizePlacement() {
return mPlacementBeforeResize;
}
public void setIsResizing(boolean isResizing) {
mIsResizing = isResizing;
}
public boolean isResizing() {
return mIsResizing;
}
public void setIsFullScreen(boolean isFullScreen) {
if (isFullScreen != mIsFullScreen) {
mIsFullScreen = isFullScreen;
for (WindowListener listener: mListeners) {
listener.onFullScreen(this, isFullScreen);
}
}
}
public boolean isFullScreen() {
return mIsFullScreen;
}
public void addWindowListener(WindowListener aListener) {
if (!mListeners.contains(aListener)) {
mListeners.add(aListener);
}
}
public void removeWindowListener(WindowListener aListener) {
mListeners.remove(aListener);
}
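// Marks the widget as not composited until the next first-draw callback fires,
// so stale content is not displayed while a new page is loading.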
public void waitForFirstPaint() {
setFirstPaintReady(false);
setFirstDrawCallback(() -> {
if (!isFirstPaintReady()) {
setFirstPaintReady(true);
mWidgetManager.updateWidget(WindowWidget.this);
}
});
mWidgetManager.updateWidget(this);
}
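// Applies a resize request expressed in world units: converts the world width to
// pixels, derives the height from the aspect ratio and re-adds the transparent border.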
@Override
public void handleResizeEvent(float aWorldWidth, float aWorldHeight) {
int width = getWindowWidth(aWorldWidth);
float aspect = aWorldWidth / aWorldHeight;
int height = (int) Math.floor((float)width / aspect);
mWidgetPlacement.width = width + mBorderWidth * 2;
mWidgetPlacement.height = height + mBorderWidth * 2;
mWidgetPlacement.worldWidth = aWorldWidth;
mWidgetManager.updateWidget(this);
mWidgetManager.updateVisibleWidgets();
}
@Override
public void releaseWidget() {
cleanListeners(mSession);
GeckoSession session = mSession.getGeckoSession();
if (mSession != null) {
mSession.releaseDisplay();
}
if (session != null) {
session.getTextInput().setView(null);
}
if (mSurface != null) {
mSurface.release();
mSurface = null;
}
if (mTexture != null && mRenderer == null) {
// Custom SurfaceTexture used for GeckoView
mTexture.release();
mTexture = null;
}
mBookmarksView.removeBookmarksListener(mBookmarksListener);
mHistoryView.removeHistoryListener(mHistoryListener);
mPromptDelegate.detachFromWindow();
super.releaseWidget();
}
@Override
public void setFirstPaintReady(final boolean aFirstPaintReady) {
mWidgetPlacement.composited = aFirstPaintReady;
if (!aFirstPaintReady) {
mAfterFirstPaint = false;
}
}
public void setFirstDrawCallback(Runnable aRunnable) {
mFirstDrawCallback = aRunnable;
}
@Override
public boolean isFirstPaintReady() {
return mWidgetPlacement != null && mWidgetPlacement.composited;
}
@Override
public boolean isVisible() {
return mWidgetPlacement.visible;
}
@Override
public boolean isLayer() {
return mSurface != null && mTexture == null;
}
@Override
public void setVisible(boolean aVisible) {
if (mWidgetPlacement.visible == aVisible) {
return;
}
if (!mIsInVRVideoMode) {
mSession.setActive(aVisible);
if (aVisible) {
callSurfaceChanged();
}
}
mWidgetPlacement.visible = aVisible;
if (!aVisible) {
if (mIsBookmarksVisible || mIsHistoryVisible) {
mWidgetManager.popWorldBrightness(this);
}
} else {
if (mIsBookmarksVisible || mIsHistoryVisible) {
mWidgetManager.pushWorldBrightness(this, WidgetManagerDelegate.DEFAULT_DIM_BRIGHTNESS);
}
}
mIsBookmarksVisible = isBookmarksVisible();
mIsHistoryVisible = isHistoryVisible();
mWidgetManager.updateWidget(this);
if (!aVisible) {
clearFocus();
}
}
@Override
public void draw(Canvas aCanvas) {
if (mView != null) {
super.draw(aCanvas);
}
}
public void setSession(@NonNull Session aSession) {
setSession(aSession, SESSION_RELEASE_DISPLAY);
}
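// Replaces the window's session: detaches listeners from the old session (optionally
// releasing its display), attaches them to the new one and notifies WindowListeners.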
public void setSession(@NonNull Session aSession, @OldSessionDisplayAction int aDisplayAction) {
if (mSession != aSession) {
Session oldSession = mSession;
if (oldSession != null) {
cleanListeners(oldSession);
if (aDisplayAction == SESSION_RELEASE_DISPLAY) {
oldSession.releaseDisplay();
}
}
mSession = aSession;
if (oldSession != null) {
onCurrentSessionChange(oldSession.getGeckoSession(), aSession.getGeckoSession());
} else {
onCurrentSessionChange(null, aSession.getGeckoSession());
}
setupListeners(mSession);
for (WindowListener listener: mListeners) {
listener.onSessionChanged(oldSession, aSession);
}
}
hideLibraryPanels();
}
public void showPopUps() {
if (mPromptDelegate != null) {
mPromptDelegate.showPopUps(getSession().getGeckoSession());
}
}
public boolean hasPendingPopUps() {
if (mPromptDelegate != null) {
return mPromptDelegate.hasPendingPopUps(getSession().getGeckoSession());
}
return false;
}
// Session.GeckoSessionChange
@Override
public void onCurrentSessionChange(GeckoSession aOldSession, GeckoSession aSession) {
Log.d(LOGTAG, "onCurrentSessionChange: " + this.hashCode());
mWidgetManager.setIsServoSession(isInstanceOfServoSession(aSession));
Log.d(LOGTAG, "surfaceChanged: " + aSession.hashCode());
callSurfaceChanged();
aSession.getTextInput().setView(this);
boolean isPrivateMode = aSession.getSettings().getUsePrivateMode();
if (isPrivateMode) {
setPrivateBrowsingEnabled(true);
} else {
setPrivateBrowsingEnabled(false);
}
}
@Override
public void onStackSession(Session aSession) {
// e.g. tab opened via window.open()
aSession.updateLastUse();
waitForFirstPaint();
Session current = mSession;
setSession(aSession);
SessionStore.get().setActiveSession(aSession);
current.captureBackgroundBitmap(getWindowWidth(), getWindowHeight()).thenAccept(aVoid -> current.setActive(false));
mWidgetManager.getTray().showTabAddedNotification();
}
@Override
public void onUnstackSession(Session aSession, Session aParent) {
if (mSession == aSession) {
aParent.setActive(true);
setSession(aParent);
SessionStore.get().setActiveSession(aParent);
SessionStore.get().destroySession(aSession);
}
}
// View
@Override
public InputConnection onCreateInputConnection(final EditorInfo outAttrs) {
Log.d(LOGTAG, "BrowserWidget onCreateInputConnection");
GeckoSession session = mSession.getGeckoSession();
if (session == null) {
return null;
}
return session.getTextInput().onCreateInputConnection(outAttrs);
}
@Override
public boolean onCheckIsTextEditor() {
return !mIsResizing && mSession.isInputActive();
}
@Override
public boolean onKeyPreIme(int aKeyCode, KeyEvent aEvent) {
if (super.onKeyPreIme(aKeyCode, aEvent)) {
return true;
}
GeckoSession session = mSession.getGeckoSession();
return (session != null) && session.getTextInput().onKeyPreIme(aKeyCode, aEvent);
}
@Override
public boolean onKeyUp(int aKeyCode, KeyEvent aEvent) {
if (super.onKeyUp(aKeyCode, aEvent)) {
return true;
}
GeckoSession session = mSession.getGeckoSession();
return (session != null) && session.getTextInput().onKeyUp(aKeyCode, aEvent);
}
@Override
public boolean onKeyDown(int aKeyCode, KeyEvent aEvent) {
if (super.onKeyDown(aKeyCode, aEvent)) {
return true;
}
GeckoSession session = mSession.getGeckoSession();
return (session != null) && session.getTextInput().onKeyDown(aKeyCode, aEvent);
}
@Override
public boolean onKeyLongPress(int aKeyCode, KeyEvent aEvent) {
if (super.onKeyLongPress(aKeyCode, aEvent)) {
return true;
}
GeckoSession session = mSession.getGeckoSession();
return (session != null) && session.getTextInput().onKeyLongPress(aKeyCode, aEvent);
}
@Override
public boolean onKeyMultiple(int aKeyCode, int repeatCount, KeyEvent aEvent) {
if (super.onKeyMultiple(aKeyCode, repeatCount, aEvent)) {
return true;
}
GeckoSession session = mSession.getGeckoSession();
return (session != null) && session.getTextInput().onKeyMultiple(aKeyCode, repeatCount, aEvent);
}
@Override
protected void onFocusChanged(boolean aGainFocus, int aDirection, Rect aPreviouslyFocusedRect) {
super.onFocusChanged(aGainFocus, aDirection, aPreviouslyFocusedRect);
Log.d(LOGTAG, "BrowserWidget onFocusChanged: " + (aGainFocus ? "true" : "false"));
}
@Override
public boolean onTouchEvent(MotionEvent aEvent) {
GeckoSession session = mSession.getGeckoSession();
return (session != null) && session.getPanZoomController().onTouchEvent(aEvent) == PanZoomController.INPUT_RESULT_HANDLED;
}
@Override
public boolean onGenericMotionEvent(MotionEvent aEvent) {
GeckoSession session = mSession.getGeckoSession();
return (session != null) && session.getPanZoomController().onMotionEvent(aEvent) == PanZoomController.INPUT_RESULT_HANDLED;
}
private void setPrivateBrowsingEnabled(boolean isEnabled) {
}
public void showAlert(String title, @NonNull String msg, @NonNull PromptWidget.PromptDelegate callback) {
mAlertPrompt = new AlertPromptWidget(getContext());
mAlertPrompt.mWidgetPlacement.parentHandle = getHandle();
mAlertPrompt.mWidgetPlacement.parentAnchorY = 0.0f;
mAlertPrompt.mWidgetPlacement.translationY = WidgetPlacement.unitFromMeters(getContext(), R.dimen.base_app_dialog_y_distance);
mAlertPrompt.setTitle(title);
mAlertPrompt.setMessage(msg);
mAlertPrompt.setPromptDelegate(callback);
mAlertPrompt.show(REQUEST_FOCUS);
}
public void showButtonPrompt(String title, @NonNull String msg, @NonNull String[] btnMsg, @NonNull ConfirmPromptWidget.ConfirmPromptDelegate callback) {
mConfirmPrompt = new ConfirmPromptWidget(getContext());
mConfirmPrompt.mWidgetPlacement.parentHandle = getHandle();
mConfirmPrompt.mWidgetPlacement.parentAnchorY = 0.0f;
mConfirmPrompt.mWidgetPlacement.translationY = WidgetPlacement.unitFromMeters(getContext(), R.dimen.base_app_dialog_y_distance);
mConfirmPrompt.setTitle(title);
mConfirmPrompt.setMessage(msg);
mConfirmPrompt.setButtons(btnMsg);
mConfirmPrompt.setPromptDelegate(callback);
mConfirmPrompt.show(REQUEST_FOCUS);
}
public void showAppDialog(@NonNull String title, @NonNull @StringRes int description, @NonNull @StringRes int [] btnMsg,
@NonNull BaseAppDialogWidget.Delegate buttonsCallback, @NonNull MessageDialogWidget.Delegate messageCallback) {
mAppDialog = new MessageDialogWidget(getContext());
mAppDialog.mWidgetPlacement.parentHandle = getHandle();
mAppDialog.mWidgetPlacement.parentAnchorY = 0.0f;
mAppDialog.mWidgetPlacement.translationY = WidgetPlacement.unitFromMeters(getContext(), R.dimen.base_app_dialog_y_distance);
mAppDialog.setTitle(title);
mAppDialog.setMessage(description);
mAppDialog.setButtons(btnMsg);
mAppDialog.setButtonsDelegate(buttonsCallback);
mAppDialog.setMessageDelegate(messageCallback);
mAppDialog.show(REQUEST_FOCUS);
}
public void showClearCacheDialog() {
mClearCacheDialog = new ClearCacheDialogWidget(getContext());
mClearCacheDialog.mWidgetPlacement.parentHandle = getHandle();
mClearCacheDialog.mWidgetPlacement.parentAnchorY = 0.0f;
mClearCacheDialog.mWidgetPlacement.translationY = WidgetPlacement.unitFromMeters(getContext(), R.dimen.base_app_dialog_y_distance);
mClearCacheDialog.setTitle(R.string.history_clear);
mClearCacheDialog.setButtons(new int[] {
R.string.history_clear_cancel,
R.string.history_clear_now
});
mClearCacheDialog.setButtonsDelegate((index) -> {
if (index == BaseAppDialogWidget.NEGATIVE) {
mClearCacheDialog.hide(REMOVE_WIDGET);
} else {
Calendar date = new GregorianCalendar();
date.set(Calendar.HOUR_OF_DAY, 0);
date.set(Calendar.MINUTE, 0);
date.set(Calendar.SECOND, 0);
date.set(Calendar.MILLISECOND, 0);
long currentTime = System.currentTimeMillis();
long todayLimit = date.getTimeInMillis();
long yesterdayLimit = todayLimit - SystemUtils.ONE_DAY_MILLIS;
long oneWeekLimit = todayLimit - SystemUtils.ONE_WEEK_MILLIS;
HistoryStore store = SessionStore.get().getHistoryStore();
switch (mClearCacheDialog.getSelectedRange()) {
case ClearCacheDialogWidget.TODAY:
store.deleteVisitsBetween(todayLimit, currentTime);
break;
case ClearCacheDialogWidget.YESTERDAY:
store.deleteVisitsBetween(yesterdayLimit, currentTime);
break;
case ClearCacheDialogWidget.LAST_WEEK:
store.deleteVisitsBetween(oneWeekLimit, currentTime);
break;
case ClearCacheDialogWidget.EVERYTHING:
store.deleteEverything();
break;
}
SessionStore.get().purgeSessionHistory();
}
});
mClearCacheDialog.show(REQUEST_FOCUS);
}
public void setMaxWindowScale(float aScale) {
if (mMaxWindowScale != aScale) {
mMaxWindowScale = aScale;
Pair<Float, Float> maxSize = getSizeForScale(aScale);
if (mWidgetPlacement.worldWidth > maxSize.first) {
float currentAspect = (float) mWidgetPlacement.width / (float) mWidgetPlacement.height;
mWidgetPlacement.worldWidth = maxSize.first;
mWidgetPlacement.width = getWindowWidth(maxSize.first);
mWidgetPlacement.height = (int) Math.ceil((float)mWidgetPlacement.width / currentAspect);
}
}
}
public float getMaxWindowScale() {
return mMaxWindowScale;
}
public @NonNull Pair<Float, Float> getSizeForScale(float aScale) {
return getSizeForScale(aScale, SettingsStore.getInstance(getContext()).getWindowAspect());
}
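// Scales the default window area by aScale and solves for a width/height pair with
// the requested aspect ratio: since area = w * h and h = w / aspect, w = sqrt(area * aspect).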
public @NonNull Pair<Float, Float> getSizeForScale(float aScale, float aAspect) {
float worldWidth = WidgetPlacement.floatDimension(getContext(), R.dimen.window_world_width) *
(float)SettingsStore.getInstance(getContext()).getWindowWidth() / (float)SettingsStore.WINDOW_WIDTH_DEFAULT;
float worldHeight = worldWidth / aAspect;
float area = worldWidth * worldHeight * aScale;
float targetWidth = (float) Math.sqrt(area * aAspect);
float targetHeight = targetWidth / aAspect;
return Pair.create(targetWidth, targetHeight);
}
private int getWindowWidth(float aWorldWidth) {
return (int) Math.floor(SettingsStore.WINDOW_WIDTH_DEFAULT * aWorldWidth / WidgetPlacement.floatDimension(getContext(), R.dimen.window_world_width));
}
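// Shows the context menu for a bookmarks/history row next to the tapped view, using a
// different vertical anchor for the last visible item, after asynchronously checking
// whether the item is already bookmarked.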
private void showLibraryItemContextMenu(@NotNull View view, LibraryMenuWidget.LibraryContextMenuItem item, boolean isLastVisibleItem) {
view.requestFocusFromTouch();
hideContextMenus();
float ratio = WidgetPlacement.viewToWidgetRatio(getContext(), WindowWidget.this);
Rect offsetViewBounds = new Rect();
getDrawingRect(offsetViewBounds);
offsetDescendantRectToMyCoords(view, offsetViewBounds);
SessionStore.get().getBookmarkStore().isBookmarked(item.getUrl()).thenAcceptAsync((isBookmarked -> {
mLibraryItemContextMenu = new LibraryMenuWidget(getContext(), item, mWidgetManager.canOpenNewWindow(), isBookmarked);
mLibraryItemContextMenu.getPlacement().parentHandle = getHandle();
PointF position;
if (isLastVisibleItem) {
mLibraryItemContextMenu.mWidgetPlacement.anchorY = 0.0f;
position = new PointF(
(offsetViewBounds.left + view.getWidth()) * ratio,
-(offsetViewBounds.top) * ratio);
} else {
mLibraryItemContextMenu.mWidgetPlacement.anchorY = 1.0f;
position = new PointF(
(offsetViewBounds.left + view.getWidth()) * ratio,
-(offsetViewBounds.top + view.getHeight()) * ratio);
}
mLibraryItemContextMenu.mWidgetPlacement.translationX = position.x - (mLibraryItemContextMenu.getWidth()/mLibraryItemContextMenu.mWidgetPlacement.density);
mLibraryItemContextMenu.mWidgetPlacement.translationY = position.y + getResources().getDimension(R.dimen.library_menu_top_margin)/mLibraryItemContextMenu.mWidgetPlacement.density;
mLibraryItemContextMenu.setItemDelegate((new LibraryItemContextMenuClickCallback() {
@Override
public void onOpenInNewWindowClick(LibraryMenuWidget.LibraryContextMenuItem item) {
mWidgetManager.openNewWindow(item.getUrl());
hideContextMenus();
}
@Override
public void onOpenInNewTabClick(LibraryMenuWidget.LibraryContextMenuItem item) {
mWidgetManager.openNewTabForeground(item.getUrl());
hideContextMenus();
}
@Override
public void onAddToBookmarks(LibraryMenuWidget.LibraryContextMenuItem item) {
SessionStore.get().getBookmarkStore().addBookmark(item.getUrl(), item.getTitle());
hideContextMenus();
}
@Override
public void onRemoveFromBookmarks(LibraryMenuWidget.LibraryContextMenuItem item) {
SessionStore.get().getBookmarkStore().deleteBookmarkByURL(item.getUrl());
hideContextMenus();
}
}));
mLibraryItemContextMenu.show(REQUEST_FOCUS);
}), mUIThreadExecutor).exceptionally(throwable -> {
Log.d(LOGTAG, "Error getting the bookmarked status: " + throwable.getLocalizedMessage());
throwable.printStackTrace();
return null;
});
}
private BookmarksCallback mBookmarksListener = new BookmarksCallback() {
@Override
public void onShowContextMenu(@NonNull View view, @NotNull Bookmark item, boolean isLastVisibleItem) {
showLibraryItemContextMenu(
view,
new LibraryMenuWidget.LibraryContextMenuItem(
item.getUrl(),
item.getTitle(),
LibraryMenuWidget.LibraryItemType.BOOKMARKS),
isLastVisibleItem);
}
@Override
public void onFxASynSettings(@NonNull View view) {
mWidgetManager.getTray().toggleSettingsDialog(SettingsWidget.SettingDialog.FXA);
}
@Override
public void onHideContextMenu(@NonNull View view) {
hideContextMenus();
}
};
private HistoryCallback mHistoryListener = new HistoryCallback() {
@Override
public void onClearHistory(@NonNull View view) {
view.requestFocusFromTouch();
showClearCacheDialog();
}
@Override
public void onShowContextMenu(@NonNull View view, @NonNull VisitInfo item, boolean isLastVisibleItem) {
showLibraryItemContextMenu(
view,
new LibraryMenuWidget.LibraryContextMenuItem(
item.getUrl(),
item.getTitle(),
LibraryMenuWidget.LibraryItemType.HISTORY),
isLastVisibleItem);
}
@Override
public void onFxASynSettings(@NonNull View view) {
mWidgetManager.getTray().toggleSettingsDialog(SettingsWidget.SettingDialog.FXA);
}
@Override
public void onHideContextMenu(@NonNull View view) {
hideContextMenus();
}
};
private void hideContextMenus() {
if (mContextMenu != null) {
if (!mContextMenu.isReleased()) {
if (mContextMenu.isVisible()) {
mContextMenu.hide(REMOVE_WIDGET);
}
mContextMenu.releaseWidget();
}
mContextMenu = null;
}
if (mSelectionMenu != null) {
mSelectionMenu.setDelegate((SelectionActionWidget.Delegate)null);
if (!mSelectionMenu.isReleased()) {
if (mSelectionMenu.isVisible()) {
mSelectionMenu.hide(REMOVE_WIDGET);
}
mSelectionMenu.releaseWidget();
}
mSelectionMenu = null;
}
if (mWidgetPlacement.tintColor != 0xFFFFFFFF) {
mWidgetPlacement.tintColor = 0xFFFFFFFF;
mWidgetManager.updateWidget(this);
}
if (mLibraryItemContextMenu != null && !mLibraryItemContextMenu.isReleased()
&& mLibraryItemContextMenu.isVisible()) {
mLibraryItemContextMenu.hide(REMOVE_WIDGET);
}
}
// GeckoSession.ContentDelegate
@Override
public void onContextMenu(GeckoSession session, int screenX, int screenY, ContextElement element) {
if (element.type == ContextElement.TYPE_VIDEO) {
return;
}
TelemetryWrapper.longPressContextMenuEvent();
hideContextMenus();
mContextMenu = new ContextMenuWidget(getContext());
mContextMenu.mWidgetPlacement.parentHandle = getHandle();
mContextMenu.setDismissCallback(this::hideContextMenus);
mContextMenu.setContextElement(element);
mContextMenu.show(REQUEST_FOCUS);
mWidgetPlacement.tintColor = 0x555555FF;
mWidgetManager.updateWidget(this);
}
@Override
public void onFirstComposite(@NonNull GeckoSession session) {
if (!mAfterFirstPaint) {
return;
}
if (mFirstDrawCallback != null) {
mUIThreadExecutor.execute(mFirstDrawCallback);
mFirstDrawCallback = null;
}
}
@Override
public void onFirstContentfulPaint(@NonNull GeckoSession session) {
if (mAfterFirstPaint) {
return;
}
if (mFirstDrawCallback != null) {
mUIThreadExecutor.execute(mFirstDrawCallback);
mFirstDrawCallback = null;
mAfterFirstPaint = true;
}
}
// VideoAvailabilityListener
@Override
public void onVideoAvailabilityChanged(boolean aVideosAvailable) {
if (mTitleBar != null) {
mTitleBar.mediaAvailabilityChanged(aVideosAvailable);
}
for (WindowListener listener: mListeners) {
listener.onVideoAvailabilityChanged(this);
}
}
// GeckoSession.NavigationDelegate
@Override
public void onPageStart(@NonNull GeckoSession geckoSession, @NonNull String s) {
mCaptureOnPageStop = true;
if (isHistoryVisible()) {
hideHistory();
}
if (isBookmarksVisible()) {
hideBookmarks();
}
}
@Override
public void onPageStop(@NonNull GeckoSession aSession, boolean b) {
if (mCaptureOnPageStop || !mSession.hasCapturedBitmap()) {
mCaptureOnPageStop = false;
captureImage();
}
}
public void captureImage() {
mSession.captureBitmap();
}
@Override
public void onLocationChange(@NonNull GeckoSession session, @Nullable String url) {
updateTitleBarUrl(url);
}
// GeckoSession.HistoryDelegate
@Override
public void onHistoryStateChange(@NonNull GeckoSession geckoSession, @NonNull HistoryList historyList) {
if (!mSession.isPrivateMode()) {
for (HistoryItem item : historyList) {
SessionStore.get().getHistoryStore().recordObservation(item.getUri(), new PageObservation(item.getTitle()));
}
}
}
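// Records page visits in the history store. Private-mode sessions, non-top-level loads
// and unrecoverable errors are skipped; the Gecko visit flags are mapped to the
// matching VisitType (reload, permanent/temporary redirect, or link).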
@Nullable
@Override
public GeckoResult<Boolean> onVisited(@NonNull GeckoSession geckoSession, @NonNull String url, @Nullable String lastVisitedURL, int flags) {
if (mSession.isPrivateMode() ||
(flags & VISIT_TOP_LEVEL) == 0 ||
(flags & VISIT_UNRECOVERABLE_ERROR) != 0) {
return GeckoResult.fromValue(false);
}
boolean isReload = lastVisitedURL != null && lastVisitedURL.equals(url);
PageVisit pageVisit;
if (isReload) {
pageVisit = new PageVisit(VisitType.RELOAD, RedirectSource.NOT_A_SOURCE);
} else {
if ((flags & VISIT_REDIRECT_SOURCE_PERMANENT) != 0) {
pageVisit = new PageVisit(VisitType.REDIRECT_PERMANENT, RedirectSource.NOT_A_SOURCE);
} else if ((flags & VISIT_REDIRECT_SOURCE) != 0) {
pageVisit = new PageVisit(VisitType.REDIRECT_TEMPORARY, RedirectSource.NOT_A_SOURCE);
} else {
pageVisit = new PageVisit(VisitType.LINK, RedirectSource.NOT_A_SOURCE);
}
}
SessionStore.get().getHistoryStore().recordVisit(url, pageVisit);
SessionStore.get().getHistoryStore().recordObservation(url, new PageObservation(url));
return GeckoResult.fromValue(true);
}
@UiThread
@Nullable
public GeckoResult<boolean[]> getVisited(@NonNull GeckoSession geckoSession, @NonNull String[] urls) {
if (mSession.isPrivateMode()) {
return GeckoResult.fromValue(new boolean[]{});
}
GeckoResult<boolean[]> result = new GeckoResult<>();
SessionStore.get().getHistoryStore().getVisited(Arrays.asList(urls)).thenAcceptAsync(list -> {
final boolean[] primitives = new boolean[list.size()];
int index = 0;
for (Boolean object : list) {
primitives[index++] = object;
}
result.complete(primitives);
}, mUIThreadExecutor).exceptionally(throwable -> {
Log.d(LOGTAG, "Error getting history: " + throwable.getLocalizedMessage());
throwable.printStackTrace();
return null;
});
return result;
}
// GeckoSession.ProgressDelegate
@Override
public void onSecurityChange(GeckoSession geckoSession, SecurityInformation securityInformation) {
if (mTitleBar != null) {
mTitleBar.setIsInsecure(!securityInformation.isSecure);
}
}
// GeckoSession.SelectionActionDelegate
@Override
public void onShowActionRequest(@NonNull GeckoSession aSession, @NonNull Selection aSelection) {
if (aSelection.availableActions.size() == 1 && (aSelection.availableActions.contains(GeckoSession.SelectionActionDelegate.ACTION_HIDE))) {
// See: https://github.com/MozillaReality/FirefoxReality/issues/2214
aSelection.hide();
return;
}
TelemetryWrapper.longPressContextMenuEvent();
hideContextMenus();
mSelectionMenu = new SelectionActionWidget(getContext());
mSelectionMenu.mWidgetPlacement.parentHandle = getHandle();
mSelectionMenu.setActions(aSelection.availableActions);
Matrix matrix = new Matrix();
aSession.getClientToSurfaceMatrix(matrix);
matrix.mapRect(aSelection.clientRect);
mSelectionMenu.setSelectionRect(aSelection.clientRect);
mSelectionMenu.setDelegate(new SelectionActionWidget.Delegate() {
@Override
public void onAction(String action) {
hideContextMenus();
if (aSelection.isActionAvailable(action)) {
aSelection.execute(action);
}
}
@Override
public void onDismiss() {
hideContextMenus();
if (aSelection.isActionAvailable(GeckoSession.SelectionActionDelegate.ACTION_UNSELECT)) {
aSelection.execute(GeckoSession.SelectionActionDelegate.ACTION_UNSELECT);
}
}
});
mSelectionMenu.show(KEEP_FOCUS);
}
@Override
public void onHideAction(@NonNull GeckoSession aSession, int aHideReason) {
hideContextMenus();
}
}
| 1 | 8,651 | One thing we can do in a follow up is use the shorthand calls, so You can just call `aSelection.unselect()` instead of calling the more verbose `execute()`. The fact that you have to check if the action is available sort of sucks though. | MozillaReality-FirefoxReality | java |
@@ -519,10 +519,10 @@ func (st *ServerType) serversFromPairings(
for _, h := range hosts {
if h == defaultSNI {
hosts = append(hosts, "")
- cp.DefaultSNI = defaultSNI
break
}
}
+ cp.DefaultSNI = defaultSNI
// TODO: are matchers needed if every hostname of the resulting config is matched?
if len(hosts) > 0 { | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httpcaddyfile
import (
"encoding/json"
"fmt"
"reflect"
"sort"
"strings"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
"github.com/caddyserver/caddy/v2/modules/caddytls"
"github.com/caddyserver/certmagic"
)
func init() {
caddyconfig.RegisterAdapter("caddyfile", caddyfile.Adapter{ServerType: ServerType{}})
}
// ServerType can set up a config from an HTTP Caddyfile.
type ServerType struct {
}
// Setup makes a config from the tokens.
func (st ServerType) Setup(originalServerBlocks []caddyfile.ServerBlock,
options map[string]interface{}) (*caddy.Config, []caddyconfig.Warning, error) {
var warnings []caddyconfig.Warning
gc := counter{new(int)}
state := make(map[string]interface{})
// load all the server blocks and associate them with a "pile"
// of config values; also prohibit duplicate keys because they
// can make a config confusing if more than one server block is
// chosen to handle a request - we actually will make each
// server block's route terminal so that only one will run
sbKeys := make(map[string]struct{})
var serverBlocks []serverBlock
for i, sblock := range originalServerBlocks {
for j, k := range sblock.Keys {
if _, ok := sbKeys[k]; ok {
return nil, warnings, fmt.Errorf("duplicate site address not allowed: '%s' in %v (site block %d, key %d)", k, sblock.Keys, i, j)
}
sbKeys[k] = struct{}{}
}
serverBlocks = append(serverBlocks, serverBlock{
block: sblock,
pile: make(map[string][]ConfigValue),
})
}
// apply any global options
var err error
serverBlocks, err = st.evaluateGlobalOptionsBlock(serverBlocks, options)
if err != nil {
return nil, warnings, err
}
for _, sb := range serverBlocks {
// replace shorthand placeholders (which are
// convenient when writing a Caddyfile) with
// their actual placeholder identifiers or
// variable names
replacer := strings.NewReplacer(
"{dir}", "{http.request.uri.path.dir}",
"{file}", "{http.request.uri.path.file}",
"{host}", "{http.request.host}",
"{hostport}", "{http.request.hostport}",
"{method}", "{http.request.method}",
"{path}", "{http.request.uri.path}",
"{query}", "{http.request.uri.query}",
"{remote}", "{http.request.remote}",
"{remote_host}", "{http.request.remote.host}",
"{remote_port}", "{http.request.remote.port}",
"{scheme}", "{http.request.scheme}",
"{uri}", "{http.request.uri}",
"{tls_cipher}", "{http.request.tls.cipher_suite}",
"{tls_version}", "{http.request.tls.version}",
"{tls_client_fingerprint}", "{http.request.tls.client.fingerprint}",
"{tls_client_issuer}", "{http.request.tls.client.issuer}",
"{tls_client_serial}", "{http.request.tls.client.serial}",
"{tls_client_subject}", "{http.request.tls.client.subject}",
)
for _, segment := range sb.block.Segments {
for i := 0; i < len(segment); i++ {
segment[i].Text = replacer.Replace(segment[i].Text)
}
}
if len(sb.block.Keys) == 0 {
return nil, warnings, fmt.Errorf("server block without any key is global configuration, and if used, it must be first")
}
// extract matcher definitions
matcherDefs := make(map[string]caddy.ModuleMap)
for _, segment := range sb.block.Segments {
if dir := segment.Directive(); strings.HasPrefix(dir, matcherPrefix) {
d := sb.block.DispenseDirective(dir)
err := parseMatcherDefinitions(d, matcherDefs)
if err != nil {
return nil, warnings, err
}
}
}
// evaluate each directive ("segment") in this block
for _, segment := range sb.block.Segments {
dir := segment.Directive()
if strings.HasPrefix(dir, matcherPrefix) {
// matcher definitions were pre-processed
continue
}
dirFunc, ok := registeredDirectives[dir]
if !ok {
tkn := segment[0]
return nil, warnings, fmt.Errorf("%s:%d: unrecognized directive: %s", tkn.File, tkn.Line, dir)
}
h := Helper{
Dispenser: caddyfile.NewDispenser(segment),
options: options,
warnings: &warnings,
matcherDefs: matcherDefs,
parentBlock: sb.block,
groupCounter: gc,
State: state,
}
results, err := dirFunc(h)
if err != nil {
return nil, warnings, fmt.Errorf("parsing caddyfile tokens for '%s': %v", dir, err)
}
for _, result := range results {
result.directive = dir
sb.pile[result.Class] = append(sb.pile[result.Class], result)
}
}
}
// map: associate each listener address with the server blocks that use it
sbmap, err := st.mapAddressToServerBlocks(serverBlocks, options)
if err != nil {
return nil, warnings, err
}
// reduce: consolidate the address-to-server-block mappings into pairings
pairings := st.consolidateAddrMappings(sbmap)
// each pairing of listener addresses to list of server
// blocks is basically a server definition
servers, err := st.serversFromPairings(pairings, options, &warnings, gc)
if err != nil {
return nil, warnings, err
}
// now that each server is configured, make the HTTP app
httpApp := caddyhttp.App{
HTTPPort: tryInt(options["http_port"], &warnings),
HTTPSPort: tryInt(options["https_port"], &warnings),
Servers: servers,
}
// now for the TLS app! (TODO: refactor into own func)
tlsApp := caddytls.TLS{CertificatesRaw: make(caddy.ModuleMap)}
var certLoaders []caddytls.CertificateLoader
for _, p := range pairings {
for i, sblock := range p.serverBlocks {
// tls automation policies
if issuerVals, ok := sblock.pile["tls.cert_issuer"]; ok {
for _, issuerVal := range issuerVals {
issuer := issuerVal.Value.(certmagic.Issuer)
sblockHosts, err := st.hostsFromServerBlockKeys(sblock.block)
if err != nil {
return nil, warnings, err
}
if len(sblockHosts) > 0 {
if tlsApp.Automation == nil {
tlsApp.Automation = new(caddytls.AutomationConfig)
}
tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, &caddytls.AutomationPolicy{
Hosts: sblockHosts,
IssuerRaw: caddyconfig.JSONModuleObject(issuer, "module", issuer.(caddy.Module).CaddyModule().ID.Name(), &warnings),
})
} else {
warnings = append(warnings, caddyconfig.Warning{
Message: fmt.Sprintf("Server block %d %v has no names that qualify for automatic HTTPS, so no TLS automation policy will be added.", i, sblock.block.Keys),
})
}
}
}
// tls certificate loaders
if clVals, ok := sblock.pile["tls.certificate_loader"]; ok {
for _, clVal := range clVals {
certLoaders = append(certLoaders, clVal.Value.(caddytls.CertificateLoader))
}
}
}
}
// group certificate loaders by module name, then add to config
if len(certLoaders) > 0 {
loadersByName := make(map[string]caddytls.CertificateLoader)
for _, cl := range certLoaders {
name := caddy.GetModuleName(cl)
// ugh... technically, we may have multiple FileLoader and FolderLoader
// modules (because the tls directive returns one per occurrence), but
// the config structure expects only one instance of each kind of loader
// module, so we have to combine them... instead of enumerating each
// possible cert loader module in a type switch, we can use reflection,
// which works on any cert loaders that are slice types
if reflect.TypeOf(cl).Kind() == reflect.Slice {
combined := reflect.ValueOf(loadersByName[name])
if !combined.IsValid() {
combined = reflect.New(reflect.TypeOf(cl)).Elem()
}
clVal := reflect.ValueOf(cl)
for i := 0; i < clVal.Len(); i++ {
combined = reflect.Append(reflect.Value(combined), clVal.Index(i))
}
loadersByName[name] = combined.Interface().(caddytls.CertificateLoader)
}
}
for certLoaderName, loaders := range loadersByName {
tlsApp.CertificatesRaw[certLoaderName] = caddyconfig.JSON(loaders, &warnings)
}
}
// if global ACME CA, DNS, or email were set, append a catch-all automation
// policy that ensures they will be used if no tls directive was used
acmeCA, hasACMECA := options["acme_ca"]
acmeDNS, hasACMEDNS := options["acme_dns"]
email, hasEmail := options["email"]
if hasACMECA || hasACMEDNS || hasEmail {
if tlsApp.Automation == nil {
tlsApp.Automation = new(caddytls.AutomationConfig)
}
if !hasACMECA {
acmeCA = ""
}
if !hasEmail {
email = ""
}
mgr := caddytls.ACMEIssuer{
CA: acmeCA.(string),
Email: email.(string),
}
if hasACMEDNS {
provName := acmeDNS.(string)
dnsProvModule, err := caddy.GetModule("tls.dns." + provName)
if err != nil {
return nil, warnings, fmt.Errorf("getting DNS provider module named '%s': %v", provName, err)
}
mgr.Challenges = &caddytls.ChallengesConfig{
DNSRaw: caddyconfig.JSONModuleObject(dnsProvModule.New(), "provider", provName, &warnings),
}
}
tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, &caddytls.AutomationPolicy{
IssuerRaw: caddyconfig.JSONModuleObject(mgr, "module", "acme", &warnings),
})
}
if tlsApp.Automation != nil {
// consolidate automation policies that are the exact same
tlsApp.Automation.Policies = consolidateAutomationPolicies(tlsApp.Automation.Policies)
}
// if experimental HTTP/3 is enabled, enable it on each server
if enableH3, ok := options["experimental_http3"].(bool); ok && enableH3 {
for _, srv := range httpApp.Servers {
srv.ExperimentalHTTP3 = true
}
}
// extract any custom logs, and enforce configured levels
var customLogs []namedCustomLog
var hasDefaultLog bool
for _, sb := range serverBlocks {
for _, clVal := range sb.pile["custom_log"] {
ncl := clVal.Value.(namedCustomLog)
if ncl.name == "" {
continue
}
if ncl.name == "default" {
hasDefaultLog = true
}
if _, ok := options["debug"]; ok && ncl.log.Level == "" {
ncl.log.Level = "DEBUG"
}
customLogs = append(customLogs, ncl)
}
}
if !hasDefaultLog {
// if the default log was not customized, ensure we
// configure it with any applicable options
if _, ok := options["debug"]; ok {
customLogs = append(customLogs, namedCustomLog{
name: "default",
log: &caddy.CustomLog{Level: "DEBUG"},
})
}
}
// annnd the top-level config, then we're done!
cfg := &caddy.Config{AppsRaw: make(caddy.ModuleMap)}
if !reflect.DeepEqual(httpApp, caddyhttp.App{}) {
cfg.AppsRaw["http"] = caddyconfig.JSON(httpApp, &warnings)
}
if !reflect.DeepEqual(tlsApp, caddytls.TLS{CertificatesRaw: make(caddy.ModuleMap)}) {
cfg.AppsRaw["tls"] = caddyconfig.JSON(tlsApp, &warnings)
}
if storageCvtr, ok := options["storage"].(caddy.StorageConverter); ok {
cfg.StorageRaw = caddyconfig.JSONModuleObject(storageCvtr,
"module",
storageCvtr.(caddy.Module).CaddyModule().ID.Name(),
&warnings)
}
if adminConfig, ok := options["admin"].(string); ok && adminConfig != "" {
if adminConfig == "off" {
cfg.Admin = &caddy.AdminConfig{Disabled: true}
} else {
cfg.Admin = &caddy.AdminConfig{Listen: adminConfig}
}
}
if len(customLogs) > 0 {
if cfg.Logging == nil {
cfg.Logging = &caddy.Logging{
Logs: make(map[string]*caddy.CustomLog),
}
}
for _, ncl := range customLogs {
if ncl.name != "" {
cfg.Logging.Logs[ncl.name] = ncl.log
}
}
}
return cfg, warnings, nil
}
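// Illustrative example (not taken from this repository): the global options block
// consumed by evaluateGlobalOptionsBlock is a key-less block at the top of the
// Caddyfile, for instance:
// {
//     http_port 8080
//     email [email protected]
//     debug
// }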
// evaluateGlobalOptionsBlock evaluates the global options block,
// which is expected to be the first server block if it has zero
// keys. It returns the updated list of server blocks with the
// global options block removed, and updates options accordingly.
func (ServerType) evaluateGlobalOptionsBlock(serverBlocks []serverBlock, options map[string]interface{}) ([]serverBlock, error) {
if len(serverBlocks) == 0 || len(serverBlocks[0].block.Keys) > 0 {
return serverBlocks, nil
}
for _, segment := range serverBlocks[0].block.Segments {
dir := segment.Directive()
var val interface{}
var err error
disp := caddyfile.NewDispenser(segment)
// TODO: make this switch into a map
switch dir {
case "http_port":
val, err = parseOptHTTPPort(disp)
case "https_port":
val, err = parseOptHTTPSPort(disp)
case "default_sni":
val, err = parseOptSingleString(disp)
case "order":
val, err = parseOptOrder(disp)
case "experimental_http3":
val, err = parseOptExperimentalHTTP3(disp)
case "storage":
val, err = parseOptStorage(disp)
case "acme_ca", "acme_dns", "acme_ca_root":
val, err = parseOptSingleString(disp)
case "email":
val, err = parseOptSingleString(disp)
case "admin":
val, err = parseOptAdmin(disp)
case "debug":
options["debug"] = true
default:
return nil, fmt.Errorf("unrecognized parameter name: %s", dir)
}
if err != nil {
return nil, fmt.Errorf("%s: %v", dir, err)
}
options[dir] = val
}
return serverBlocks[1:], nil
}
// hostsFromServerBlockKeys returns a list of all the
// hostnames found in the keys of the server block sb.
// The list may not be in a consistent order.
func (st *ServerType) hostsFromServerBlockKeys(sb caddyfile.ServerBlock) ([]string, error) {
// first get each unique hostname
hostMap := make(map[string]struct{})
for _, sblockKey := range sb.Keys {
addr, err := ParseAddress(sblockKey)
if err != nil {
return nil, fmt.Errorf("parsing server block key: %v", err)
}
addr = addr.Normalize()
if addr.Host == "" {
continue
}
hostMap[addr.Host] = struct{}{}
}
// convert map to slice
sblockHosts := make([]string, 0, len(hostMap))
for host := range hostMap {
sblockHosts = append(sblockHosts, host)
}
return sblockHosts, nil
}
// serversFromPairings creates the servers for each pairing of addresses
// to server blocks. Each pairing is essentially a server definition.
func (st *ServerType) serversFromPairings(
pairings []sbAddrAssociation,
options map[string]interface{},
warnings *[]caddyconfig.Warning,
groupCounter counter,
) (map[string]*caddyhttp.Server, error) {
servers := make(map[string]*caddyhttp.Server)
defaultSNI := tryString(options["default_sni"], warnings)
for i, p := range pairings {
srv := &caddyhttp.Server{
Listen: p.addresses,
}
// sort server blocks by their keys; this is important because
// only the first matching site should be evaluated, and we should
// attempt to match most specific site first (host and path), in
// case their matchers overlap; we do this somewhat naively by
// descending sort by length of host then path
sort.SliceStable(p.serverBlocks, func(i, j int) bool {
// TODO: we could pre-process the specificities for efficiency,
// but I don't expect many blocks will have SO many keys...
var iLongestPath, jLongestPath string
var iLongestHost, jLongestHost string
for _, key := range p.serverBlocks[i].block.Keys {
addr, _ := ParseAddress(key)
if specificity(addr.Host) > specificity(iLongestHost) {
iLongestHost = addr.Host
}
if specificity(addr.Path) > specificity(iLongestPath) {
iLongestPath = addr.Path
}
}
for _, key := range p.serverBlocks[j].block.Keys {
addr, _ := ParseAddress(key)
if specificity(addr.Host) > specificity(jLongestHost) {
jLongestHost = addr.Host
}
if specificity(addr.Path) > specificity(jLongestPath) {
jLongestPath = addr.Path
}
}
if specificity(iLongestHost) == specificity(jLongestHost) {
return len(iLongestPath) > len(jLongestPath)
}
return specificity(iLongestHost) > specificity(jLongestHost)
})
var hasCatchAllTLSConnPolicy bool
// create a subroute for each site in the server block
for _, sblock := range p.serverBlocks {
matcherSetsEnc, err := st.compileEncodedMatcherSets(sblock.block)
if err != nil {
return nil, fmt.Errorf("server block %v: compiling matcher sets: %v", sblock.block.Keys, err)
}
// tls: connection policies and toggle auto HTTPS
if _, ok := sblock.pile["tls.off"]; ok {
// TODO: right now, no directives yield any tls.off value...
// tls off: disable TLS (and automatic HTTPS) for server block's names
if srv.AutoHTTPS == nil {
srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
}
srv.AutoHTTPS.Disabled = true
} else if cpVals, ok := sblock.pile["tls.connection_policy"]; ok {
// tls connection policies
for _, cpVal := range cpVals {
cp := cpVal.Value.(*caddytls.ConnectionPolicy)
// make sure the policy covers all hostnames from the block
hosts, err := st.hostsFromServerBlockKeys(sblock.block)
if err != nil {
return nil, err
}
for _, h := range hosts {
if h == defaultSNI {
hosts = append(hosts, "")
cp.DefaultSNI = defaultSNI
break
}
}
// TODO: are matchers needed if every hostname of the resulting config is matched?
if len(hosts) > 0 {
cp.MatchersRaw = caddy.ModuleMap{
"sni": caddyconfig.JSON(hosts, warnings), // make sure to match all hosts, not just auto-HTTPS-qualified ones
}
} else {
hasCatchAllTLSConnPolicy = true
}
srv.TLSConnPolicies = append(srv.TLSConnPolicies, cp)
}
// TODO: consolidate equal conn policies?
}
// exclude any hosts that were defined explicitly with
// "http://" in the key from automated cert management (issue #2998)
for _, key := range sblock.block.Keys {
addr, err := ParseAddress(key)
if err != nil {
return nil, err
}
addr = addr.Normalize()
if addr.Scheme == "http" {
if srv.AutoHTTPS == nil {
srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
}
if !sliceContains(srv.AutoHTTPS.Skip, addr.Host) {
srv.AutoHTTPS.Skip = append(srv.AutoHTTPS.Skip, addr.Host)
}
}
}
// set up each handler directive, making sure to honor directive order
dirRoutes := sblock.pile["route"]
siteSubroute, err := buildSubroute(dirRoutes, groupCounter)
if err != nil {
return nil, err
}
// add the site block's route(s) to the server
srv.Routes = appendSubrouteToRouteList(srv.Routes, siteSubroute, matcherSetsEnc, p, warnings)
// if error routes are defined, add those too
if errorSubrouteVals, ok := sblock.pile["error_route"]; ok {
if srv.Errors == nil {
srv.Errors = new(caddyhttp.HTTPErrorConfig)
}
for _, val := range errorSubrouteVals {
sr := val.Value.(*caddyhttp.Subroute)
srv.Errors.Routes = appendSubrouteToRouteList(srv.Errors.Routes, sr, matcherSetsEnc, p, warnings)
}
}
// add log associations
for _, cval := range sblock.pile["custom_log"] {
ncl := cval.Value.(namedCustomLog)
if srv.Logs == nil {
srv.Logs = &caddyhttp.ServerLogConfig{
LoggerNames: make(map[string]string),
}
}
hosts, err := st.hostsFromServerBlockKeys(sblock.block)
if err != nil {
return nil, err
}
for _, h := range hosts {
if ncl.name != "" {
srv.Logs.LoggerNames[h] = ncl.name
}
}
}
}
// a catch-all TLS conn policy is necessary to ensure TLS can
// be offered to all hostnames of the server; even though only
// one policy is needed to enable TLS for the server, that
// policy might apply to only certain TLS handshakes; but when
// using the Caddyfile, user would expect all handshakes to at
// least have a matching connection policy, so here we append a
// catch-all/default policy if there isn't one already (it's
// important that it goes at the end) - see issue #3004:
// https://github.com/caddyserver/caddy/issues/3004
if !hasCatchAllTLSConnPolicy && (len(srv.TLSConnPolicies) > 0 || defaultSNI != "") {
srv.TLSConnPolicies = append(srv.TLSConnPolicies, &caddytls.ConnectionPolicy{DefaultSNI: defaultSNI})
}
srv.Routes = consolidateRoutes(srv.Routes)
servers[fmt.Sprintf("srv%d", i)] = srv
}
return servers, nil
}
// appendSubrouteToRouteList appends the routes in subroute
// to the routeList, optionally qualified by matchers.
func appendSubrouteToRouteList(routeList caddyhttp.RouteList,
subroute *caddyhttp.Subroute,
matcherSetsEnc []caddy.ModuleMap,
p sbAddrAssociation,
warnings *[]caddyconfig.Warning) caddyhttp.RouteList {
if len(matcherSetsEnc) == 0 && len(p.serverBlocks) == 1 {
// no need to wrap the handlers in a subroute if this is
// the only server block and there is no matcher for it
routeList = append(routeList, subroute.Routes...)
} else {
routeList = append(routeList, caddyhttp.Route{
MatcherSetsRaw: matcherSetsEnc,
HandlersRaw: []json.RawMessage{
caddyconfig.JSONModuleObject(subroute, "handler", "subroute", warnings),
},
Terminal: true, // only first matching site block should be evaluated
})
}
return routeList
}
// buildSubroute turns the config values, which are expected to be routes
// into a clean and orderly subroute that has all the routes within it.
func buildSubroute(routes []ConfigValue, groupCounter counter) (*caddyhttp.Subroute, error) {
for _, val := range routes {
if !directiveIsOrdered(val.directive) {
return nil, fmt.Errorf("directive '%s' is not ordered, so it cannot be used here", val.directive)
}
}
sortRoutes(routes)
subroute := new(caddyhttp.Subroute)
// some directives are mutually exclusive (only first matching
// instance should be evaluated); this is done by putting their
// routes in the same group
mutuallyExclusiveDirs := map[string]*struct {
count int
groupName string
}{
// as a special case, group rewrite directives so that they are mutually exclusive;
// this means that only the first matching rewrite will be evaluated, and that's
// probably a good thing, since there should never be a need to do more than one
// rewrite (I think?), and cascading rewrites smell bad... imagine these rewrites:
// rewrite /docs/json/* /docs/json/index.html
// rewrite /docs/* /docs/index.html
// (We use this on the Caddy website, or at least we did once.) The first rewrite's
// result is also matched by the second rewrite, making the first rewrite pointless.
// See issue #2959.
"rewrite": {},
// handle blocks are also mutually exclusive by definition
"handle": {},
// root just sets a variable, so if it was not mutually exclusive, intersecting
// root directives would overwrite previously-matched ones; they should not cascade
"root": {},
}
for meDir, info := range mutuallyExclusiveDirs {
// see how many instances of the directive there are
for _, r := range routes {
if r.directive == meDir {
info.count++
if info.count > 1 {
break
}
}
}
// if there is more than one, put them in a group
if info.count > 1 {
info.groupName = groupCounter.nextGroup()
}
}
// add all the routes piled in from directives
for _, r := range routes {
// put this route into a group if it is mutually exclusive
if info, ok := mutuallyExclusiveDirs[r.directive]; ok {
route := r.Value.(caddyhttp.Route)
route.Group = info.groupName
r.Value = route
}
switch route := r.Value.(type) {
case caddyhttp.Subroute:
// if a route-class config value is actually a Subroute handler
// with nothing but a list of routes, then it is the intention
// of the directive to keep these handlers together and in this
// same order, but not necessarily in a subroute (if it wanted
// to keep them in a subroute, the directive would have returned
// a route with a Subroute as its handler); this is useful to
// keep multiple handlers/routes together and in the same order
// so that the sorting procedure we did above doesn't reorder them
if route.Errors != nil {
// if error handlers are also set, this is confusing; it's
// probably supposed to be wrapped in a Route and encoded
// as a regular handler route... programmer error.
panic("found subroute with more than just routes; perhaps it should have been wrapped in a route?")
}
subroute.Routes = append(subroute.Routes, route.Routes...)
case caddyhttp.Route:
subroute.Routes = append(subroute.Routes, route)
}
}
subroute.Routes = consolidateRoutes(subroute.Routes)
return subroute, nil
}
// consolidateRoutes combines routes with the same properties
// (same matchers, same Terminal and Group settings) for a
// cleaner overall output.
// FIXME: See caddyserver/caddy#3108
func consolidateRoutes(routes caddyhttp.RouteList) caddyhttp.RouteList {
for i := 0; i < len(routes)-1; i++ {
if reflect.DeepEqual(routes[i].MatcherSetsRaw, routes[i+1].MatcherSetsRaw) &&
routes[i].Terminal == routes[i+1].Terminal &&
routes[i].Group == routes[i+1].Group {
// keep the handlers in the same order, then splice out repetitive route
routes[i].HandlersRaw = append(routes[i].HandlersRaw, routes[i+1].HandlersRaw...)
routes = append(routes[:i+1], routes[i+2:]...)
i--
}
}
return routes
}
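// Editorial example (not in the original source), derived from the loop above:
// two adjacent routes that share MatcherSetsRaw, Terminal and Group are merged
// by concatenating their HandlersRaw and splicing out the second route, e.g.
//
//	route[n]:   matchers={"host": ["a.example.com"]}, handlers=[h1]
//	route[n+1]: matchers={"host": ["a.example.com"]}, handlers=[h2]
//
// collapses into a single route with handlers=[h1, h2].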
// consolidateAutomationPolicies combines automation policies that are the same,
// for a cleaner overall output.
func consolidateAutomationPolicies(aps []*caddytls.AutomationPolicy) []*caddytls.AutomationPolicy {
for i := 0; i < len(aps); i++ {
for j := 0; j < len(aps); j++ {
if j == i {
continue
}
// if they're exactly equal in every way, just keep one of them
if reflect.DeepEqual(aps[i], aps[j]) {
aps = append(aps[:j], aps[j+1:]...)
i--
break
}
// if the policy is the same, we can keep just one, but we have
// to be careful which one we keep; if only one has any hostnames
// defined, then we need to keep the one without any hostnames,
// otherwise the one without any hosts (a catch-all) would be
// eaten up by the one with hosts; and if both have hosts, we
// need to combine their lists
if reflect.DeepEqual(aps[i].IssuerRaw, aps[j].IssuerRaw) &&
aps[i].ManageSync == aps[j].ManageSync {
if len(aps[i].Hosts) == 0 && len(aps[j].Hosts) > 0 {
aps = append(aps[:j], aps[j+1:]...)
} else if len(aps[i].Hosts) > 0 && len(aps[j].Hosts) == 0 {
aps = append(aps[:i], aps[i+1:]...)
} else {
aps[i].Hosts = append(aps[i].Hosts, aps[j].Hosts...)
aps = append(aps[:j], aps[j+1:]...)
}
i--
break
}
}
}
// ensure any catch-all policies go last
sort.SliceStable(aps, func(i, j int) bool {
return len(aps[i].Hosts) > len(aps[j].Hosts)
})
return aps
}
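// Editorial example (not in the original source): given two policies with equal
// IssuerRaw and ManageSync, where one has Hosts=["example.com"] and the other
// has no Hosts (a catch-all), the loop above drops the host-specific one so the
// catch-all is not shadowed; when both list hosts, the host lists are merged
// into a single policy instead. The final sort then pushes catch-alls to the end.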
func matcherSetFromMatcherToken(
tkn caddyfile.Token,
matcherDefs map[string]caddy.ModuleMap,
warnings *[]caddyconfig.Warning,
) (caddy.ModuleMap, bool, error) {
// matcher tokens can be wildcards, simple path matchers,
// or refer to a pre-defined matcher by some name
if tkn.Text == "*" {
// match all requests == no matchers, so nothing to do
return nil, true, nil
} else if strings.HasPrefix(tkn.Text, "/") {
// convenient way to specify a single path match
return caddy.ModuleMap{
"path": caddyconfig.JSON(caddyhttp.MatchPath{tkn.Text}, warnings),
}, true, nil
} else if strings.HasPrefix(tkn.Text, matcherPrefix) {
// pre-defined matcher
m, ok := matcherDefs[tkn.Text]
if !ok {
return nil, false, fmt.Errorf("unrecognized matcher name: %+v", tkn.Text)
}
return m, true, nil
}
return nil, false, nil
}
func (st *ServerType) compileEncodedMatcherSets(sblock caddyfile.ServerBlock) ([]caddy.ModuleMap, error) {
type hostPathPair struct {
hostm caddyhttp.MatchHost
pathm caddyhttp.MatchPath
}
// keep routes with common host and path matchers together
var matcherPairs []*hostPathPair
for _, key := range sblock.Keys {
addr, err := ParseAddress(key)
if err != nil {
return nil, fmt.Errorf("server block %v: parsing and standardizing address '%s': %v", sblock.Keys, key, err)
}
addr = addr.Normalize()
// choose a matcher pair that should be shared by this
// server block; if none exists yet, create one
var chosenMatcherPair *hostPathPair
for _, mp := range matcherPairs {
if (len(mp.pathm) == 0 && addr.Path == "") ||
(len(mp.pathm) == 1 && mp.pathm[0] == addr.Path) {
chosenMatcherPair = mp
break
}
}
if chosenMatcherPair == nil {
chosenMatcherPair = new(hostPathPair)
if addr.Path != "" {
chosenMatcherPair.pathm = []string{addr.Path}
}
matcherPairs = append(matcherPairs, chosenMatcherPair)
}
// add this server block's keys to the matcher
// pair if it doesn't already exist
if addr.Host != "" {
var found bool
for _, h := range chosenMatcherPair.hostm {
if h == addr.Host {
found = true
break
}
}
if !found {
chosenMatcherPair.hostm = append(chosenMatcherPair.hostm, addr.Host)
}
}
}
// iterate each pairing of host and path matchers and
// put them into a map for JSON encoding
var matcherSets []map[string]caddyhttp.RequestMatcher
for _, mp := range matcherPairs {
matcherSet := make(map[string]caddyhttp.RequestMatcher)
if len(mp.hostm) > 0 {
matcherSet["host"] = mp.hostm
}
if len(mp.pathm) > 0 {
matcherSet["path"] = mp.pathm
}
if len(matcherSet) > 0 {
matcherSets = append(matcherSets, matcherSet)
}
}
// finally, encode each of the matcher sets
var matcherSetsEnc []caddy.ModuleMap
for _, ms := range matcherSets {
msEncoded, err := encodeMatcherSet(ms)
if err != nil {
return nil, fmt.Errorf("server block %v: %v", sblock.Keys, err)
}
matcherSetsEnc = append(matcherSetsEnc, msEncoded)
}
return matcherSetsEnc, nil
}
func parseMatcherDefinitions(d *caddyfile.Dispenser, matchers map[string]caddy.ModuleMap) error {
for d.Next() {
definitionName := d.Val()
if _, ok := matchers[definitionName]; ok {
return fmt.Errorf("matcher is defined more than once: %s", definitionName)
}
matchers[definitionName] = make(caddy.ModuleMap)
// in case there are multiple instances of the same matcher, concatenate
// their tokens (we expect that UnmarshalCaddyfile should be able to
// handle more than one segment); otherwise, we'd overwrite other
// instances of the matcher in this set
tokensByMatcherName := make(map[string][]caddyfile.Token)
for nesting := d.Nesting(); d.NextBlock(nesting); {
matcherName := d.Val()
tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
}
for matcherName, tokens := range tokensByMatcherName {
mod, err := caddy.GetModule("http.matchers." + matcherName)
if err != nil {
return fmt.Errorf("getting matcher module '%s': %v", matcherName, err)
}
unm, ok := mod.New().(caddyfile.Unmarshaler)
if !ok {
return fmt.Errorf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
}
err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(tokens))
if err != nil {
return err
}
rm, ok := unm.(caddyhttp.RequestMatcher)
if !ok {
return fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
}
matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil)
}
}
return nil
}
func encodeMatcherSet(matchers map[string]caddyhttp.RequestMatcher) (caddy.ModuleMap, error) {
msEncoded := make(caddy.ModuleMap)
for matcherName, val := range matchers {
jsonBytes, err := json.Marshal(val)
if err != nil {
return nil, fmt.Errorf("marshaling matcher set %#v: %v", matchers, err)
}
msEncoded[matcherName] = jsonBytes
}
return msEncoded, nil
}
// tryInt tries to convert val to an integer. If it fails,
// it downgrades the error to a warning and returns 0.
func tryInt(val interface{}, warnings *[]caddyconfig.Warning) int {
intVal, ok := val.(int)
if val != nil && !ok && warnings != nil {
*warnings = append(*warnings, caddyconfig.Warning{Message: "not an integer type"})
}
return intVal
}
func tryString(val interface{}, warnings *[]caddyconfig.Warning) string {
stringVal, ok := val.(string)
if val != nil && !ok && warnings != nil {
*warnings = append(*warnings, caddyconfig.Warning{Message: "not a string type"})
}
return stringVal
}
// sliceContains returns true if needle is in haystack.
func sliceContains(haystack []string, needle string) bool {
for _, s := range haystack {
if s == needle {
return true
}
}
return false
}
// specificity returns len(s) minus any wildcards (*) and
// placeholders ({...}). Basically, it's a length count
// that penalizes the use of wildcards and placeholders.
// This is useful for comparing hostnames and paths.
// However, wildcards in paths are not a sure answer to
// the question of specificity. For example,
// '*.example.com' is clearly less specific than
// 'a.example.com', but is '/a' more or less specific
// than '/a*'?
func specificity(s string) int {
l := len(s) - strings.Count(s, "*")
for len(s) > 0 {
start := strings.Index(s, "{")
if start < 0 {
return l
}
end := strings.Index(s[start:], "}") + start + 1
if end <= start {
return l
}
l -= end - start
s = s[end:]
}
return l
}
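// Editorial examples (not in the original source), worked from the function above:
//
//	specificity("a.example.com") == 13 // plain host: full length counts
//	specificity("*.example.com") == 12 // the wildcard costs one point
//	specificity("/docs/{id}")    == 6  // the "{id}" placeholder is not counted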
type counter struct {
n *int
}
func (c counter) nextGroup() string {
name := fmt.Sprintf("group%d", *c.n)
*c.n++
return name
}
type namedCustomLog struct {
name string
log *caddy.CustomLog
}
// sbAddrAssociation is a mapping from a list of
// addresses to a list of server blocks that are
// served on those addresses.
type sbAddrAssociation struct {
addresses []string
serverBlocks []serverBlock
}
const matcherPrefix = "@"
// Interface guard
var _ caddyfile.ServerType = (*ServerType)(nil)
| 1 | 14,410 | I don't think adding this value to _every_ connection policy is needed or useful... why is this necessary? | caddyserver-caddy | go |
@@ -10,7 +10,11 @@ declare(strict_types=1);
namespace Ergonode\Product\Infrastructure\Provider\Strategy;
use Ergonode\Attribute\Domain\Entity\AbstractAttribute;
-use Ergonode\Attribute\Infrastructure\Provider\AttributeValueConstraintStrategyInterface;
+use Ergonode\Attribute\Infrastructure\Provider\ContextAwareAttributeValueConstraintStrategyInterface;
+use Ergonode\EventSourcing\Infrastructure\Manager\EventStoreManagerInterface;
+use Ergonode\Product\Application\Validator\NotTheSameProduct;
+use Ergonode\Product\Domain\Entity\AbstractProduct;
+use Ergonode\SharedKernel\Domain\AggregateId;
use Symfony\Component\Validator\Constraint;
use Symfony\Component\Validator\Constraints\Collection;
use Ergonode\Product\Application\Validator\ProductExists; | 1 | <?php
/**
* Copyright © Ergonode Sp. z o.o. All rights reserved.
* See LICENSE.txt for license details.
*/
declare(strict_types=1);
namespace Ergonode\Product\Infrastructure\Provider\Strategy;
use Ergonode\Attribute\Domain\Entity\AbstractAttribute;
use Ergonode\Attribute\Infrastructure\Provider\AttributeValueConstraintStrategyInterface;
use Symfony\Component\Validator\Constraint;
use Symfony\Component\Validator\Constraints\Collection;
use Ergonode\Product\Application\Validator\ProductExists;
use Symfony\Component\Validator\Constraints\All;
use Ergonode\Product\Domain\Entity\Attribute\ProductRelationAttribute;
use Symfony\Component\Validator\Constraints\Uuid;
use Symfony\Component\Validator\Constraints\NotBlank;
class ProductRelationAttributeValueConstraintStrategy implements AttributeValueConstraintStrategyInterface
{
public function supports(AbstractAttribute $attribute): bool
{
return $attribute instanceof ProductRelationAttribute;
}
public function get(AbstractAttribute $attribute): Constraint
{
return new Collection([
'value' => new All(
['constraints' =>
[
new NotBlank(),
new Uuid(['strict' => true]),
new ProductExists(),
],
]
),
]);
}
}
| 1 | 9,559 | adding this constrain for null AggreagateId is redundant | ergonode-backend | php |
@@ -103,7 +103,12 @@ func Dashboard(ctx *middleware.Context) {
feeds := make([]*models.Action, 0, len(actions))
for _, act := range actions {
if act.IsPrivate {
- if has, _ := models.HasAccess(ctx.User, &models.Repository{Id: act.RepoId, IsPrivate: true}, models.ACCESS_MODE_READ); !has {
+ repo := &models.Repository{Id: act.RepoId, IsPrivate: true}
+ // This prevents having to retrieve the repository for each action
+ if act.RepoUserName == ctx.User.LowerName {
+ repo.OwnerId = ctx.User.Id
+ }
+ if has, _ := models.HasAccess(ctx.User, repo, models.ACCESS_MODE_READ); !has {
continue
}
} | 1 | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package user
import (
"bytes"
"fmt"
"strings"
"github.com/Unknwon/com"
"github.com/gogits/gogs/models"
"github.com/gogits/gogs/modules/base"
"github.com/gogits/gogs/modules/log"
"github.com/gogits/gogs/modules/middleware"
"github.com/gogits/gogs/modules/setting"
)
const (
DASHBOARD base.TplName = "user/dashboard/dashboard"
PULLS base.TplName = "user/dashboard/pulls"
ISSUES base.TplName = "user/issues"
STARS base.TplName = "user/stars"
PROFILE base.TplName = "user/profile"
)
func Dashboard(ctx *middleware.Context) {
ctx.Data["Title"] = ctx.Tr("dashboard")
ctx.Data["PageIsDashboard"] = true
ctx.Data["PageIsNews"] = true
var ctxUser *models.User
// Check context type.
orgName := ctx.Params(":org")
if len(orgName) > 0 {
// Organization.
org, err := models.GetUserByName(orgName)
if err != nil {
if err == models.ErrUserNotExist {
ctx.Handle(404, "GetUserByName", err)
} else {
ctx.Handle(500, "GetUserByName", err)
}
return
}
ctxUser = org
} else {
// Normal user.
ctxUser = ctx.User
collaborates, err := ctx.User.GetAccessibleRepositories()
if err != nil {
ctx.Handle(500, "GetAccessibleRepositories", err)
return
}
repositories := make([]*models.Repository, 0, len(collaborates))
for repo := range collaborates {
repositories = append(repositories, repo)
}
ctx.Data["CollaborateCount"] = len(repositories)
ctx.Data["CollaborativeRepos"] = repositories
}
ctx.Data["ContextUser"] = ctxUser
if err := ctx.User.GetOrganizations(); err != nil {
ctx.Handle(500, "GetOrganizations", err)
return
}
ctx.Data["Orgs"] = ctx.User.Orgs
repos, err := models.GetRepositories(ctxUser.Id, true)
if err != nil {
ctx.Handle(500, "GetRepositories", err)
return
}
ctx.Data["Repos"] = repos
// Get mirror repositories.
mirrors := make([]*models.Repository, 0, len(repos)/2)
for _, repo := range repos {
if repo.IsMirror {
if err = repo.GetMirror(); err != nil {
ctx.Handle(500, "GetMirror: "+repo.Name, err)
return
}
mirrors = append(mirrors, repo)
}
}
ctx.Data["MirrorCount"] = len(mirrors)
ctx.Data["Mirrors"] = mirrors
// Get feeds.
actions, err := models.GetFeeds(ctxUser.Id, 0, false)
if err != nil {
ctx.Handle(500, "GetFeeds", err)
return
}
// Check access of private repositories.
feeds := make([]*models.Action, 0, len(actions))
for _, act := range actions {
if act.IsPrivate {
if has, _ := models.HasAccess(ctx.User, &models.Repository{Id: act.RepoId, IsPrivate: true}, models.ACCESS_MODE_READ); !has {
continue
}
}
// FIXME: cache results?
u, err := models.GetUserByName(act.ActUserName)
if err != nil {
if err == models.ErrUserNotExist {
continue
}
ctx.Handle(500, "GetUserByName", err)
return
}
act.ActAvatar = u.AvatarLink()
feeds = append(feeds, act)
}
ctx.Data["Feeds"] = feeds
ctx.HTML(200, DASHBOARD)
}
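// Editorial sketch (not part of the original file): one possible reading of the
// reviewer's `else if` question attached to this row's diff: if the feed's
// repository owner name already matches the signed-in user, the access check
// could be skipped entirely instead of only pre-filling OwnerId. The identifiers
// come from the patch; the restructuring itself is hypothetical:
//
//	if act.IsPrivate {
//		if act.RepoUserName != ctx.User.LowerName {
//			repo := &models.Repository{Id: act.RepoId, IsPrivate: true}
//			if has, _ := models.HasAccess(ctx.User, repo, models.ACCESS_MODE_READ); !has {
//				continue
//			}
//		}
//	}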
func Pulls(ctx *middleware.Context) {
ctx.Data["Title"] = ctx.Tr("pull_requests")
ctx.Data["PageIsDashboard"] = true
ctx.Data["PageIsPulls"] = true
if err := ctx.User.GetOrganizations(); err != nil {
ctx.Handle(500, "GetOrganizations", err)
return
}
ctx.Data["ContextUser"] = ctx.User
ctx.HTML(200, PULLS)
}
func ShowSSHKeys(ctx *middleware.Context, uid int64) {
keys, err := models.ListPublicKeys(uid)
if err != nil {
ctx.Handle(500, "ListPublicKeys", err)
return
}
var buf bytes.Buffer
for i := range keys {
buf.WriteString(keys[i].OmitEmail())
}
ctx.RenderData(200, buf.Bytes())
}
func Profile(ctx *middleware.Context) {
ctx.Data["Title"] = "Profile"
ctx.Data["PageIsUserProfile"] = true
uname := ctx.Params(":username")
// Special handle for FireFox requests favicon.ico.
if uname == "favicon.ico" {
ctx.Redirect(setting.AppSubUrl + "/img/favicon.png")
return
}
isShowKeys := false
if strings.HasSuffix(uname, ".keys") {
isShowKeys = true
uname = strings.TrimSuffix(uname, ".keys")
}
u, err := models.GetUserByName(uname)
if err != nil {
if err == models.ErrUserNotExist {
ctx.Handle(404, "GetUserByName", err)
} else {
ctx.Handle(500, "GetUserByName", err)
}
return
}
// Show SSH keys.
if isShowKeys {
ShowSSHKeys(ctx, u.Id)
return
}
if u.IsOrganization() {
ctx.Redirect(setting.AppSubUrl + "/org/" + u.Name)
return
}
// For security reason, hide e-mail address for anonymous visitors.
if !ctx.IsSigned {
u.Email = ""
}
ctx.Data["Owner"] = u
tab := ctx.Query("tab")
ctx.Data["TabName"] = tab
switch tab {
case "activity":
actions, err := models.GetFeeds(u.Id, 0, false)
if err != nil {
ctx.Handle(500, "GetFeeds", err)
return
}
feeds := make([]*models.Action, 0, len(actions))
for _, act := range actions {
if act.IsPrivate {
if !ctx.IsSigned {
continue
}
if has, _ := models.HasAccess(ctx.User,
&models.Repository{
Id: act.RepoId,
IsPrivate: true,
}, models.ACCESS_MODE_READ); !has {
continue
}
}
// FIXME: cache results?
u, err := models.GetUserByName(act.ActUserName)
if err != nil {
if err == models.ErrUserNotExist {
continue
}
ctx.Handle(500, "GetUserByName", err)
return
}
act.ActAvatar = u.AvatarLink()
feeds = append(feeds, act)
}
ctx.Data["Feeds"] = feeds
default:
ctx.Data["Repos"], err = models.GetRepositories(u.Id, ctx.IsSigned && ctx.User.Id == u.Id)
if err != nil {
ctx.Handle(500, "GetRepositories", err)
return
}
}
ctx.HTML(200, PROFILE)
}
func Email2User(ctx *middleware.Context) {
u, err := models.GetUserByEmail(ctx.Query("email"))
if err != nil {
if err == models.ErrUserNotExist {
ctx.Handle(404, "user.Email2User(GetUserByEmail)", err)
} else {
ctx.Handle(500, "user.Email2User(GetUserByEmail)", err)
}
return
}
ctx.Redirect(setting.AppSubUrl + "/user/" + u.Name)
}
func Issues(ctx *middleware.Context) {
ctx.Data["Title"] = "Your Issues"
viewType := ctx.Query("type")
types := []string{"assigned", "created_by"}
if !com.IsSliceContainsStr(types, viewType) {
viewType = "all"
}
isShowClosed := ctx.Query("state") == "closed"
var filterMode int
switch viewType {
case "assigned":
filterMode = models.FM_ASSIGN
case "created_by":
filterMode = models.FM_CREATE
}
repoId, _ := com.StrTo(ctx.Query("repoid")).Int64()
issueStats := models.GetUserIssueStats(ctx.User.Id, filterMode)
// Get all repositories.
repos, err := models.GetRepositories(ctx.User.Id, true)
if err != nil {
ctx.Handle(500, "user.Issues(GetRepositories)", err)
return
}
repoIds := make([]int64, 0, len(repos))
showRepos := make([]*models.Repository, 0, len(repos))
for _, repo := range repos {
if repo.NumIssues == 0 {
continue
}
repoIds = append(repoIds, repo.Id)
repo.NumOpenIssues = repo.NumIssues - repo.NumClosedIssues
issueStats.AllCount += int64(repo.NumOpenIssues)
if isShowClosed {
if repo.NumClosedIssues > 0 {
if filterMode == models.FM_CREATE {
repo.NumClosedIssues = int(models.GetIssueCountByPoster(ctx.User.Id, repo.Id, isShowClosed))
}
showRepos = append(showRepos, repo)
}
} else {
if repo.NumOpenIssues > 0 {
if filterMode == models.FM_CREATE {
repo.NumOpenIssues = int(models.GetIssueCountByPoster(ctx.User.Id, repo.Id, isShowClosed))
}
showRepos = append(showRepos, repo)
}
}
}
if repoId > 0 {
repoIds = []int64{repoId}
}
page, _ := com.StrTo(ctx.Query("page")).Int()
// Get all issues.
var ius []*models.IssueUser
switch viewType {
case "assigned":
fallthrough
case "created_by":
ius, err = models.GetIssueUserPairsByMode(ctx.User.Id, repoId, isShowClosed, page, filterMode)
default:
ius, err = models.GetIssueUserPairsByRepoIds(repoIds, isShowClosed, page)
}
if err != nil {
ctx.Handle(500, "user.Issues(GetAllIssueUserPairs)", err)
return
}
issues := make([]*models.Issue, len(ius))
for i := range ius {
issues[i], err = models.GetIssueById(ius[i].IssueId)
if err != nil {
if err == models.ErrIssueNotExist {
log.Warn("user.Issues(GetIssueById #%d): issue not exist", ius[i].IssueId)
continue
} else {
ctx.Handle(500, fmt.Sprintf("user.Issues(GetIssueById #%d)", ius[i].IssueId), err)
return
}
}
issues[i].Repo, err = models.GetRepositoryById(issues[i].RepoId)
if err != nil {
if err == models.ErrRepoNotExist {
log.Warn("user.Issues(GetRepositoryById #%d): repository not exist", issues[i].RepoId)
continue
} else {
ctx.Handle(500, fmt.Sprintf("user.Issues(GetRepositoryById #%d)", issues[i].RepoId), err)
return
}
}
if err = issues[i].Repo.GetOwner(); err != nil {
ctx.Handle(500, "user.Issues(GetOwner)", err)
return
}
if err = issues[i].GetPoster(); err != nil {
ctx.Handle(500, "user.Issues(GetUserById)", err)
return
}
}
ctx.Data["RepoId"] = repoId
ctx.Data["Repos"] = showRepos
ctx.Data["Issues"] = issues
ctx.Data["ViewType"] = viewType
ctx.Data["IssueStats"] = issueStats
ctx.Data["IsShowClosed"] = isShowClosed
if isShowClosed {
ctx.Data["State"] = "closed"
ctx.Data["ShowCount"] = issueStats.ClosedCount
} else {
ctx.Data["ShowCount"] = issueStats.OpenCount
}
ctx.HTML(200, ISSUES)
}
| 1 | 8,737 | Why when current user is owner but still check access? Should we use `else if` here? | gogs-gogs | go |
@@ -4,12 +4,18 @@ import (
"errors"
"fmt"
"strings"
+ "time"
docker "github.com/fsouza/go-dockerclient"
. "github.com/weaveworks/weave/common"
)
+const (
+ InitialInterval = 1 * time.Second
+ MaxInterval = 60 * time.Second
+)
+
// An observer for container events
type ContainerObserver interface {
ContainerStarted(ident string) | 1 | package docker
import (
"errors"
"fmt"
"strings"
docker "github.com/fsouza/go-dockerclient"
. "github.com/weaveworks/weave/common"
)
// An observer for container events
type ContainerObserver interface {
ContainerStarted(ident string)
ContainerDied(ident string)
}
type Client struct {
*docker.Client
}
// NewClient creates a new Docker client and checks we can talk to Docker
func NewClient(apiPath string) (*Client, error) {
if apiPath != "" && !strings.Contains(apiPath, "://") {
apiPath = "tcp://" + apiPath
}
dc, err := docker.NewClient(apiPath)
if err != nil {
return nil, err
}
client := &Client{dc}
return client, client.checkWorking()
}
func NewVersionedClient(apiPath string, apiVersionString string) (*Client, error) {
if !strings.Contains(apiPath, "://") {
apiPath = "tcp://" + apiPath
}
dc, err := docker.NewVersionedClient(apiPath, apiVersionString)
if err != nil {
return nil, err
}
client := &Client{dc}
return client, client.checkWorking()
}
func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) {
dc, err := docker.NewVersionedClientFromEnv(apiVersionString)
if err != nil {
return nil, err
}
client := &Client{dc}
return client, client.checkWorking()
}
func (c *Client) checkWorking() error {
_, err := c.Version()
return err
}
func (c *Client) Info() string {
if env, err := c.Version(); err != nil {
return fmt.Sprintf("Docker API error: %s", err)
} else {
return fmt.Sprintf("Docker API on %s: %v", c.Endpoint(), env)
}
}
// AddObserver adds an observer for docker events
func (c *Client) AddObserver(ob ContainerObserver) error {
events := make(chan *docker.APIEvents)
if err := c.AddEventListener(events); err != nil {
Log.Errorf("[docker] Unable to add listener to Docker API: %s", err)
return err
}
go func() {
for event := range events {
switch event.Status {
case "start":
id := event.ID
ob.ContainerStarted(id)
case "die":
id := event.ID
ob.ContainerDied(id)
}
}
}()
return nil
}
// IsContainerNotRunning returns true if we have checked with Docker that the ID is not running
func (c *Client) IsContainerNotRunning(idStr string) bool {
container, err := c.InspectContainer(idStr)
if err == nil {
return !container.State.Running || container.State.Restarting
}
if _, notThere := err.(*docker.NoSuchContainer); notThere {
return true
}
Log.Errorf("[docker] Could not check container status: %s", err)
return false
}
// This is intended to find an IP address that we can reach the container on;
// if it is on the Docker bridge network then that address; if on the host network
// then localhost
func (c *Client) GetContainerIP(nameOrID string) (string, error) {
Log.Debugf("Getting IP for container %s", nameOrID)
info, err := c.InspectContainer(nameOrID)
if err != nil {
return "", err
}
if info.NetworkSettings.Networks != nil {
Log.Debugln("Networks: ", info.NetworkSettings.Networks)
if bridgeNetwork, ok := info.NetworkSettings.Networks["bridge"]; ok {
return bridgeNetwork.IPAddress, nil
} else if _, ok := info.NetworkSettings.Networks["host"]; ok {
return "127.0.0.1", nil
}
} else if info.HostConfig.NetworkMode == "host" {
return "127.0.0.1", nil
}
if info.NetworkSettings.IPAddress == "" {
return "", errors.New("No IP address found for container " + nameOrID)
}
return info.NetworkSettings.IPAddress, nil
}
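// Editorial sketch (not part of the original file): the diff above only adds the
// InitialInterval and MaxInterval constants; the retry logic that would use them
// is not shown here. A common shape for such constants is a capped exponential
// backoff when re-attaching the Docker event listener, roughly (hypothetical code):
//
//	func retryInterval(attempt uint) time.Duration {
//		d := InitialInterval << attempt // 1s, 2s, 4s, ...
//		if d > MaxInterval {
//			d = MaxInterval // capped at 60s, which the attached review suggests lowering
//		}
//		return d
//	}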
| 1 | 12,629 | 60 seems high. 10? 20? | weaveworks-weave | go |
@@ -755,8 +755,8 @@ def server_init_start(args):
# Create the main database link from the arguments passed over the
# command line.
- default_product_path = os.path.join(args.config_directory,
- 'Default.sqlite')
+ cfg_dir = os.path.abspath(args.config_directory)
+ default_product_path = os.path.join(cfg_dir, 'Default.sqlite')
create_default_product = 'sqlite' in args and \
not os.path.exists(default_product_path)
| 1 | # -------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -------------------------------------------------------------------------
"""
Handler for the subcommand that is used to start and manage CodeChecker
servers, which are used to query analysis report information.
"""
import argparse
import errno
import json
import os
import socket
import sys
from alembic import config
from alembic import script
from sqlalchemy.orm import sessionmaker
from shared.ttypes import DBStatus
from libcodechecker import generic_package_context
from libcodechecker import generic_package_suppress_handler
from libcodechecker import host_check
from libcodechecker import logger
from libcodechecker import output_formatters
from libcodechecker import util
from libcodechecker.analyze import analyzer_env
from libcodechecker.server import instance_manager
from libcodechecker.server import server
from libcodechecker.server.database import database
from libcodechecker.server.database import database_status
from libcodechecker.server.database.config_db_model \
import IDENTIFIER as CONFIG_META
from libcodechecker.server.database.config_db_model \
import Product as ORMProduct
from libcodechecker.server.database.run_db_model \
import IDENTIFIER as RUN_META
LOG = logger.get_logger('server')
def get_argparser_ctor_args():
"""
This method returns a dict containing the kwargs for constructing an
argparse.ArgumentParser (either directly or as a subparser).
"""
return {
'prog': 'CodeChecker server',
'formatter_class': argparse.ArgumentDefaultsHelpFormatter,
# Description is shown when the command's help is queried directly
'description': "The CodeChecker Web server is used to handle the "
"storage and navigation of analysis results. A "
"started server can be connected to via a Web "
"browser, or by using the 'CodeChecker cmd' "
"command-line client.",
# Help is shown when the "parent" CodeChecker command lists the
# individual subcommands.
'help': "Start and manage the CodeChecker Web server."
}
def add_arguments_to_parser(parser):
"""
Add the subcommand's arguments to the given argparse.ArgumentParser.
"""
# TODO: --workspace is an outdated concept in 'store'. Later on,
# it shall be deprecated, as changes to db_handler commence.
parser.add_argument('-w', '--workspace',
type=str,
dest="workspace",
default=util.get_default_workspace(),
required=False,
help="Directory where CodeChecker can store analysis "
"result related data, such as the database. "
"(Cannot be specified at the same time with "
"'--sqlite' or '--config-directory'.)")
parser.add_argument('-f', '--config-directory',
type=str,
dest="config_directory",
default=util.get_default_workspace(),
required=False,
help="Directory where CodeChecker server should read "
"server-specific configuration (such as "
"authentication settings, SSL certificate"
" (cert.pem) and key (key.pem)) from.")
parser.add_argument('--host',
type=str,
dest="listen_address",
default="localhost",
required=False,
help="The IP address or hostname of the server on "
"which it should listen for connections.")
# TODO: -v/--view-port is too verbose. The server's -p/--port is used
# symmetrically in 'CodeChecker cmd' anyways.
parser.add_argument('-v', '--view-port', # TODO: <- Deprecate and remove.
'-p', '--port',
type=int,
dest="view_port",
metavar='PORT',
default=8001,
required=False,
help="The port which will be used as listen port for "
"the server.")
# TODO: This should be removed later on, in favour of --host.
parser.add_argument('--not-host-only',
dest="not_host_only",
action="store_true",
required=False,
help="If specified, storing and viewing the results "
"will be possible not only by browsers and "
"clients running locally, but to everyone, who "
"can access the server over the Internet. "
"(Equivalent to specifying '--host \"\"'.)")
parser.add_argument('--skip-db-cleanup',
dest="skip_db_cleanup",
action='store_true',
default=False,
required=False,
help="Skip performing cleanup jobs on the database "
"like removing unused files.")
dbmodes = parser.add_argument_group("configuration database arguments")
dbmodes = dbmodes.add_mutually_exclusive_group(required=False)
dbmodes.add_argument('--sqlite',
type=str,
dest="sqlite",
metavar='SQLITE_FILE',
default=os.path.join(
'<CONFIG_DIRECTORY>',
"config.sqlite"),
required=False,
help="Path of the SQLite database file to use.")
dbmodes.add_argument('--postgresql',
dest="postgresql",
action='store_true',
required=False,
default=argparse.SUPPRESS,
help="Specifies that a PostgreSQL database is to be "
"used instead of SQLite. See the \"PostgreSQL "
"arguments\" section on how to configure the "
"database connection.")
pgsql = parser.add_argument_group("PostgreSQL arguments",
"Values of these arguments are ignored, "
"unless '--postgresql' is specified!")
# TODO: --dbSOMETHING arguments are kept to not break interface from
# old command. Database using commands such as "CodeChecker store" no
# longer supports these --- it would be ideal to break and remove args
# with this style and only keep --db-SOMETHING.
pgsql.add_argument('--dbaddress', '--db-host',
type=str,
dest="dbaddress",
default="localhost",
required=False,
help="Database server address.")
pgsql.add_argument('--dbport', '--db-port',
type=int,
dest="dbport",
default=5432,
required=False,
help="Database server port.")
pgsql.add_argument('--dbusername', '--db-username',
type=str,
dest="dbusername",
default='codechecker',
required=False,
help="Username to use for connection.")
pgsql.add_argument('--dbname', '--db-name',
type=str,
dest="dbname",
default="config",
required=False,
help="Name of the database to use.")
root_account = parser.add_argument_group(
"root account arguments",
"Servers automatically create a root user to access the server's "
"configuration via the clients. This user is created at first start "
"and saved in the CONFIG_DIRECTORY, and the credentials are printed "
"to the server's standard output. The plaintext credentials are "
"NEVER accessible again.")
root_account.add_argument('--reset-root',
dest="reset_root",
action='store_true',
default=argparse.SUPPRESS,
required=False,
help="Force the server to recreate the master "
"superuser (root) account name and "
"password. The previous credentials will "
"be invalidated, and the new ones will be "
"printed to the standard output.")
root_account.add_argument('--force-authentication',
dest="force_auth",
action='store_true',
default=argparse.SUPPRESS,
required=False,
help="Force the server to run in "
"authentication requiring mode, despite "
"the configuration value in "
"'server_config.json'. This is needed "
"if you need to edit the product "
"configuration of a server that would not "
"require authentication otherwise.")
instance_mgmnt = parser.add_argument_group("running server management")
instance_mgmnt = instance_mgmnt. \
add_mutually_exclusive_group(required=False)
instance_mgmnt.add_argument('-l', '--list',
dest="list",
action='store_true',
default=argparse.SUPPRESS,
required=False,
help="List the servers that has been started "
"by you.")
# TODO: '-s' was removed from 'quickcheck', it shouldn't be here either?
instance_mgmnt.add_argument('-s', '--stop',
dest="stop",
action='store_true',
default=argparse.SUPPRESS,
required=False,
help="Stops the server associated with "
"the given view-port and workspace.")
instance_mgmnt.add_argument('--stop-all',
dest="stop_all",
action='store_true',
default=argparse.SUPPRESS,
required=False,
help="Stops all of your running CodeChecker "
"server instances.")
database_mgmnt = parser.add_argument_group(
"Database management arguments.",
"""WARNING these commands needs to be called with the same
workspace and configuration arguments as the server so the
configuration database will be found which is required for the
schema migration. Migration can be done without a running server
but pay attention to use the same arguments which will be used to
start the server.
NOTE:
Before migration it is advised to create a full backup of
the product databases.
""")
database_mgmnt = database_mgmnt. \
add_mutually_exclusive_group(required=False)
database_mgmnt.add_argument('--db-status',
type=str,
dest="status",
action='store',
default=argparse.SUPPRESS,
required=False,
help="Name of the product to get "
"the database status for. "
"Use 'all' to list the database "
"statuses for all of the products.")
database_mgmnt.add_argument('--db-upgrade-schema',
type=str,
dest='product_to_upgrade',
action='store',
default=argparse.SUPPRESS,
required=False,
help="Name of the product to upgrade to the "
"latest database schema available in "
"the package. Use 'all' to upgrade all "
"of the products."
"NOTE: Before migration it is advised"
" to create a full backup of "
"the product databases.")
logger.add_verbose_arguments(parser)
def __handle(args):
"""Custom handler for 'server' so custom error messages can be
printed without having to capture 'parser' in main."""
def arg_match(options):
return util.arg_match(options, sys.argv[1:])
# See if there is a "PostgreSQL argument" specified in the invocation
# without '--postgresql' being there. There is no way to distinguish
# a default argument and a deliberately specified argument without
# inspecting sys.argv.
options = ['--dbaddress', '--dbport', '--dbusername', '--dbname',
'--db-host', '--db-port', '--db-username', '--db-name']
psql_args_matching = arg_match(options)
if any(psql_args_matching) and\
'postgresql' not in args:
first_matching_arg = next(iter([match for match
in psql_args_matching]))
parser.error("argument {0}: not allowed without "
"argument --postgresql".format(first_matching_arg))
# parser.error() terminates with return code 2.
# --not-host-only is a "shortcut", actually a to-be-deprecated
# call which means '--host ""'.
# TODO: Actually deprecate --not-host-only later on.
options = ['--not-host-only', '--host']
if set(arg_match(options)) == set(options):
parser.error("argument --not-host-only: not allowed with "
"argument --host, as it is a shortcut to --host "
"\"\"")
else:
# Apply the shortcut.
if len(arg_match(['--not-host-only'])) > 0:
args.listen_address = "" # Listen on every interface.
# --not-host-only is just a shortcut optstring, no actual use
# is intended later on.
delattr(args, 'not_host_only')
# --workspace and --sqlite cannot be specified either, as
# both point to a database location.
options = ['--sqlite', '--workspace']
options_short = ['--sqlite', '-w']
if set(arg_match(options)) == set(options) or \
set(arg_match(options_short)) == set(options_short):
parser.error("argument --sqlite: not allowed with "
"argument --workspace")
# --workspace and --config-directory also aren't allowed together now,
# the latter one is expected to replace the earlier.
options = ['--config-directory', '--workspace']
options_short = ['--config-directory', '-w']
if set(arg_match(options)) == set(options) or \
set(arg_match(options_short)) == set(options_short):
parser.error("argument --config-directory: not allowed with "
"argument --workspace")
# If workspace is specified, sqlite is workspace/config.sqlite
# and config_directory is the workspace directory.
if len(arg_match(['--workspace', '-w'])) > 0:
args.config_directory = args.workspace
args.sqlite = os.path.join(args.workspace,
'config.sqlite')
setattr(args, 'dbdatadir', os.path.join(args.workspace,
'pgsql_data'))
# Workspace should not exist as a Namespace key.
delattr(args, 'workspace')
if '<CONFIG_DIRECTORY>' in args.sqlite:
# Replace the placeholder variable with the actual value.
args.sqlite = args.sqlite.replace('<CONFIG_DIRECTORY>',
args.config_directory)
if 'postgresql' not in args:
# Later called database modules need the argument to be actually
# present, even though the default is suppressed in the optstring.
setattr(args, 'postgresql', False)
# This is not needed by the database starter as we are
# running SQLite.
if 'dbdatadir' in args:
delattr(args, 'dbdatadir')
else:
# If --postgresql is given, --sqlite is useless.
delattr(args, 'sqlite')
# If everything is fine, do call the handler for the subcommand.
main(args)
parser.set_defaults(func=__handle)
def print_prod_status(prod_status):
"""
Print the database statuses for each of the products.
"""
header = ['Product endpoint', 'Database status',
'Database location',
'Schema version in the database',
'Schema version in the package']
rows = []
for k, v in prod_status.items():
db_status, schema_ver, package_ver, db_location = v
db_status_msg = database_status.db_status_msg.get(db_status)
if schema_ver == package_ver:
schema_ver += " (up to date)"
rows.append([k, db_status_msg, db_location, schema_ver, package_ver])
prod_status = output_formatters.twodim_to_str('table',
header,
rows,
sort_by_column_number=0)
LOG.info('Status of products:\n{0}'.format(prod_status))
def get_schema_version_from_package(migration_root):
"""
Returns the latest schema version in the package.
"""
cfg = config.Config()
cfg.set_main_option("script_location", migration_root)
pckg_schema_ver = script.ScriptDirectory.from_config(cfg)
return pckg_schema_ver.get_current_head()
def check_product_db_status(cfg_sql_server, context):
"""
Check the products for database statuses.
:returns: dictionary of product endpoints with database statuses
"""
migration_root = context.run_migration_root
engine = cfg_sql_server.create_engine()
config_session = sessionmaker(bind=engine)
sess = config_session()
try:
products = sess.query(ORMProduct).all()
except Exception as ex:
LOG.debug(ex)
LOG.error("Failed to get product configurations from the database.")
LOG.error("Please check your command arguments.")
sys.exit(1)
package_schema = get_schema_version_from_package(migration_root)
db_errors = [DBStatus.FAILED_TO_CONNECT,
DBStatus.MISSING,
DBStatus.SCHEMA_INIT_ERROR,
DBStatus.SCHEMA_MISSING]
cc_env = analyzer_env.get_check_env(context.path_env_extra,
context.ld_lib_path_extra)
prod_status = {}
for pd in products:
db = database.SQLServer.from_connection_string(pd.connection,
RUN_META,
migration_root,
interactive=False,
env=cc_env)
db_location = db.get_db_location()
ret = db.connect()
s_ver = db.get_schema_version()
if s_ver in db_errors:
s_ver = None
prod_status[pd.endpoint] = (ret, s_ver, package_schema, db_location)
sess.commit()
sess.close()
engine.dispose()
return prod_status
def __db_status_check(cfg_sql_server, context, product_name=None):
"""
Check and print database statuses for the given product.
"""
if not product_name:
return 0
LOG.debug("Checking database status for " + product_name +
" product.")
prod_statuses = check_product_db_status(cfg_sql_server, context)
if product_name != 'all':
avail = prod_statuses.get(product_name)
if not avail:
LOG.error("No product was found with this endpoint: " +
str(product_name))
return 1
prod_statuses = {k: v for k, v in prod_statuses.items()
if k == product_name}
print_prod_status(prod_statuses)
return 0
def __db_migration(cfg_sql_server, context, product_to_upgrade='all'):
"""
Handle database management.
Schema checking and migration.
"""
LOG.info("Preparing schema upgrade for " + str(product_to_upgrade))
product_name = product_to_upgrade
prod_statuses = check_product_db_status(cfg_sql_server, context)
prod_to_upgrade = []
if product_name != 'all':
avail = prod_statuses.get(product_name)
if not avail:
LOG.error("No product was found with this endpoint: " +
product_name)
return 1
prod_to_upgrade.append(product_name)
else:
prod_to_upgrade = list(prod_statuses.keys())
migration_root = context.run_migration_root
LOG.warning("Please note after migration only "
"newer CodeChecker versions can be used "
"to start the server")
LOG.warning("It is advised to make a full backup of your "
"run databases.")
cc_env = analyzer_env.get_check_env(context.path_env_extra,
context.ld_lib_path_extra)
for prod in prod_to_upgrade:
LOG.info("========================")
LOG.info("Checking: " + prod)
engine = cfg_sql_server.create_engine()
config_session = sessionmaker(bind=engine)
sess = config_session()
product = sess.query(ORMProduct).filter(
ORMProduct.endpoint == prod).first()
db = database.SQLServer.from_connection_string(product.connection,
RUN_META,
migration_root,
interactive=False,
env=cc_env)
db_status = db.connect()
msg = database_status.db_status_msg.get(db_status,
'Unknown database status')
LOG.info(msg)
if db_status == DBStatus.SCHEMA_MISSING:
question = 'Do you want to initialize a new schema for ' \
+ product.endpoint + '? Y(es)/n(o) '
if util.get_user_input(question):
ret = db.connect(init=True)
msg = database_status.db_status_msg.get(
ret, 'Unknown database status')
else:
LOG.info("No schema initialization was done.")
elif db_status == DBStatus.SCHEMA_MISMATCH_OK:
question = 'Do you want to upgrade to new schema for ' \
+ product.endpoint + '? Y(es)/n(o) '
if util.get_user_input(question):
LOG.info("Upgrading schema ...")
ret = db.upgrade()
LOG.info("Done.")
msg = database_status.db_status_msg.get(
ret, 'Unknown database status')
else:
LOG.info("No schema migration was done.")
sess.commit()
sess.close()
engine.dispose()
LOG.info("========================")
return 0
def __instance_management(args):
"""Handles the instance-manager commands --list/--stop/--stop-all."""
# TODO: The server stopping and listing must be revised on its invocation
# once "workspace", as a concept, is removed.
# QUESTION: What is the bestest way here to identify a server for the user?
if 'list' in args:
instances = instance_manager.get_instances()
instances_on_multiple_hosts = any(True for inst in instances
if inst['hostname'] !=
socket.gethostname())
if not instances_on_multiple_hosts:
head = ['Workspace', 'View port']
else:
head = ['Workspace', 'Computer host', 'View port']
rows = []
for instance in instance_manager.get_instances():
if not instances_on_multiple_hosts:
rows.append((instance['workspace'], str(instance['port'])))
else:
rows.append((instance['workspace'],
instance['hostname']
if instance['hostname'] != socket.gethostname()
else '',
str(instance['port'])))
print("Your running CodeChecker servers:")
print(output_formatters.twodim_to_str('table', head, rows))
elif 'stop' in args or 'stop_all' in args:
for i in instance_manager.get_instances():
if i['hostname'] != socket.gethostname():
continue
# A STOP only stops the server associated with the given workspace
# and view-port.
if 'stop' in args and \
not (i['port'] == args.view_port and
os.path.abspath(i['workspace']) ==
os.path.abspath(args.config_directory)):
continue
try:
util.kill_process_tree(i['pid'])
LOG.info("Stopped CodeChecker server running on port {0} "
"in workspace {1} (PID: {2})".
format(i['port'], i['workspace'], i['pid']))
except Exception:
# Let the exception come out if the commands fail
LOG.error("Couldn't stop process PID #" + str(i['pid']))
raise
def server_init_start(args):
"""
Start or manage a CodeChecker report server.
"""
if 'list' in args or 'stop' in args or 'stop_all' in args:
__instance_management(args)
sys.exit(0)
# Actual server starting from this point.
if not host_check.check_zlib():
raise Exception("zlib is not available on the system!")
# WARNING
# In case of SQLite args.dbaddress default value is used
# for which the is_localhost should return true.
if util.is_localhost(args.dbaddress) and \
not os.path.exists(args.config_directory):
os.makedirs(args.config_directory)
# Make sure the SQLite file can be created if it not exists.
if 'sqlite' in args and \
not os.path.isdir(os.path.dirname(args.sqlite)):
os.makedirs(os.path.dirname(args.sqlite))
if 'reset_root' in args:
try:
os.remove(os.path.join(args.config_directory, 'root.user'))
LOG.info("Master superuser (root) credentials invalidated and "
"deleted. New ones will be generated...")
except OSError:
# File doesn't exist.
pass
if 'force_auth' in args:
LOG.info("'--force-authentication' was passed as a command-line "
"option. The server will ask for users to authenticate!")
context = generic_package_context.get_context()
context.codechecker_workspace = args.config_directory
context.db_username = args.dbusername
check_env = analyzer_env.get_check_env(context.path_env_extra,
context.ld_lib_path_extra)
cfg_sql_server = database.SQLServer.from_cmdline_args(
vars(args), CONFIG_META, context.config_migration_root,
interactive=True, env=check_env)
LOG.info("Checking configuration database ...")
db_status = cfg_sql_server.connect()
db_status_msg = database_status.db_status_msg.get(db_status)
LOG.info(db_status_msg)
if db_status == DBStatus.SCHEMA_MISSING:
LOG.debug("Config database schema is missing, initializing new.")
db_status = cfg_sql_server.connect(init=True)
if db_status != DBStatus.OK:
LOG.error("Config database initialization failed!")
LOG.error("Please check debug logs.")
sys.exit(1)
if db_status == DBStatus.SCHEMA_MISMATCH_NO:
LOG.debug("Configuration database schema mismatch.")
LOG.debug("No schema upgrade is possible.")
sys.exit(1)
if db_status == DBStatus.SCHEMA_MISMATCH_OK:
LOG.debug("Configuration database schema mismatch.")
LOG.debug("Schema upgrade is possible.")
LOG.warning("Please note after migration only "
"newer CodeChecker versions can be used"
"to start the server")
LOG.warning("It is advised to make a full backup of your "
"configuration database")
LOG.warning(cfg_sql_server.get_db_location())
question = 'Do you want to upgrade to the new schema?' \
' Y(es)/n(o) '
if util.get_user_input(question):
print("Upgrading schema ...")
ret = cfg_sql_server.upgrade()
msg = database_status.db_status_msg.get(
ret, 'Unknown database status')
print(msg)
if ret != DBStatus.OK:
LOG.error("Schema migration failed")
sys.exit(ret)
else:
LOG.info("No schema migration was done.")
sys.exit(0)
if db_status == DBStatus.MISSING:
LOG.error("Missing configuration database.")
LOG.error("Server can not be started.")
sys.exit(1)
# Configuration database setup and check is needed before database
# statuses can be checked.
try:
if args.status:
ret = __db_status_check(cfg_sql_server, context, args.status)
sys.exit(ret)
except AttributeError:
LOG.debug('Status was not in the arguments.')
try:
if args.product_to_upgrade:
ret = __db_migration(cfg_sql_server, context,
args.product_to_upgrade)
sys.exit(ret)
except AttributeError:
LOG.debug('Product upgrade was not in the arguments.')
# Create the main database link from the arguments passed over the
# command line.
default_product_path = os.path.join(args.config_directory,
'Default.sqlite')
create_default_product = 'sqlite' in args and \
not os.path.exists(default_product_path)
if create_default_product:
# Create a default product and add it to the configuration database.
LOG.debug("Create default product...")
LOG.debug("Configuring schema and migration...")
prod_server = database.SQLiteDatabase(
default_product_path, RUN_META,
context.run_migration_root, check_env)
LOG.debug("Checking 'Default' product database.")
db_status = prod_server.connect()
if db_status != DBStatus.MISSING:
db_status = prod_server.connect(init=True)
LOG.error(database_status.db_status_msg.get(db_status))
if db_status != DBStatus.OK:
LOG.error("Failed to configure default product")
sys.exit(1)
product_conn_string = prod_server.get_connection_string()
server.add_initial_run_database(
cfg_sql_server, product_conn_string)
LOG.info("Product 'Default' at '{0}' created and set up."
.format(default_product_path))
prod_statuses = check_product_db_status(cfg_sql_server, context)
upgrade_available = {}
for k, v in prod_statuses.items():
db_status, _, _, _ = v
if db_status == DBStatus.SCHEMA_MISMATCH_OK or \
db_status == DBStatus.SCHEMA_MISSING:
upgrade_available[k] = v
if upgrade_available:
print_prod_status(prod_statuses)
LOG.warning("Multiple products can be upgraded, make a backup!")
__db_migration(cfg_sql_server, context)
prod_statuses = check_product_db_status(cfg_sql_server, context)
print_prod_status(prod_statuses)
non_ok_db = False
for k, v in prod_statuses.items():
db_status, _, _, _ = v
if db_status != DBStatus.OK:
non_ok_db = True
break
if non_ok_db:
msg = "There are some database issues. " \
"Do you want to start the " \
"server? Y(es)/n(o) "
if not util.get_user_input(msg):
sys.exit(1)
# Start database viewer.
checker_md_docs = os.path.join(context.doc_root, 'checker_md_docs')
checker_md_docs_map = os.path.join(checker_md_docs,
'checker_doc_map.json')
with open(checker_md_docs_map, 'r') as dFile:
checker_md_docs_map = json.load(dFile)
package_data = {'www_root': context.www_root,
'doc_root': context.doc_root,
'checker_md_docs': checker_md_docs,
'checker_md_docs_map': checker_md_docs_map,
'version': context.package_git_tag}
suppress_handler = generic_package_suppress_handler. \
GenericSuppressHandler(None, False)
try:
server.start_server(args.config_directory,
package_data,
args.view_port,
cfg_sql_server,
suppress_handler,
args.listen_address,
'force_auth' in args,
args.skip_db_cleanup,
context,
check_env)
except socket.error as err:
if err.errno == errno.EADDRINUSE:
LOG.error("Server can't be started, maybe the given port number "
"({}) is already used. Check the connection "
"parameters.".format(args.view_port))
sys.exit(1)
else:
raise
def main(args):
"""
    Set up a logger server based on the configuration and
manage the CodeChecker server.
"""
with logger.LOG_CFG_SERVER(args.verbose if 'verbose' in args else None):
server_init_start(args)
| 1 | 9,287 | I think this is not the best way to handle this problem. This way every relative path will be converted to absolute path. What if the user would like to move his config directory to a different directory? | Ericsson-codechecker | c |
@@ -169,6 +169,10 @@ type NetworkSpec struct {
// Subnets configuration.
// +optional
Subnets Subnets `json:"subnets,omitempty"`
+
+ // ControlPlaneLoadBalancerScheme (defaults to Internet-facing)
+ // +optional
+	ControlPlaneLoadBalancerScheme ClassicELBScheme `json:"controlPlaneLoadBalancerScheme,omitempty"`
}
// APIEndpoint represents a reachable Kubernetes API endpoint. | 1 | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
"fmt"
"sort"
"time"
)
// AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters.
// Only one of ID, ARN or Filters may be specified. Specifying more than one will result in
// a validation error.
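// For example (illustrative, not in the original source), a reference may
// select an AMI by tag with a single Filter such as
// {Name: "tag:Name", Values: []string{"my-ami"}} instead of a fixed ID or ARN.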
type AWSResourceReference struct {
// ID of resource
// +optional
ID *string `json:"id,omitempty"`
// ARN of resource
// +optional
ARN *string `json:"arn,omitempty"`
// Filters is a set of key/value pairs used to identify a resource
// They are applied according to the rules defined by the AWS API:
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
// +optional
Filters []Filter `json:"filters,omitempty"`
}
// AWSMachineTemplateResource describes the data needed to create an AWSMachine from a template
type AWSMachineTemplateResource struct {
// Spec is the specification of the desired behavior of the machine.
Spec AWSMachineSpec `json:"spec"`
}
// Filter is a filter used to identify an AWS resource
type Filter struct {
// Name of the filter. Filter names are case-sensitive.
Name string `json:"name"`
// Values includes one or more filter values. Filter values are case-sensitive.
Values []string `json:"values"`
}
// AWSMachineProviderConditionType is a valid value for AWSMachineProviderCondition.Type
type AWSMachineProviderConditionType string
// Valid conditions for an AWS machine instance
const (
// MachineCreated indicates whether the machine has been created or not. If not,
// it should include a reason and message for the failure.
MachineCreated AWSMachineProviderConditionType = "MachineCreated"
)
// Network encapsulates AWS networking resources.
type Network struct {
// SecurityGroups is a map from the role/kind of the security group to its unique name, if any.
SecurityGroups map[SecurityGroupRole]SecurityGroup `json:"securityGroups,omitempty"`
// APIServerELB is the Kubernetes api server classic load balancer.
APIServerELB ClassicELB `json:"apiServerElb,omitempty"`
}
// ClassicELBScheme defines the scheme of a classic load balancer.
type ClassicELBScheme string
var (
// ClassicELBSchemeInternetFacing defines an internet-facing, publicly
// accessible AWS Classic ELB scheme
ClassicELBSchemeInternetFacing = ClassicELBScheme("Internet-facing")
	// ClassicELBSchemeInternal defines an internal-only facing
	// load balancer, addressable only from within the VPC.
ClassicELBSchemeInternal = ClassicELBScheme("internal")
)
// ClassicELBProtocol defines listener protocols for a classic load balancer.
type ClassicELBProtocol string
var (
// ClassicELBProtocolTCP defines the ELB API string representing the TCP protocol
ClassicELBProtocolTCP = ClassicELBProtocol("TCP")
// ClassicELBProtocolSSL defines the ELB API string representing the TLS protocol
ClassicELBProtocolSSL = ClassicELBProtocol("SSL")
// ClassicELBProtocolHTTP defines the ELB API string representing the HTTP protocol at L7
ClassicELBProtocolHTTP = ClassicELBProtocol("HTTP")
	// ClassicELBProtocolHTTPS defines the ELB API string representing the HTTPS protocol at L7
ClassicELBProtocolHTTPS = ClassicELBProtocol("HTTPS")
)
// ClassicELB defines an AWS classic load balancer.
type ClassicELB struct {
// The name of the load balancer. It must be unique within the set of load balancers
// defined in the region. It also serves as identifier.
Name string `json:"name,omitempty"`
// DNSName is the dns name of the load balancer.
DNSName string `json:"dnsName,omitempty"`
// Scheme is the load balancer scheme, either internet-facing or private.
Scheme ClassicELBScheme `json:"scheme,omitempty"`
// SubnetIDs is an array of subnets in the VPC attached to the load balancer.
SubnetIDs []string `json:"subnetIds,omitempty"`
// SecurityGroupIDs is an array of security groups assigned to the load balancer.
SecurityGroupIDs []string `json:"securityGroupIds,omitempty"`
// Listeners is an array of classic elb listeners associated with the load balancer. There must be at least one.
Listeners []*ClassicELBListener `json:"listeners,omitempty"`
// HealthCheck is the classic elb health check associated with the load balancer.
HealthCheck *ClassicELBHealthCheck `json:"healthChecks,omitempty"`
// Attributes defines extra attributes associated with the load balancer.
Attributes ClassicELBAttributes `json:"attributes,omitempty"`
// Tags is a map of tags associated with the load balancer.
Tags map[string]string `json:"tags,omitempty"`
}
// ClassicELBAttributes defines extra attributes associated with a classic load balancer.
type ClassicELBAttributes struct {
// IdleTimeout is time that the connection is allowed to be idle (no data
// has been sent over the connection) before it is closed by the load balancer.
IdleTimeout time.Duration `json:"idleTimeout,omitempty"`
}
// ClassicELBListener defines an AWS classic load balancer listener.
type ClassicELBListener struct {
Protocol ClassicELBProtocol `json:"protocol"`
Port int64 `json:"port"`
InstanceProtocol ClassicELBProtocol `json:"instanceProtocol"`
InstancePort int64 `json:"instancePort"`
}
// ClassicELBHealthCheck defines an AWS classic load balancer health check.
type ClassicELBHealthCheck struct {
Target string `json:"target"`
Interval time.Duration `json:"interval"`
Timeout time.Duration `json:"timeout"`
HealthyThreshold int64 `json:"healthyThreshold"`
UnhealthyThreshold int64 `json:"unhealthyThreshold"`
}
// NetworkSpec encapsulates all things related to AWS network.
type NetworkSpec struct {
// VPC configuration.
// +optional
VPC VPCSpec `json:"vpc,omitempty"`
// Subnets configuration.
// +optional
Subnets Subnets `json:"subnets,omitempty"`
}
// APIEndpoint represents a reachable Kubernetes API endpoint.
type APIEndpoint struct {
// The hostname on which the API server is serving.
Host string `json:"host"`
// The port on which the API server is serving.
Port int `json:"port"`
}
// VPCSpec configures an AWS VPC.
type VPCSpec struct {
// ID is the vpc-id of the VPC this provider should use to create resources.
ID string `json:"id,omitempty"`
// CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
// Defaults to 10.0.0.0/16.
CidrBlock string `json:"cidrBlock,omitempty"`
// InternetGatewayID is the id of the internet gateway associated with the VPC.
// +optional
InternetGatewayID *string `json:"internetGatewayId,omitempty"`
// Tags is a collection of tags describing the resource.
Tags Tags `json:"tags,omitempty"`
}
// String returns a string representation of the VPC.
func (v *VPCSpec) String() string {
return fmt.Sprintf("id=%s", v.ID)
}
// IsUnmanaged returns true if the VPC is unmanaged.
func (v *VPCSpec) IsUnmanaged(clusterName string) bool {
return v.ID != "" && !v.Tags.HasOwned(clusterName)
}
// SubnetSpec configures an AWS Subnet.
type SubnetSpec struct {
// ID defines a unique identifier to reference this resource.
ID string `json:"id,omitempty"`
// CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
CidrBlock string `json:"cidrBlock,omitempty"`
// AvailabilityZone defines the availability zone to use for this subnet in the cluster's region.
AvailabilityZone string `json:"availabilityZone,omitempty"`
// IsPublic defines the subnet as a public subnet. A subnet is public when it is associated with a route table that has a route to an internet gateway.
// +optional
IsPublic bool `json:"isPublic"`
// RouteTableID is the routing table id associated with the subnet.
// +optional
RouteTableID *string `json:"routeTableId"`
// NatGatewayID is the NAT gateway id associated with the subnet.
// Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
// +optional
NatGatewayID *string `json:"natGatewayId,omitempty"`
// Tags is a collection of tags describing the resource.
Tags Tags `json:"tags,omitempty"`
}
// String returns a string representation of the subnet.
func (s *SubnetSpec) String() string {
return fmt.Sprintf("id=%s/az=%s/public=%v", s.ID, s.AvailabilityZone, s.IsPublic)
}
// Subnets is a slice of Subnet.
type Subnets []*SubnetSpec
// ToMap returns a map from id to subnet.
func (s Subnets) ToMap() map[string]*SubnetSpec {
res := make(map[string]*SubnetSpec)
for _, x := range s {
res[x.ID] = x
}
return res
}
// FindByID returns a single subnet matching the given id or nil.
func (s Subnets) FindByID(id string) *SubnetSpec {
for _, x := range s {
if x.ID == id {
return x
}
}
return nil
}
// FilterPrivate returns a slice containing all subnets marked as private.
func (s Subnets) FilterPrivate() (res Subnets) {
for _, x := range s {
if !x.IsPublic {
res = append(res, x)
}
}
return
}
// FilterPublic returns a slice containing all subnets marked as public.
func (s Subnets) FilterPublic() (res Subnets) {
for _, x := range s {
if x.IsPublic {
res = append(res, x)
}
}
return
}
// FilterByZone returns a slice containing all subnets that live in the availability zone specified.
func (s Subnets) FilterByZone(zone string) (res Subnets) {
for _, x := range s {
if x.AvailabilityZone == zone {
res = append(res, x)
}
}
return
}
// RouteTable defines an AWS routing table.
type RouteTable struct {
ID string `json:"id"`
}
// SecurityGroupRole defines the unique role of a security group.
type SecurityGroupRole string
var (
// SecurityGroupBastion defines an SSH bastion role
SecurityGroupBastion = SecurityGroupRole("bastion")
// SecurityGroupNode defines a Kubernetes workload node role
SecurityGroupNode = SecurityGroupRole("node")
// SecurityGroupControlPlane defines a Kubernetes control plane node role
SecurityGroupControlPlane = SecurityGroupRole("controlplane")
// SecurityGroupLB defines a container for the cloud provider to inject its load balancer ingress rules
SecurityGroupLB = SecurityGroupRole("lb")
)
// SecurityGroup defines an AWS security group.
type SecurityGroup struct {
// ID is a unique identifier.
ID string `json:"id"`
// Name is the security group name.
Name string `json:"name"`
// IngressRules is the inbound rules associated with the security group.
// +optional
IngressRules IngressRules `json:"ingressRule"`
// Tags is a map of tags associated with the security group.
Tags Tags `json:"tags,omitempty"`
}
// String returns a string representation of the security group.
func (s *SecurityGroup) String() string {
return fmt.Sprintf("id=%s/name=%s", s.ID, s.Name)
}
// SecurityGroupProtocol defines the protocol type for a security group rule.
type SecurityGroupProtocol string
var (
// SecurityGroupProtocolAll is a wildcard for all IP protocols
SecurityGroupProtocolAll = SecurityGroupProtocol("-1")
// SecurityGroupProtocolIPinIP represents the IP in IP protocol in ingress rules
SecurityGroupProtocolIPinIP = SecurityGroupProtocol("4")
// SecurityGroupProtocolTCP represents the TCP protocol in ingress rules
SecurityGroupProtocolTCP = SecurityGroupProtocol("tcp")
// SecurityGroupProtocolUDP represents the UDP protocol in ingress rules
SecurityGroupProtocolUDP = SecurityGroupProtocol("udp")
// SecurityGroupProtocolICMP represents the ICMP protocol in ingress rules
SecurityGroupProtocolICMP = SecurityGroupProtocol("icmp")
// SecurityGroupProtocolICMPv6 represents the ICMPv6 protocol in ingress rules
SecurityGroupProtocolICMPv6 = SecurityGroupProtocol("58")
)
// IngressRule defines an AWS ingress rule for security groups.
type IngressRule struct {
Description string `json:"description"`
Protocol SecurityGroupProtocol `json:"protocol"`
FromPort int64 `json:"fromPort"`
ToPort int64 `json:"toPort"`
// List of CIDR blocks to allow access from. Cannot be specified with SourceSecurityGroupID.
// +optional
CidrBlocks []string `json:"cidrBlocks"`
	// The security group ids to allow access from. Cannot be specified with CidrBlocks.
// +optional
SourceSecurityGroupIDs []string `json:"sourceSecurityGroupIds"`
}
// String returns a string representation of the ingress rule.
func (i *IngressRule) String() string {
return fmt.Sprintf("protocol=%s/range=[%d-%d]/description=%s", i.Protocol, i.FromPort, i.ToPort, i.Description)
}
// IngressRules is a slice of AWS ingress rules for security groups.
type IngressRules []*IngressRule
// Difference returns the difference between this slice and the other slice.
func (i IngressRules) Difference(o IngressRules) (out IngressRules) {
for _, x := range i {
found := false
for _, y := range o {
if x.Equals(y) {
found = true
break
}
}
if !found {
out = append(out, x)
}
}
return
}
// Equals returns true if two IngressRule are equal
func (i *IngressRule) Equals(o *IngressRule) bool {
if len(i.CidrBlocks) != len(o.CidrBlocks) {
return false
}
sort.Strings(i.CidrBlocks)
sort.Strings(o.CidrBlocks)
for i, v := range i.CidrBlocks {
if v != o.CidrBlocks[i] {
return false
}
}
if len(i.SourceSecurityGroupIDs) != len(o.SourceSecurityGroupIDs) {
return false
}
sort.Strings(i.SourceSecurityGroupIDs)
sort.Strings(o.SourceSecurityGroupIDs)
for i, v := range i.SourceSecurityGroupIDs {
if v != o.SourceSecurityGroupIDs[i] {
return false
}
}
if i.Description != o.Description || i.Protocol != o.Protocol {
return false
}
// AWS seems to ignore the From/To port when set on protocols where it doesn't apply, but
// we avoid serializing it out for clarity's sake.
// See: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IpPermission.html
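	// Illustrative note (not in the original source): two rules with Protocol
	// "-1" (all protocols) compare equal here even if their FromPort/ToPort
	// differ, because the switch below only checks ports for TCP, UDP, ICMP
	// and ICMPv6.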
switch i.Protocol {
case SecurityGroupProtocolTCP,
SecurityGroupProtocolUDP,
SecurityGroupProtocolICMP,
SecurityGroupProtocolICMPv6:
return i.FromPort == o.FromPort && i.ToPort == o.ToPort
}
return true
}
// InstanceState describes the state of an AWS instance.
type InstanceState string
var (
// InstanceStatePending is the string representing an instance in a pending state
InstanceStatePending = InstanceState("pending")
	// InstanceStateRunning is the string representing an instance in a running state
InstanceStateRunning = InstanceState("running")
// InstanceStateShuttingDown is the string representing an instance shutting down
InstanceStateShuttingDown = InstanceState("shutting-down")
// InstanceStateTerminated is the string representing an instance that has been terminated
InstanceStateTerminated = InstanceState("terminated")
// InstanceStateStopping is the string representing an instance
// that is in the process of being stopped and can be restarted
InstanceStateStopping = InstanceState("stopping")
// InstanceStateStopped is the string representing an instance
// that has been stopped and can be restarted
InstanceStateStopped = InstanceState("stopped")
)
// Instance describes an AWS instance.
type Instance struct {
ID string `json:"id"`
// The current state of the instance.
State InstanceState `json:"instanceState,omitempty"`
// The instance type.
Type string `json:"type,omitempty"`
// The ID of the subnet of the instance.
SubnetID string `json:"subnetId,omitempty"`
// The ID of the AMI used to launch the instance.
ImageID string `json:"imageId,omitempty"`
// The name of the SSH key pair.
SSHKeyName *string `json:"sshKeyName,omitempty"`
// SecurityGroupIDs are one or more security group IDs this instance belongs to.
SecurityGroupIDs []string `json:"securityGroupIds,omitempty"`
// UserData is the raw data script passed to the instance which is run upon bootstrap.
// This field must not be base64 encoded and should only be used when running a new instance.
UserData *string `json:"userData,omitempty"`
// The name of the IAM instance profile associated with the instance, if applicable.
IAMProfile string `json:"iamProfile,omitempty"`
// The private IPv4 address assigned to the instance.
PrivateIP *string `json:"privateIp,omitempty"`
// The public IPv4 address assigned to the instance, if applicable.
PublicIP *string `json:"publicIp,omitempty"`
// Specifies whether enhanced networking with ENA is enabled.
ENASupport *bool `json:"enaSupport,omitempty"`
// Indicates whether the instance is optimized for Amazon EBS I/O.
EBSOptimized *bool `json:"ebsOptimized,omitempty"`
// Specifies size (in Gi) of the root storage device
RootDeviceSize int64 `json:"rootDeviceSize,omitempty"`
// Specifies ENIs attached to instance
NetworkInterfaces []string `json:"networkInterfaces,omitempty"`
// The tags associated with the instance.
Tags map[string]string `json:"tags,omitempty"`
}
| 1 | 11,317 | Let's make this a pointer given that's optional | kubernetes-sigs-cluster-api-provider-aws | go |
@@ -4,6 +4,7 @@
* Module dependencies.
*/
+var CircularJSON = require('circular-json');
var Stream = require('stream').Stream;
var utils = require('./utils');
var helpers = require('./queryhelpers'); | 1 | /* eslint no-empty: 1 */
/*!
* Module dependencies.
*/
var Stream = require('stream').Stream;
var utils = require('./utils');
var helpers = require('./queryhelpers');
var K = function(k) {
return k;
};
/**
* Provides a Node.js 0.8 style [ReadStream](http://nodejs.org/docs/v0.8.21/api/stream.html#stream_readable_stream) interface for Queries.
*
* var stream = Model.find().stream();
*
* stream.on('data', function (doc) {
* // do something with the mongoose document
* }).on('error', function (err) {
* // handle the error
* }).on('close', function () {
* // the stream is closed
* });
*
*
* The stream interface allows us to simply "plug-in" to other _Node.js 0.8_ style write streams.
*
* Model.where('created').gte(twoWeeksAgo).stream().pipe(writeStream);
*
* ####Valid options
*
* - `transform`: optional function which accepts a mongoose document. The return value of the function will be emitted on `data`.
*
* ####Example
*
* // JSON.stringify all documents before emitting
* var stream = Thing.find().stream({ transform: JSON.stringify });
* stream.pipe(writeStream);
*
* _NOTE: plugging into an HTTP response will *not* work out of the box. Those streams expect only strings or buffers to be emitted, so first formatting our documents as strings/buffers is necessary._
*
* _NOTE: these streams are Node.js 0.8 style read streams which differ from Node.js 0.10 style. Node.js 0.10 streams are not well tested yet and are not guaranteed to work._
*
* @param {Query} query
* @param {Object} [options]
* @inherits NodeJS Stream http://nodejs.org/docs/v0.8.21/api/stream.html#stream_readable_stream
* @event `data`: emits a single Mongoose document
* @event `error`: emits when an error occurs during streaming. This will emit _before_ the `close` event.
* @event `close`: emits when the stream reaches the end of the cursor or an error occurs, or the stream is manually `destroy`ed. After this event, no more events are emitted.
* @api public
*/
function QueryStream(query, options) {
Stream.call(this);
this.query = query;
this.readable = true;
this.paused = false;
this._cursor = null;
this._destroyed = null;
this._fields = null;
this._buffer = null;
this._inline = T_INIT;
this._running = false;
this._transform = options && typeof options.transform === 'function'
? options.transform
: K;
// give time to hook up events
var _this = this;
process.nextTick(function() {
_this._init();
});
}
/*!
* Inherit from Stream
*/
QueryStream.prototype.__proto__ = Stream.prototype;
/**
* Flag stating whether or not this stream is readable.
*
* @property readable
* @api public
*/
QueryStream.prototype.readable;
/**
* Flag stating whether or not this stream is paused.
*
* @property paused
* @api public
*/
QueryStream.prototype.paused;
// trampoline flags
var T_INIT = 0;
var T_IDLE = 1;
var T_CONT = 2;
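// Descriptive note (not part of the original source): these flags form a tiny
// state machine that lets _next()/__next() pull documents in a loop instead of
// recursing, avoiding stack overflows on large result sets:
//   T_INIT - __next() has just issued cursor.nextObject() and is still running
//   T_IDLE - __next() returned before the callback fired, so the callback must
//            restart the loop itself
//   T_CONT - the callback fired synchronously, so __next() keeps looping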
/**
* Initializes the query.
*
* @api private
*/
QueryStream.prototype._init = function() {
if (this._destroyed) {
return;
}
var query = this.query,
model = query.model,
options = query._optionsForExec(model),
_this = this;
try {
query.cast(model);
} catch (err) {
return _this.destroy(err);
}
_this._fields = utils.clone(query._fields);
options.fields = query._castFields(_this._fields);
model.collection.find(query._conditions, options, function(err, cursor) {
if (err) {
return _this.destroy(err);
}
_this._cursor = cursor;
_this._next();
});
};
/**
* Trampoline for pulling the next doc from cursor.
*
* @see QueryStream#__next #querystream_QueryStream-__next
* @api private
*/
QueryStream.prototype._next = function _next() {
if (this.paused || this._destroyed) {
this._running = false;
return this._running;
}
this._running = true;
if (this._buffer && this._buffer.length) {
var arg;
while (!this.paused && !this._destroyed && (arg = this._buffer.shift())) { // eslint-disable-line no-cond-assign
this._onNextObject.apply(this, arg);
}
}
// avoid stack overflows with large result sets.
// trampoline instead of recursion.
while (this.__next()) {
}
};
/**
* Pulls the next doc from the cursor.
*
* @see QueryStream#_next #querystream_QueryStream-_next
* @api private
*/
QueryStream.prototype.__next = function() {
if (this.paused || this._destroyed) {
this._running = false;
return this._running;
}
var _this = this;
_this._inline = T_INIT;
_this._cursor.nextObject(function cursorcb(err, doc) {
_this._onNextObject(err, doc);
});
// if onNextObject() was already called in this tick
// return ourselves to the trampoline.
if (T_CONT === this._inline) {
return true;
}
// onNextObject() hasn't fired yet. tell onNextObject
// that its ok to call _next b/c we are not within
// the trampoline anymore.
this._inline = T_IDLE;
};
/**
* Transforms raw `doc`s returned from the cursor into a model instance.
*
* @param {Error|null} err
* @param {Object} doc
* @api private
*/
QueryStream.prototype._onNextObject = function _onNextObject(err, doc) {
if (this._destroyed) {
return;
}
if (this.paused) {
this._buffer || (this._buffer = []);
this._buffer.push([err, doc]);
this._running = false;
return this._running;
}
if (err) {
return this.destroy(err);
}
// when doc is null we hit the end of the cursor
if (!doc) {
this.emit('end');
return this.destroy();
}
var opts = this.query._mongooseOptions;
if (!opts.populate) {
return opts.lean === true ?
emit(this, doc) :
createAndEmit(this, null, doc);
}
var _this = this;
var pop = helpers.preparePopulationOptionsMQ(_this.query, _this.query._mongooseOptions);
// Hack to work around gh-3108
pop.forEach(function(option) {
delete option.model;
});
pop.__noPromise = true;
_this.query.model.populate(doc, pop, function(err, doc) {
if (err) {
return _this.destroy(err);
}
return opts.lean === true ?
emit(_this, doc) :
createAndEmit(_this, pop, doc);
});
};
function createAndEmit(self, populatedIds, doc) {
var instance = helpers.createModel(self.query.model, doc, self._fields);
var opts = populatedIds ?
{populated: populatedIds} :
undefined;
instance.init(doc, opts, function(err) {
if (err) {
return self.destroy(err);
}
emit(self, instance);
});
}
/*!
* Emit a data event and manage the trampoline state
*/
function emit(self, doc) {
self.emit('data', self._transform(doc));
// trampoline management
if (T_IDLE === self._inline) {
// no longer in trampoline. restart it.
self._next();
} else {
// in a trampoline. tell __next that its
// ok to continue jumping.
self._inline = T_CONT;
}
}
/**
* Pauses this stream.
*
* @api public
*/
QueryStream.prototype.pause = function() {
this.paused = true;
};
/**
* Resumes this stream.
*
* @api public
*/
QueryStream.prototype.resume = function() {
this.paused = false;
if (!this._cursor) {
// cannot start if not initialized
return;
}
// are we within the trampoline?
if (T_INIT === this._inline) {
return;
}
if (!this._running) {
// outside QueryStream control, need manual restart
return this._next();
}
};
/**
* Destroys the stream, closing the underlying cursor, which emits the close event. No more events will be emitted after the close event.
*
* @param {Error} [err]
* @api public
*/
QueryStream.prototype.destroy = function(err) {
if (this._destroyed) {
return;
}
this._destroyed = true;
this._running = false;
this.readable = false;
if (this._cursor) {
this._cursor.close();
}
if (err) {
this.emit('error', err);
}
this.emit('close');
};
/**
* Pipes this query stream into another stream. This method is inherited from NodeJS Streams.
*
* ####Example:
*
* query.stream().pipe(writeStream [, options])
*
* @method pipe
* @memberOf QueryStream
* @see NodeJS http://nodejs.org/api/stream.html
* @api public
*/
/*!
* Module exports
*/
module.exports = exports = QueryStream;
| 1 | 13,579 | Why is CircularJSON imported here? It is never used in this file. | Automattic-mongoose | js |
@@ -80,7 +80,11 @@ namespace AutoRest.Swagger
{
// Look for semantic errors and warnings in the document.
var validator = new RecursiveObjectValidator(PropertyNameResolver.JsonName);
- foreach (var validationEx in validator.GetValidationExceptions(ServiceDefinition.FilePath, ServiceDefinition))
+ foreach (var validationEx in validator.GetValidationExceptions(ServiceDefinition.FilePath, ServiceDefinition, new ServiceDefinitionMetadata
+ { // LEGACY MODE! set defaults for the metadata, the validator doesn't care from this codepath
+ OpenApiDocumentType = ServiceDefinitionDocumentType.Default,
+ MergeState = ServiceDefinitionMergeState.After
+ }))
{
Logger.Instance.Log(validationEx);
} | 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Globalization;
using System.Linq;
using AutoRest.Core;
using AutoRest.Core.Model;
using AutoRest.Core.Logging;
using AutoRest.Core.Utilities;
using AutoRest.Core.Utilities.Collections;
using AutoRest.Swagger.Model;
using AutoRest.Swagger.Properties;
using ParameterLocation = AutoRest.Swagger.Model.ParameterLocation;
using static AutoRest.Core.Utilities.DependencyInjection;
using AutoRest.Swagger.Validation.Core;
namespace AutoRest.Swagger
{
public class SwaggerModeler : Modeler
{
private const string BaseUriParameterName = "BaseUri";
internal Dictionary<string, string> ExtendedTypes = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase);
internal Dictionary<string, CompositeType> GeneratedTypes = new Dictionary<string, CompositeType>();
internal Dictionary<Schema, CompositeType> GeneratingTypes = new Dictionary<Schema, CompositeType>();
public SwaggerModeler()
{
if (Settings.Instance == null)
{
throw new ArgumentNullException("settings");
}
DefaultProtocol = TransferProtocolScheme.Http;
}
public override string Name
{
get { return "Swagger"; }
}
/// <summary>
/// Swagger service model.
/// </summary>
public ServiceDefinition ServiceDefinition { get; set; }
/// <summary>
/// Client model.
/// </summary>
public CodeModel CodeModel { get; set; }
/// <summary>
/// Default protocol when no protocol is specified in the schema
/// </summary>
public TransferProtocolScheme DefaultProtocol { get; set; }
/// <summary>
/// Builds service model from swagger file.
/// </summary>
/// <returns></returns>
public override CodeModel Build()
{
Logger.Instance.Log(Category.Info, Resources.ParsingSwagger);
if (string.IsNullOrWhiteSpace(Settings.Input))
{
throw ErrorManager.CreateError(Resources.InputRequired);
}
var serviceDefinition = SwaggerParser.Load(Settings.Input, Settings.FileSystemInput);
return Build(serviceDefinition);
}
[System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Maintainability", "CA1506:AvoidExcessiveClassCoupling")]
public CodeModel Build(ServiceDefinition serviceDefinition)
{
ServiceDefinition = serviceDefinition;
if (Settings.Instance.CodeGenerator.EqualsIgnoreCase("None"))
{
// Look for semantic errors and warnings in the document.
var validator = new RecursiveObjectValidator(PropertyNameResolver.JsonName);
foreach (var validationEx in validator.GetValidationExceptions(ServiceDefinition.FilePath, ServiceDefinition))
{
Logger.Instance.Log(validationEx);
}
return New<CodeModel>();
}
Logger.Instance.Log(Category.Info, Resources.GeneratingClient);
// Update settings
UpdateSettings();
InitializeClientModel();
BuildCompositeTypes();
// Build client parameters
foreach (var swaggerParameter in ServiceDefinition.Parameters.Values)
{
var parameter = ((ParameterBuilder)swaggerParameter.GetBuilder(this)).Build();
var clientProperty = New<Property>();
clientProperty.LoadFrom(parameter);
clientProperty.RealPath = new string[] { parameter.SerializedName.Value };
CodeModel.Add(clientProperty);
}
var methods = new List<Method>();
// Build methods
foreach (var path in ServiceDefinition.Paths.Concat(ServiceDefinition.CustomPaths))
{
foreach (var verb in path.Value.Keys)
{
var operation = path.Value[verb];
if (string.IsNullOrWhiteSpace(operation.OperationId))
{
throw ErrorManager.CreateError(
string.Format(CultureInfo.InvariantCulture,
Resources.OperationIdMissing,
verb,
path.Key));
}
var methodName = GetMethodName(operation);
var methodGroup = GetMethodGroup(operation);
if (verb.ToHttpMethod() != HttpMethod.Options)
{
string url = path.Key;
if (url.Contains("?"))
{
url = url.Substring(0, url.IndexOf('?'));
}
var method = BuildMethod(verb.ToHttpMethod(), url, methodName, operation);
method.Group = methodGroup;
methods.Add(method);
if (method.DefaultResponse.Body is CompositeType)
{
CodeModel.AddError((CompositeType)method.DefaultResponse.Body);
}
}
else
{
Logger.Instance.Log(Category.Warning, Resources.OptionsNotSupported);
}
}
}
// Set base type
foreach (var typeName in GeneratedTypes.Keys)
{
var objectType = GeneratedTypes[typeName];
if (ExtendedTypes.ContainsKey(typeName))
{
objectType.BaseModelType = GeneratedTypes[ExtendedTypes[typeName]];
}
CodeModel.Add(objectType);
}
CodeModel.AddRange(methods);
return CodeModel;
}
/// <summary>
        /// Compares two versions of the same service specification.
/// </summary>
/// <returns></returns>
[System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Maintainability", "CA1506:AvoidExcessiveClassCoupling")]
public override IEnumerable<LogMessage> Compare()
{
var settings = Settings.Instance;
Logger.Instance.Log(Category.Info, Resources.ParsingSwagger);
if (string.IsNullOrWhiteSpace(Settings.Input) || string.IsNullOrWhiteSpace(Settings.Previous))
{
throw ErrorManager.CreateError(Resources.InputRequired);
}
            var oldDefinition = SwaggerParser.Load(settings.Previous, settings.FileSystemInput);
            var newDefinition = SwaggerParser.Load(settings.Input, settings.FileSystemInput);
            var context = new ComparisonContext(oldDefinition, newDefinition);
            // Look for semantic errors and warnings in the new document.
            var validator = new RecursiveObjectValidator(PropertyNameResolver.JsonName);
            var LogMessages = validator.GetValidationExceptions(newDefinition.FilePath, newDefinition).ToList();
            // Only compare versions if the new version is correct.
            var comparisonMessages =
                !LogMessages.Any(m => m.Severity > Category.Error) ?
                    newDefinition.Compare(context, oldDefinition) :
Enumerable.Empty<ComparisonMessage>();
return LogMessages
.Select(msg => new ComparisonMessage(new MessageTemplate { Id = 0, Message = msg.Message }, msg.Path, msg.Severity))
.Concat(comparisonMessages);
}
private void UpdateSettings()
{
if (ServiceDefinition.Info.CodeGenerationSettings != null)
{
foreach (var key in ServiceDefinition.Info.CodeGenerationSettings.Extensions.Keys)
{
//Don't overwrite settings that come in from the command line
if (!this.Settings.CustomSettings.ContainsKey(key))
this.Settings.CustomSettings[key] = ServiceDefinition.Info.CodeGenerationSettings.Extensions[key];
}
Settings.PopulateSettings(this.Settings, this.Settings.CustomSettings);
}
}
/// <summary>
/// Initialize the base service and populate global service properties
/// </summary>
/// <returns>The base ServiceModel Service</returns>
public virtual void InitializeClientModel()
{
if (string.IsNullOrEmpty(ServiceDefinition.Swagger))
{
throw ErrorManager.CreateError(Resources.UnknownSwaggerVersion);
}
if (ServiceDefinition.Info == null)
{
throw ErrorManager.CreateError(Resources.InfoSectionMissing);
}
CodeModel = New<CodeModel>();
if (string.IsNullOrWhiteSpace(Settings.ClientName) && ServiceDefinition.Info.Title == null)
{
throw ErrorManager.CreateError(Resources.TitleMissing);
}
CodeModel.Name = ServiceDefinition.Info.Title?.Replace(" ", "");
CodeModel.Namespace = Settings.Namespace;
CodeModel.ModelsName = Settings.ModelsName;
CodeModel.ApiVersion = ServiceDefinition.Info.Version;
CodeModel.Documentation = ServiceDefinition.Info.Description;
if (ServiceDefinition.Schemes == null || ServiceDefinition.Schemes.Count != 1)
{
ServiceDefinition.Schemes = new List<TransferProtocolScheme> { DefaultProtocol };
}
if (string.IsNullOrEmpty(ServiceDefinition.Host))
{
ServiceDefinition.Host = "localhost";
}
CodeModel.BaseUrl = string.Format(CultureInfo.InvariantCulture, "{0}://{1}{2}",
ServiceDefinition.Schemes[0].ToString().ToLower(),
ServiceDefinition.Host, ServiceDefinition.BasePath);
// Copy extensions
            ServiceDefinition.Extensions.ForEach(extension => CodeModel.Extensions.AddOrSet(extension.Key, extension.Value));
}
/// <summary>
/// Build composite types from definitions
/// </summary>
public virtual void BuildCompositeTypes()
{
// Build service types and validate allOf
if (ServiceDefinition.Definitions != null)
{
foreach (var schemaName in ServiceDefinition.Definitions.Keys.ToArray())
{
var schema = ServiceDefinition.Definitions[schemaName];
schema.GetBuilder(this).BuildServiceType(schemaName);
Resolver.ExpandAllOf(schema);
var parent = string.IsNullOrEmpty(schema.Extends.StripDefinitionPath())
? null
: ServiceDefinition.Definitions[schema.Extends.StripDefinitionPath()];
if (parent != null &&
!AncestorsHaveProperties(parent.Properties, parent.Extends))
{
throw ErrorManager.CreateError(Resources.InvalidAncestors, schemaName);
}
}
}
}
/// <summary>
        /// Recursively traverse the schema's extends to verify that it or one of its parents
/// has at least one property
/// </summary>
/// <param name="properties">The schema's properties</param>
/// <param name="extends">The schema's extends</param>
        /// <returns>True if one or more properties are found in this schema or in its ancestors. False otherwise</returns>
private bool AncestorsHaveProperties(Dictionary<string, Schema> properties, string extends)
{
if (properties.IsNullOrEmpty() && string.IsNullOrEmpty(extends))
{
return false;
}
if (!properties.IsNullOrEmpty())
{
return true;
}
extends = extends.StripDefinitionPath();
Debug.Assert(!string.IsNullOrEmpty(extends) && ServiceDefinition.Definitions.ContainsKey(extends));
return AncestorsHaveProperties(ServiceDefinition.Definitions[extends].Properties,
ServiceDefinition.Definitions[extends].Extends);
}
/// <summary>
/// Builds method from swagger operation.
/// </summary>
/// <param name="httpMethod"></param>
/// <param name="url"></param>
/// <param name="name"></param>
/// <param name="operation"></param>
/// <returns></returns>
public Method BuildMethod(HttpMethod httpMethod, string url, string name,
Operation operation)
{
string methodGroup = GetMethodGroup(operation);
var operationBuilder = new OperationBuilder(operation, this);
Method method = operationBuilder.BuildMethod(httpMethod, url, name, methodGroup);
return method;
}
/// <summary>
/// Extracts method group from operation ID.
/// </summary>
/// <param name="operation">The swagger operation.</param>
/// <returns>Method group name or null.</returns>
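        /// <remarks>Illustrative example (not in the original source): an OperationId of
        /// "VirtualMachines_Get" yields the method group "VirtualMachines", while an
        /// OperationId without an underscore yields null; GetMethodName below returns
        /// the part after the underscore.</remarks>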
public static string GetMethodGroup(Operation operation)
{
if (operation == null)
{
throw new ArgumentNullException("operation");
}
if (operation.OperationId == null || operation.OperationId.IndexOf('_') == -1)
{
return null;
}
var parts = operation.OperationId.Split('_');
return parts[0];
}
/// <summary>
/// Extracts method name from operation ID.
/// </summary>
/// <param name="operation">The swagger operation.</param>
/// <returns>Method name.</returns>
public static string GetMethodName(Operation operation)
{
if (operation == null)
{
throw new ArgumentNullException("operation");
}
if (operation.OperationId == null)
{
return null;
}
if (operation.OperationId.IndexOf('_') == -1)
{
return operation.OperationId;
}
var parts = operation.OperationId.Split('_');
return parts[1];
}
public SwaggerParameter Unwrap(SwaggerParameter swaggerParameter)
{
if (swaggerParameter == null)
{
throw new ArgumentNullException("swaggerParameter");
}
            // If the parameter references a global parameter definition, resolve the reference.
if (swaggerParameter.Reference != null)
{
string referenceKey = swaggerParameter.Reference.StripParameterPath();
if (!ServiceDefinition.Parameters.ContainsKey(referenceKey))
{
throw new ArgumentException(
string.Format(CultureInfo.InvariantCulture,
Resources.DefinitionDoesNotExist, referenceKey));
}
swaggerParameter = ServiceDefinition.Parameters[referenceKey];
}
// Unwrap the schema if in "body"
if (swaggerParameter.Schema != null && swaggerParameter.In == ParameterLocation.Body)
{
swaggerParameter.Schema = Resolver.Unwrap(swaggerParameter.Schema);
}
return swaggerParameter;
}
public SchemaResolver Resolver
{
get { return new SchemaResolver(this); }
}
}
}
| 1 | 24,609 | Should be ARM | Azure-autorest | java |
@@ -86,10 +86,10 @@ func openBucket(dir string, _ *Options) (driver.Bucket, error) {
dir = filepath.Clean(dir)
info, err := os.Stat(dir)
if err != nil {
- return nil, fmt.Errorf("open file bucket: %v", err)
+ return nil, err
}
if !info.IsDir() {
- return nil, fmt.Errorf("open file bucket: %s is not a directory", dir)
+ return nil, fmt.Errorf("%s is not a directory", dir)
}
return &bucket{dir}, nil
} | 1 | // Copyright 2018 The Go Cloud Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package fileblob provides a bucket implementation that operates on the local
// filesystem. This should not be used for production: it is intended for local
// development.
//
// Blob keys are escaped before being used as filenames, and filenames are
// unescaped when they are passed back as blob keys during List. The escape
// algorithm is:
// -- Alphanumeric characters (A-Z a-z 0-9) are not escaped.
// -- Space (' '), dash ('-'), underscore ('_'), and period ('.') are not escaped.
// -- Slash ('/') is always escaped to the OS-specific path separator character
// (os.PathSeparator).
// -- All other characters are escaped similar to url.PathEscape:
// "%<hex UTF-8 byte>", with capital letters ABCDEF in the hex code.
//
// Filenames that can't be unescaped due to invalid escape sequences
// (e.g., "%%"), or whose unescaped key doesn't escape back to the filename
// (e.g., "~", which unescapes to "~", which escapes back to "%7E" != "~"),
// aren't visible using fileblob.
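//
// For example (illustrative, not part of the original documentation): on a
// system where os.PathSeparator is '/', the key "a b/c+d" is stored in the
// file "a b/c%2Bd", and that filename is listed back as the key "a b/c+d".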
//
// For blob.Open URLs, fileblob registers for the "file" scheme.
// The URL's Path is used as the root directory; the URL's Host is ignored.
// If os.PathSeparator != "/", any leading "/" from the Path is dropped.
// No query options are supported. Examples:
// -- file:///a/directory passes "/a/directory" to OpenBucket.
// -- file://localhost/a/directory also passes "/a/directory".
// -- file:///c:/foo/bar passes "c:/foo/bar".
//
// fileblob does not support any types for As.
package fileblob
import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"errors"
"fmt"
"hash"
"io"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/google/go-cloud/blob"
"github.com/google/go-cloud/blob/driver"
)
const defaultPageSize = 1000
func init() {
blob.Register("file", func(_ context.Context, u *url.URL) (driver.Bucket, error) {
path := u.Path
if os.PathSeparator != '/' && strings.HasPrefix(path, "/") {
path = path[1:]
}
return openBucket(path, nil)
})
}
// Options sets options for constructing a *blob.Bucket backed by fileblob.
type Options struct{}
type bucket struct {
dir string
}
// openBucket creates a driver.Bucket that reads and writes to dir.
// dir must exist.
func openBucket(dir string, _ *Options) (driver.Bucket, error) {
dir = filepath.Clean(dir)
info, err := os.Stat(dir)
if err != nil {
return nil, fmt.Errorf("open file bucket: %v", err)
}
if !info.IsDir() {
return nil, fmt.Errorf("open file bucket: %s is not a directory", dir)
}
return &bucket{dir}, nil
}
// OpenBucket creates a *blob.Bucket that reads and writes to dir.
// dir must exist.
func OpenBucket(dir string, opts *Options) (*blob.Bucket, error) {
drv, err := openBucket(dir, opts)
if err != nil {
return nil, err
}
return blob.NewBucket(drv), nil
}
// shouldEscape returns true if c should be escaped.
func shouldEscape(c byte) bool {
switch {
case 'A' <= c && c <= 'Z':
return false
case 'a' <= c && c <= 'z':
return false
case '0' <= c && c <= '9':
return false
case c == ' ' || c == '-' || c == '_' || c == '.':
return false
case c == '/':
return false
}
return true
}
// escape returns s escaped per the rules described in the package docstring.
// The code is modified from https://golang.org/src/net/url/url.go.
func escape(s string) string {
hexCount := 0
replaceSlash := false
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(c) {
hexCount++
} else if c == '/' && os.PathSeparator != '/' {
replaceSlash = true
}
}
if hexCount == 0 && !replaceSlash {
return s
}
t := make([]byte, len(s)+2*hexCount)
j := 0
for i := 0; i < len(s); i++ {
switch c := s[i]; {
case c == '/':
t[j] = os.PathSeparator
j++
case shouldEscape(c):
t[j] = '%'
t[j+1] = "0123456789ABCDEF"[c>>4]
t[j+2] = "0123456789ABCDEF"[c&15]
j += 3
default:
t[j] = s[i]
j++
}
}
return string(t)
}
// ishex returns true if c is a valid part of a hexadecimal number.
func ishex(c byte) bool {
switch {
case '0' <= c && c <= '9':
return true
case 'a' <= c && c <= 'f':
return true
case 'A' <= c && c <= 'F':
return true
}
return false
}
// unhex returns the hexadecimal value of the hexadecimal character c.
// For example, unhex('A') returns 10.
func unhex(c byte) byte {
switch {
case '0' <= c && c <= '9':
return c - '0'
case 'a' <= c && c <= 'f':
return c - 'a' + 10
case 'A' <= c && c <= 'F':
return c - 'A' + 10
}
return 0
}
// unescape unescapes s per the rules described in the package docstring.
// It returns an error if s has invalid escape sequences, or if
// escape(unescape(s)) != s.
// The code is modified from https://golang.org/src/net/url/url.go.
func unescape(s string) (string, error) {
// Count %, check that they're well-formed.
n := 0
replacePathSeparator := false
for i := 0; i < len(s); {
switch s[i] {
case '%':
n++
if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
bad := s[i:]
if len(bad) > 3 {
bad = bad[:3]
}
return "", fmt.Errorf("couldn't unescape %q near %q", s, bad)
}
i += 3
case os.PathSeparator:
replacePathSeparator = os.PathSeparator != '/'
i++
default:
i++
}
}
unescaped := s
if n > 0 || replacePathSeparator {
t := make([]byte, len(s)-2*n)
j := 0
for i := 0; i < len(s); {
switch s[i] {
case '%':
t[j] = unhex(s[i+1])<<4 | unhex(s[i+2])
j++
i += 3
case os.PathSeparator:
t[j] = '/'
j++
i++
default:
t[j] = s[i]
j++
i++
}
}
unescaped = string(t)
}
escaped := escape(unescaped)
if escaped != s {
return "", fmt.Errorf("%q unescaped to %q but escaped back to %q instead of itself", s, unescaped, escaped)
}
return unescaped, nil
}
// IsNotExist implements driver.IsNotExist.
func (b *bucket) IsNotExist(err error) bool {
return os.IsNotExist(err)
}
var errNotImplemented = errors.New("not implemented")
// IsNotImplemented implements driver.IsNotImplemented.
func (b *bucket) IsNotImplemented(err error) bool {
return err == errNotImplemented
}
// forKey returns the full path, os.FileInfo, and attributes for key.
func (b *bucket) forKey(key string) (string, os.FileInfo, *xattrs, error) {
relpath := escape(key)
path := filepath.Join(b.dir, relpath)
if strings.HasSuffix(path, attrsExt) {
return "", nil, nil, fmt.Errorf("open file blob %s: extension %q cannot be directly read", key, attrsExt)
}
info, err := os.Stat(path)
if err != nil {
return "", nil, nil, err
}
xa, err := getAttrs(path)
if err != nil {
return "", nil, nil, fmt.Errorf("open file attributes %s: %v", key, err)
}
return path, info, &xa, nil
}
// ListPaged implements driver.ListPaged.
func (b *bucket) ListPaged(ctx context.Context, opts *driver.ListOptions) (*driver.ListPage, error) {
var pageToken string
if len(opts.PageToken) > 0 {
pageToken = string(opts.PageToken)
}
pageSize := opts.PageSize
if pageSize == 0 {
pageSize = defaultPageSize
}
// If opts.Delimiter != "", lastPrefix contains the last "directory" key we
// added. It is used to avoid adding it again; all files in this "directory"
// are collapsed to the single directory entry.
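	// Illustrative example (not in the original source): with Prefix "a/" and
	// Delimiter "/", the keys "a/b/c" and "a/b/d" both collapse into a single
	// pseudo-directory entry with key "a/b/".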
var lastPrefix string
// Do a full recursive scan of the root directory.
var result driver.ListPage
err := filepath.Walk(b.dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
// Couldn't read this file/directory for some reason; just skip it.
return nil
}
// Skip the self-generated attribute files.
if strings.HasSuffix(path, attrsExt) {
return nil
}
		// filepath.Walk returns the root directory; skip it.
if path == b.dir {
return nil
}
// Strip the <b.dir> prefix from path; +1 is to include the separator.
path = path[len(b.dir)+1:]
// Unescape the path to get the key; if this fails, skip.
key, err := unescape(path)
if err != nil {
return nil
}
// Skip all directories. If opts.Delimiter is set, we'll create
// pseudo-directories later.
// Note that returning nil means that we'll still recurse into it;
// we're just not adding a result for the directory itself.
if info.IsDir() {
key += "/"
// Avoid recursing into subdirectories if the directory name already
// doesn't match the prefix; any files in it are guaranteed not to match.
if len(key) > len(opts.Prefix) && !strings.HasPrefix(key, opts.Prefix) {
return filepath.SkipDir
}
// Similarly, avoid recursing into subdirectories if we're making
// "directories" and all of the files in this subdirectory are guaranteed
// to collapse to a "directory" that we've already added.
if lastPrefix != "" && strings.HasPrefix(key, lastPrefix) {
return filepath.SkipDir
}
return nil
}
// Skip files/directories that don't match the Prefix.
if !strings.HasPrefix(key, opts.Prefix) {
return nil
}
obj := &driver.ListObject{
Key: key,
ModTime: info.ModTime(),
Size: info.Size(),
}
// If using Delimiter, collapse "directories".
if opts.Delimiter != "" {
// Strip the prefix, which may contain Delimiter.
keyWithoutPrefix := key[len(opts.Prefix):]
// See if the key still contains Delimiter.
// If no, it's a file and we just include it.
// If yes, it's a file in a "sub-directory" and we want to collapse
// all files in that "sub-directory" into a single "directory" result.
if idx := strings.Index(keyWithoutPrefix, opts.Delimiter); idx != -1 {
prefix := opts.Prefix + keyWithoutPrefix[0:idx+len(opts.Delimiter)]
// We've already included this "directory"; don't add it.
if prefix == lastPrefix {
return nil
}
// Update the object to be a "directory".
obj = &driver.ListObject{
Key: prefix,
IsDir: true,
}
lastPrefix = prefix
}
}
// If there's a pageToken, skip anything before it.
if pageToken != "" && obj.Key <= pageToken {
return nil
}
// If we've already got a full page of results, set NextPageToken and stop.
if len(result.Objects) == pageSize {
result.NextPageToken = []byte(result.Objects[pageSize-1].Key)
return io.EOF
}
result.Objects = append(result.Objects, obj)
return nil
})
if err != nil && err != io.EOF {
return nil, err
}
return &result, nil
}
// As implements driver.As.
func (b *bucket) As(i interface{}) bool { return false }
// Attributes implements driver.Attributes.
func (b *bucket) Attributes(ctx context.Context, key string) (driver.Attributes, error) {
_, info, xa, err := b.forKey(key)
if err != nil {
return driver.Attributes{}, err
}
return driver.Attributes{
ContentType: xa.ContentType,
Metadata: xa.Metadata,
ModTime: info.ModTime(),
Size: info.Size(),
}, nil
}
// NewRangeReader implements driver.NewRangeReader.
func (b *bucket) NewRangeReader(ctx context.Context, key string, offset, length int64) (driver.Reader, error) {
path, info, xa, err := b.forKey(key)
if err != nil {
return nil, err
}
f, err := os.Open(path)
if err != nil {
return nil, fmt.Errorf("open file blob %s: %v", key, err)
}
if offset > 0 {
if _, err := f.Seek(offset, io.SeekStart); err != nil {
return nil, fmt.Errorf("open file blob %s: %v", key, err)
}
}
r := io.Reader(f)
if length > 0 {
r = io.LimitReader(r, length)
}
return reader{
r: r,
c: f,
attrs: driver.ReaderAttributes{
ContentType: xa.ContentType,
ModTime: info.ModTime(),
Size: info.Size(),
},
}, nil
}
type reader struct {
r io.Reader
c io.Closer
attrs driver.ReaderAttributes
}
func (r reader) Read(p []byte) (int, error) {
if r.r == nil {
return 0, io.EOF
}
return r.r.Read(p)
}
func (r reader) Close() error {
if r.c == nil {
return nil
}
return r.c.Close()
}
func (r reader) Attributes() driver.ReaderAttributes {
return r.attrs
}
func (r reader) As(i interface{}) bool { return false }
// NewTypedWriter implements driver.NewTypedWriter.
func (b *bucket) NewTypedWriter(ctx context.Context, key string, contentType string, opts *driver.WriterOptions) (driver.Writer, error) {
path := filepath.Join(b.dir, escape(key))
if strings.HasSuffix(path, attrsExt) {
return nil, fmt.Errorf("open file blob %s: extension %q is reserved and cannot be used", key, attrsExt)
}
if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil {
return nil, fmt.Errorf("open file blob %s: %v", key, err)
}
f, err := ioutil.TempFile("", "fileblob")
if err != nil {
return nil, fmt.Errorf("open file blob %s: %v", key, err)
}
if opts.BeforeWrite != nil {
if err := opts.BeforeWrite(func(interface{}) bool { return false }); err != nil {
return nil, err
}
}
var metadata map[string]string
if len(opts.Metadata) > 0 {
metadata = opts.Metadata
}
attrs := xattrs{
ContentType: contentType,
Metadata: metadata,
}
w := &writer{
ctx: ctx,
f: f,
path: path,
attrs: attrs,
}
if len(opts.ContentMD5) > 0 {
w.contentMD5 = opts.ContentMD5
w.md5hash = md5.New()
}
return w, nil
}
type writer struct {
ctx context.Context
f *os.File
path string
attrs xattrs
contentMD5 []byte
md5hash hash.Hash
}
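// Note (descriptive summary, not in the original source): the writer streams
// bytes to a temporary file; Close verifies the optional ContentMD5, writes the
// sidecar attributes file, and only then renames the temp file into place, so
// readers never observe a partially written blob.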
func (w writer) Write(p []byte) (n int, err error) {
if w.md5hash != nil {
if _, err := w.md5hash.Write(p); err != nil {
return 0, fmt.Errorf("updating md5 hash: %v", err)
}
}
return w.f.Write(p)
}
func (w writer) Close() error {
err := w.f.Close()
if err != nil {
return err
}
// Always delete the temp file. On success, it will have been renamed so
// the Remove will fail.
defer func() {
_ = os.Remove(w.f.Name())
}()
// Check if the write was cancelled.
if err := w.ctx.Err(); err != nil {
return err
}
// Check MD5 hash if necessary.
if w.md5hash != nil {
md5sum := w.md5hash.Sum(nil)
if !bytes.Equal(md5sum, w.contentMD5) {
return fmt.Errorf(
"the ContentMD5 you specified did not match what we received (%s != %s)",
base64.StdEncoding.EncodeToString(md5sum),
base64.StdEncoding.EncodeToString(w.contentMD5),
)
}
}
// Write the attributes file.
if err := setAttrs(w.path, w.attrs); err != nil {
return fmt.Errorf("write blob attributes: %v", err)
}
// Rename the temp file to path.
if err := os.Rename(w.f.Name(), w.path); err != nil {
_ = os.Remove(w.path + attrsExt)
return fmt.Errorf("rename during Close: %v", err)
}
return nil
}
// Delete implements driver.Delete.
func (b *bucket) Delete(ctx context.Context, key string) error {
path := filepath.Join(b.dir, escape(key))
if strings.HasSuffix(path, attrsExt) {
return fmt.Errorf("delete file blob %s: extension %q cannot be directly deleted", key, attrsExt)
}
err := os.Remove(path)
if err != nil {
return err
}
if err = os.Remove(path + attrsExt); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("delete file blob %s: %v", key, err)
}
return nil
}
func (b *bucket) SignedURL(ctx context.Context, key string, opts *driver.SignedURLOptions) (string, error) {
	// TODO(Issue #546): Implement SignedURL for fileblob.
return "", errNotImplemented
}
| 1 | 12,266 | I don't see how removing "open file bucket" helps your purpose. | google-go-cloud | go |