| patch (string, 17 to 31.2k chars) | y (int64, 1 to 1) | oldf (string, 0 to 2.21M chars) | idx (int64, 1 to 1) | id (int64, 4.29k to 68.4k) | msg (string, 8 to 843 chars) | proj (212 classes) | lang (9 classes) |
---|---|---|---|---|---|---|---|
@@ -47,7 +47,7 @@ class SolrVersionStatus extends AbstractSolrStatus
*
* @var string
*/
- const REQUIRED_SOLR_VERSION = '6.3.0';
+ const REQUIRED_SOLR_VERSION = '6.6.2';
/**
* Compiles a version check against each configured Solr server. | 1 | <?php
namespace ApacheSolrForTypo3\Solr\Report;
/***************************************************************
* Copyright notice
*
* (c) 2011-2015 Stefan Sprenger <[email protected]>
* (c) 2012-2015 Ingo Renner <[email protected]>
* All rights reserved
*
* This script is part of the TYPO3 project. The TYPO3 project is
* free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* The GNU General Public License can be found at
* http://www.gnu.org/copyleft/gpl.html.
*
* This script is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This copyright notice MUST APPEAR in all copies of the script!
***************************************************************/
use ApacheSolrForTypo3\Solr\ConnectionManager;
use ApacheSolrForTypo3\Solr\SolrService;
use TYPO3\CMS\Core\Utility\GeneralUtility;
use TYPO3\CMS\Fluid\View\StandaloneView;
use TYPO3\CMS\Reports\Status;
use TYPO3\CMS\Reports\StatusProviderInterface;
/**
* Provides a status report about whether the installed Solr version matches
* the required version.
*
* @author Stefan Sprenger <[email protected]>
*/
class SolrVersionStatus extends AbstractSolrStatus
{
/**
* Required Solr version. The version that gets installed when using the
* provided install script EXT:solr/Resources/Private/Install/install-solr.sh
*
* @var string
*/
const REQUIRED_SOLR_VERSION = '6.3.0';
/**
* Compiles a version check against each configured Solr server.
*
*/
public function getStatus()
{
$reports = [];
$solrConnections = GeneralUtility::makeInstance(ConnectionManager::class)->getAllConnections();
foreach ($solrConnections as $solrConnection) {
/** @var $solrConnection SolrService */
if (!$solrConnection->ping()) {
$url = $solrConnection->__toString();
$pingFailedMsg = 'Could not ping solr server, can not check version ' . (string)$url;
$status = GeneralUtility::makeInstance(Status::class, 'Apache Solr Version', 'Not accessible', $pingFailedMsg, Status::ERROR);
$reports[] = $status;
continue;
}
$solrVersion = $solrConnection->getSolrServerVersion();
$isOutdatedVersion = version_compare($this->getCleanSolrVersion($solrVersion), self::REQUIRED_SOLR_VERSION, '<');
if (!$isOutdatedVersion) {
continue;
}
$formattedVersion = $this->formatSolrVersion($solrVersion);
$variables = ['requiredVersion' => self::REQUIRED_SOLR_VERSION, 'currentVersion' => $formattedVersion, 'solr' => $solrConnection];
$report = $this->getRenderedReport('SolrVersionStatus.html', $variables);
$status = GeneralUtility::makeInstance(Status::class, 'Apache Solr Version', 'Outdated, Unsupported', $report, Status::ERROR);
$reports[] = $status;
}
return $reports;
}
/**
* Gets the clean Solr version in case of a custom build which may have
* additional information in the version string.
*
* @param string $solrVersion Unformatted Apache Solr version number as provided by Solr.
 * @return string Clean Solr version number: major.minor.patchlevel
*/
protected function getCleanSolrVersion($solrVersion)
{
$explodedSolrVersion = explode('.', $solrVersion);
$shortSolrVersion = $explodedSolrVersion[0]
. '.' . $explodedSolrVersion[1]
. '.' . $explodedSolrVersion[2];
return $shortSolrVersion;
}
/**
 * Formats the Apache Solr server version number. By default this is the
 * simple major.minor.patch-level version. Custom builds provide more
 * information; in that case the complete version string is appended as
 * well.
*
* @param string $solrVersion Unformatted Apache Solr version number as provided by Solr.
* @return string formatted short version number, in case of custom builds followed by the complete version number
*/
protected function formatSolrVersion($solrVersion)
{
$shortSolrVersion = $this->getCleanSolrVersion($solrVersion);
$formattedSolrVersion = $shortSolrVersion;
if ($solrVersion != $shortSolrVersion) {
$formattedSolrVersion .= ' (' . $solrVersion . ')';
}
return $formattedSolrVersion;
}
}
| 1 | 6,438 | Solr dependency was downgraded? | TYPO3-Solr-ext-solr | php |
@@ -38,6 +38,7 @@ module C2
config.autoload_paths << Rails.root.join('app', 'mailers', 'concerns')
config.autoload_paths << Rails.root.join('app', 'policies', 'concerns')
config.autoload_paths << Rails.root.join('lib')
+ config.autoload_paths << Rails.root.join("lib", "services")
config.assets.precompile << 'communicarts_mailer.css'
| 1 | require File.expand_path('../boot', __FILE__)
require 'rails/all'
# Require the gems listed in Gemfile, including any gems
# you've limited to :test, :development, or :production.
Bundler.require(:default, Rails.env)
module C2
class Application < Rails::Application
# Settings in config/environments/* take precedence over those specified here.
# Application configuration should go into files in config/initializers
# -- all .rb files in that directory are automatically loaded.
# Set Time.zone default to the specified zone and make Active Record auto-convert to this zone.
# Run "rake -D time" for a list of tasks for finding time zone names. Default is UTC.
# config.time_zone = 'Central Time (US & Canada)'
# The default locale is :en and all translations from config/locales/*.rb,yml are auto loaded.
# config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}').to_s]
# config.i18n.default_locale = :de
# https://git.io/ETVYsQ
config.middleware.insert_before 0, Rack::Cors, logger: Rails.logger do
allow do
origins '*'
resource '*',
headers: :any,
methods: [:get, :post, :delete, :put, :options, :head],
max_age: 1728000
end
end
config.action_mailer.raise_delivery_errors = true
config.roadie.url_options = config.action_mailer.default_url_options
config.autoload_paths << Rails.root.join('app', 'mailers', 'concerns')
config.autoload_paths << Rails.root.join('app', 'policies', 'concerns')
config.autoload_paths << Rails.root.join('lib')
config.assets.precompile << 'communicarts_mailer.css'
# remove for Rails 4.3+(?)
config.active_record.raise_in_transactional_callbacks = true
config.active_job.queue_adapter = :delayed_job
end
end
 | 1 | 14,969 | or should we just autoload everything in `lib`? | 18F-C2 | rb |
@@ -59,6 +59,13 @@ public class NodeStatus {
}
}
+ public boolean hasCapability(Capabilities caps) {
+ long count = slots.stream()
+ .filter(slot -> slot.isSupporting(caps))
+ .count();
+ return count > 0;
+ }
+
public boolean hasCapacity() {
return slots.stream().anyMatch(slot -> !slot.getSession().isPresent());
} | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.data;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.internal.Require;
import org.openqa.selenium.json.JsonInput;
import org.openqa.selenium.json.TypeToken;
import java.net.URI;
import java.time.Instant;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
public class NodeStatus {
private final NodeId nodeId;
private final URI externalUri;
private final int maxSessionCount;
private final Set<Slot> slots;
private final Availability availability;
public NodeStatus(
NodeId nodeId,
URI externalUri,
int maxSessionCount,
Set<Slot> slots,
Availability availability) {
this.nodeId = Require.nonNull("Node id", nodeId);
this.externalUri = Require.nonNull("URI", externalUri);
this.maxSessionCount = Require.positive("Max session count",
maxSessionCount,
"Make sure that a driver is available on $PATH");
this.slots = ImmutableSet.copyOf(Require.nonNull("Slots", slots));
this.availability = Require.nonNull("Availability", availability);
ImmutableSet.Builder<Session> sessions = ImmutableSet.builder();
for (Slot slot : slots) {
slot.getSession().ifPresent(sessions::add);
}
}
public boolean hasCapacity() {
return slots.stream().anyMatch(slot -> !slot.getSession().isPresent());
}
public boolean hasCapacity(Capabilities caps) {
long count = slots.stream()
.filter(slot -> !slot.getSession().isPresent())
.filter(slot -> slot.isSupporting(caps))
.count();
return count > 0;
}
public NodeId getId() {
return nodeId;
}
public URI getUri() {
return externalUri;
}
public int getMaxSessionCount() {
return maxSessionCount;
}
public Set<Slot> getSlots() {
return slots;
}
public Availability getAvailability() {
return availability;
}
public float getLoad() {
float inUse = slots.parallelStream()
.filter(slot -> slot.getSession().isPresent())
.count();
return (inUse / (float) maxSessionCount) * 100f;
}
public long getLastSessionCreated() {
return slots.parallelStream()
.map(Slot::getLastStarted)
.mapToLong(Instant::toEpochMilli)
.max()
.orElse(0);
}
@Override
public boolean equals(Object o) {
if (!(o instanceof NodeStatus)) {
return false;
}
NodeStatus that = (NodeStatus) o;
return Objects.equals(this.nodeId, that.nodeId) &&
Objects.equals(this.externalUri, that.externalUri) &&
this.maxSessionCount == that.maxSessionCount &&
Objects.equals(this.slots, that.slots) &&
Objects.equals(this.availability, that.availability);
}
@Override
public int hashCode() {
return Objects.hash(nodeId, externalUri, maxSessionCount, slots);
}
private Map<String, Object> toJson() {
return new ImmutableMap.Builder<String, Object>()
.put("id", nodeId)
.put("uri", externalUri)
.put("maxSessions", maxSessionCount)
.put("slots", slots)
.put("availability", availability)
.build();
}
public static NodeStatus fromJson(JsonInput input) {
NodeId nodeId = null;
URI uri = null;
int maxSessions = 0;
Set<Slot> slots = null;
Availability availability = null;
input.beginObject();
while (input.hasNext()) {
switch (input.nextName()) {
case "availability":
availability = input.read(Availability.class);
break;
case "id":
nodeId = input.read(NodeId.class);
break;
case "maxSessions":
maxSessions = input.read(Integer.class);
break;
case "slots":
slots = input.read(new TypeToken<Set<Slot>>(){}.getType());
break;
case "uri":
uri = input.read(URI.class);
break;
default:
input.skipValue();
break;
}
}
input.endObject();
return new NodeStatus(
nodeId,
uri,
maxSessions,
slots,
availability);
}
}
| 1 | 18,174 | Prefer `Stream.anyMatch` instead of iterating over all slots. | SeleniumHQ-selenium | java |
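The review message above maps to a small, mechanical change. A minimal sketch of what it could look like, assuming the `slots`, `Slot.isSupporting`, and `Slot.getSession` members shown in the `NodeStatus` row (this is the reviewer's suggestion, not the merged code):

```java
// Sketch: Stream.anyMatch short-circuits at the first supporting slot,
// whereas filter(...).count() > 0 always traverses every slot.
public boolean hasCapability(Capabilities caps) {
  return slots.stream().anyMatch(slot -> slot.isSupporting(caps));
}

public boolean hasCapacity(Capabilities caps) {
  return slots.stream()
      .filter(slot -> !slot.getSession().isPresent())
      .anyMatch(slot -> slot.isSupporting(caps));
}
```

The result is unchanged for both methods; only the traversal can now end early, which matters on nodes with many slots.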
@@ -533,6 +533,7 @@ CREATE_VIOLATIONS_TABLE = """
`violation_type` enum('UNSPECIFIED',
'ADDED','REMOVED',
'BIGQUERY_VIOLATION',
+ 'BLACKLIST_VIOLATION',
'BUCKET_VIOLATION',
'CLOUD_SQL_VIOLATION',
'FIREWALL_BLACKLIST_VIOLATION', | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQL queries to create Cloud SQL tables."""
CREATE_APPENGINE_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`app_id` varchar(255) DEFAULT NULL,
`dispatch_rules` json DEFAULT NULL,
`auth_domain` varchar(255) DEFAULT NULL,
`location_id` varchar(255) DEFAULT NULL,
`code_bucket` varchar(255) DEFAULT NULL,
`default_cookie_expiration` varchar(255) DEFAULT NULL,
`serving_status` varchar(255) DEFAULT NULL,
`default_hostname` varchar(255) DEFAULT NULL,
`default_bucket` varchar(255) DEFAULT NULL,
`iap` json DEFAULT NULL,
`gcr_domain` varchar(255) DEFAULT NULL,
`raw_application` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_APPENGINE_SERVICES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`app_id` varchar(255) DEFAULT NULL,
`service_id` varchar(255) DEFAULT NULL,
`service` json DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `service_key` (`app_id`, `service_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_APPENGINE_VERSIONS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`app_id` varchar(255) DEFAULT NULL,
`service_id` varchar(255) DEFAULT NULL,
`version_id` varchar(255) DEFAULT NULL,
`version` json DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `version_key` (`app_id`, `service_id`, `version_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_APPENGINE_INSTANCES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`app_id` varchar(255) DEFAULT NULL,
`service_id` varchar(255) DEFAULT NULL,
`version_id` varchar(255) DEFAULT NULL,
`instance_id` varchar(255) DEFAULT NULL,
`instance` json DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `instance_key` (`app_id`, `service_id`, `version_id`,
`instance_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_BACKEND_SERVICES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`affinity_cookie_ttl_sec` int DEFAULT NULL,
`backends` json DEFAULT NULL,
`cdn_policy` json DEFAULT NULL,
`connection_draining` json DEFAULT NULL,
`creation_timestamp` datetime DEFAULT NULL,
`description` varchar(512) DEFAULT NULL,
`enable_cdn` bool DEFAULT NULL,
`health_checks` json DEFAULT NULL,
`iap` json DEFAULT NULL,
`load_balancing_scheme` varchar(255) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`port_name` varchar(255) DEFAULT NULL,
`port` int DEFAULT NULL,
`protocol` varchar(255) DEFAULT NULL,
`region` varchar(255) DEFAULT NULL,
`session_affinity` varchar(255) DEFAULT NULL,
`timeout_sec` varchar(255) DEFAULT NULL,
`raw_backend_service` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_BIGQUERY_DATASETS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`dataset_id` varchar(255) DEFAULT NULL,
`access_domain` varchar(255) DEFAULT NULL,
`access_user_by_email` varchar(255) DEFAULT NULL,
`access_special_group` varchar(255) DEFAULT NULL,
`access_group_by_email` varchar(255) DEFAULT NULL,
`role` varchar(255) DEFAULT NULL,
`access_view_project_id` varchar(255) DEFAULT NULL,
`access_view_table_id` varchar(255) DEFAULT NULL,
`access_view_dataset_id` varchar(255) DEFAULT NULL,
`raw_access_map` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_BUCKETS_ACL_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`bucket` varchar(255) DEFAULT NULL,
`domain` varchar(255) DEFAULT NULL,
`email` varchar(255) DEFAULT NULL,
`entity` varchar(255) DEFAULT NULL,
`entity_id` varchar(255) DEFAULT NULL,
`acl_id` varchar(255) DEFAULT NULL,
`kind` varchar(255) DEFAULT NULL,
`project_team` json DEFAULT NULL,
`role` varchar(255) DEFAULT NULL,
`bucket_acl_selflink` varchar(255) DEFAULT NULL,
`raw_bucket_acl` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_BUCKETS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) NOT NULL,
`bucket_id` varchar(255) DEFAULT NULL,
`bucket_name` varchar(255) DEFAULT NULL,
`bucket_kind` varchar(255) DEFAULT NULL,
`bucket_storage_class` varchar(255) DEFAULT NULL,
`bucket_location` varchar(255) DEFAULT NULL,
`bucket_create_time` datetime DEFAULT NULL,
`bucket_update_time` datetime DEFAULT NULL,
`bucket_selflink` varchar(255) DEFAULT NULL,
`bucket_lifecycle_raw` json DEFAULT NULL,
`raw_bucket` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_CLOUDSQL_INSTANCES_TABLE = """
CREATE TABLE {0} (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) NOT NULL,
`name` varchar(255) DEFAULT NULL,
`project` varchar(255) DEFAULT NULL,
`backend_type` varchar(255) DEFAULT NULL,
`connection_name` varchar(255) DEFAULT NULL,
`current_disk_size` bigint DEFAULT NULL,
`database_version` varchar(255) DEFAULT NULL,
`failover_replica_available` varchar(255) DEFAULT NULL,
`failover_replica_name` varchar(255) DEFAULT NULL,
`instance_type` varchar(255) DEFAULT NULL,
`ipv6_address` varchar(255) DEFAULT NULL,
`kind` varchar(255) DEFAULT NULL,
`master_instance_name` varchar(255) DEFAULT NULL,
`max_disk_size` bigint DEFAULT NULL,
`on_premises_configuration_host_port` varchar(255) DEFAULT NULL,
`on_premises_configuration_kind` varchar(255) DEFAULT NULL,
`region` varchar(255) DEFAULT NULL,
`replica_configuration` json DEFAULT NULL,
`replica_names` json DEFAULT NULL,
`self_link` varchar(255) DEFAULT NULL,
`server_ca_cert` json DEFAULT NULL,
`service_account_email_address` varchar(255) DEFAULT NULL,
`settings_activation_policy` varchar(255) DEFAULT NULL,
`settings_authorized_gae_applications` json DEFAULT NULL,
`settings_availability_type` varchar(255) DEFAULT NULL,
`settings_backup_configuration_binary_log_enabled` varchar(255) DEFAULT NULL,
`settings_backup_configuration_enabled` varchar(255) DEFAULT NULL,
`settings_backup_configuration_kind` varchar(255) DEFAULT NULL,
`settings_backup_configuration_start_time` varchar(255) DEFAULT NULL,
`settings_crash_safe_replication_enabled` varchar(255) DEFAULT NULL,
`settings_data_disk_size_gb` bigint DEFAULT NULL,
`settings_data_disk_type` varchar(255) DEFAULT NULL,
`settings_database_flags` json DEFAULT NULL,
`settings_database_replication_enabled` varchar(255) DEFAULT NULL,
`settings_ip_configuration_ipv4_enabled` varchar(255) DEFAULT NULL,
`settings_ip_configuration_require_ssl` varchar(255) DEFAULT NULL,
`settings_kind` varchar(255) DEFAULT NULL,
`settings_labels` json DEFAULT NULL,
`settings_location_preference_follow_gae_application` varchar(255) DEFAULT NULL,
`settings_location_preference_kind` varchar(255) DEFAULT NULL,
`settings_location_preference_zone` varchar(255) DEFAULT NULL,
`settings_maintenance_window` json DEFAULT NULL,
`settings_pricing_plan` varchar(255) DEFAULT NULL,
`settings_replication_type` varchar(255) DEFAULT NULL,
`settings_settings_version` bigint DEFAULT NULL,
`settings_storage_auto_resize` varchar(255) DEFAULT NULL,
`settings_storage_auto_resize_limit` bigint DEFAULT NULL,
`settings_tier` varchar(255) DEFAULT NULL,
`state` varchar(255) DEFAULT NULL,
`suspension_reason` json DEFAULT NULL,
`raw_cloudsql_instance` json DEFAULT NULL,
PRIMARY KEY (id)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_CLOUDSQL_IPADDRESSES_TABLE = """
CREATE TABLE {0} (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) NOT NULL,
`instance_name` varchar(255) DEFAULT NULL,
`type` varchar(255) DEFAULT NULL,
`ip_address` varchar(255) DEFAULT NULL,
`time_to_retire` datetime DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_CLOUDSQL_IPCONFIGURATION_AUTHORIZEDNETWORKS = """
CREATE TABLE {0} (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) NOT NULL,
`instance_name` varchar(255) DEFAULT NULL,
`kind` varchar(255) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`value` varchar(255) DEFAULT NULL,
`expiration_time` datetime DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_FIREWALL_RULES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`firewall_rule_id` bigint(20) unsigned NOT NULL,
`project_id` varchar(255) NOT NULL,
`firewall_rule_name` varchar(255) DEFAULT NULL,
`firewall_rule_description` varchar(512) DEFAULT NULL,
`firewall_rule_kind` varchar(255) DEFAULT NULL,
`firewall_rule_network` varchar(255) DEFAULT NULL,
`firewall_rule_priority` smallint(5) unsigned,
`firewall_rule_direction` varchar(255) DEFAULT NULL,
`firewall_rule_source_ranges` json DEFAULT NULL,
`firewall_rule_destination_ranges` json DEFAULT NULL,
`firewall_rule_source_tags` json DEFAULT NULL,
`firewall_rule_source_service_accounts` json DEFAULT NULL,
`firewall_rule_target_service_accounts` json DEFAULT NULL,
`firewall_rule_target_tags` json DEFAULT NULL,
`firewall_rule_allowed` json DEFAULT NULL,
`firewall_rule_denied` json DEFAULT NULL,
`firewall_rule_self_link` varchar(255) DEFAULT NULL,
`firewall_rule_create_time` datetime(3) DEFAULT NULL,
`raw_firewall_rule` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_FOLDER_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`folder_id` bigint(20) DEFAULT NULL,
`role` varchar(255) DEFAULT NULL,
`member_type` varchar(255) DEFAULT NULL,
`member_name` varchar(255) DEFAULT NULL,
`member_domain` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_FOLDERS_TABLE = """
CREATE TABLE `{0}` (
`folder_id` bigint(20) unsigned NOT NULL,
`name` varchar(255) NOT NULL,
`display_name` varchar(255) DEFAULT NULL,
`lifecycle_state` enum('ACTIVE','DELETE_REQUESTED',
'DELETED','LIFECYCLE_STATE_UNSPECIFIED') DEFAULT NULL,
`parent_type` varchar(255) DEFAULT NULL,
`parent_id` varchar(255) DEFAULT NULL,
`raw_folder` json DEFAULT NULL,
`create_time` datetime DEFAULT NULL,
PRIMARY KEY (`folder_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_FORWARDING_RULES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL,
`project_id` varchar(255) NOT NULL,
`creation_timestamp` datetime DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`description` varchar(512) DEFAULT NULL,
`region` varchar(255) DEFAULT NULL,
`ip_address` varchar(255) DEFAULT NULL,
`ip_protocol` enum('TCP','UDP','ESP','AH','SCTP','ICMP') DEFAULT NULL,
`port_range` varchar(255) DEFAULT NULL,
`ports` json DEFAULT NULL,
`target` varchar(255) DEFAULT NULL,
`load_balancing_scheme` enum('INTERNAL','EXTERNAL') DEFAULT NULL,
`subnetwork` varchar(255) DEFAULT NULL,
`network` varchar(255) DEFAULT NULL,
`backend_service` varchar(255) DEFAULT NULL,
`raw_forwarding_rule` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
# TODO: Add a RAW_GROUP_MEMBERS_TABLE.
CREATE_GROUP_MEMBERS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`group_id` varchar(255) DEFAULT NULL,
`member_kind` varchar(255) DEFAULT NULL,
`member_role` varchar(255) DEFAULT NULL,
`member_type` varchar(255) DEFAULT NULL,
`member_status` varchar(255) DEFAULT NULL,
`member_id` varchar(255) DEFAULT NULL,
`member_email` varchar(255) DEFAULT NULL,
`raw_member` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_GROUPS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`group_id` varchar(255) DEFAULT NULL,
`group_email` varchar(255) DEFAULT NULL,
`group_kind` varchar(255) DEFAULT NULL,
`direct_member_count` bigint(20) DEFAULT NULL,
`raw_group` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_INSTANCES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`can_ip_forward` bool DEFAULT NULL,
`cpu_platform` varchar(255) DEFAULT NULL,
`creation_timestamp` datetime DEFAULT NULL,
`description` varchar(512) DEFAULT NULL,
`disks` json DEFAULT NULL,
`machine_type` varchar(255) DEFAULT NULL,
`metadata` json DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`network_interfaces` json DEFAULT NULL,
`scheduling` json DEFAULT NULL,
`service_accounts` json DEFAULT NULL,
`status` varchar(255) DEFAULT NULL,
`status_message` varchar(255) DEFAULT NULL,
`tags` json DEFAULT NULL,
`zone` varchar(255) DEFAULT NULL,
`raw_instance` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_INSTANCE_GROUPS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`creation_timestamp` datetime DEFAULT NULL,
`description` varchar(512) DEFAULT NULL,
`instance_urls` json DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`named_ports` json DEFAULT NULL,
`network` varchar(255) DEFAULT NULL,
`region` varchar(255) DEFAULT NULL,
`size` int DEFAULT NULL,
`subnetwork` varchar(255) DEFAULT NULL,
`zone` varchar(255) DEFAULT NULL,
`raw_instance_group` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_INSTANCE_TEMPLATES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`creation_timestamp` datetime DEFAULT NULL,
`description` varchar(512) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`properties` json DEFAULT NULL,
`raw_instance_template` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_INSTANCE_GROUP_MANAGERS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`base_instance_name` varchar(255) DEFAULT NULL,
`creation_timestamp` datetime DEFAULT NULL,
`current_actions` json DEFAULT NULL,
`description` varchar(512) DEFAULT NULL,
`instance_group` varchar(255) DEFAULT NULL,
`instance_template` varchar(255) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`named_ports` json DEFAULT NULL,
`region` varchar(255) DEFAULT NULL,
`target_pools` json DEFAULT NULL,
`target_size` int DEFAULT NULL,
`zone` varchar(255) DEFAULT NULL,
`raw_instance_group_manager` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_ORG_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`org_id` bigint(20) DEFAULT NULL,
`role` varchar(255) DEFAULT NULL,
`member_type` varchar(255) DEFAULT NULL,
`member_name` varchar(255) DEFAULT NULL,
`member_domain` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_ORGANIZATIONS_TABLE = """
CREATE TABLE `{0}` (
`org_id` bigint(20) unsigned NOT NULL,
`name` varchar(255) NOT NULL,
`display_name` varchar(255) DEFAULT NULL,
`lifecycle_state` enum('LIFECYCLE_STATE_UNSPECIFIED','ACTIVE',
'DELETE_REQUESTED', 'DELETED') NOT NULL,
`raw_org` json DEFAULT NULL,
`creation_time` datetime DEFAULT NULL,
PRIMARY KEY (`org_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_PROJECT_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) DEFAULT NULL,
`role` varchar(255) DEFAULT NULL,
`member_type` varchar(255) DEFAULT NULL,
`member_name` varchar(255) DEFAULT NULL,
`member_domain` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_PROJECT_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) NOT NULL,
`project_id` varchar(255) NOT NULL,
`project_name` varchar(255) DEFAULT NULL,
`lifecycle_state` enum('LIFECYCLE_STATE_UNSPECIFIED','ACTIVE',
'DELETE_REQUESTED','DELETED') NOT NULL,
`parent_type` varchar(255) DEFAULT NULL,
`parent_id` varchar(255) DEFAULT NULL,
`raw_project` json DEFAULT NULL,
`create_time` datetime DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `project_id_UNIQUE` (`project_id`),
UNIQUE KEY `project_number_UNIQUE` (`project_number`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_RAW_BUCKETS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) DEFAULT NULL,
`buckets` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_RAW_FOLDER_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`folder_id` bigint(20) DEFAULT NULL,
`iam_policy` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_RAW_ORG_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`org_id` bigint(20) DEFAULT NULL,
`iam_policy` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_RAW_PROJECT_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) DEFAULT NULL,
`iam_policy` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_SERVICE_ACCOUNTS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`email` varchar(255) DEFAULT NULL,
`oauth2_client_id` varchar(255) DEFAULT NULL,
`account_keys` json DEFAULT NULL,
`raw_service_account` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
# TODO: define the violation_type enum as a list
CREATE_VIOLATIONS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`resource_type` varchar(255) NOT NULL,
`resource_id` varchar(255) NOT NULL,
`rule_name` varchar(255) DEFAULT NULL,
`rule_index` int DEFAULT NULL,
`violation_type` enum('UNSPECIFIED',
'ADDED','REMOVED',
'BIGQUERY_VIOLATION',
'BUCKET_VIOLATION',
'CLOUD_SQL_VIOLATION',
'FIREWALL_BLACKLIST_VIOLATION',
'FIREWALL_MATCHES_VIOLATION',
'FIREWALL_REQUIRED_VIOLATION',
'FIREWALL_WHITELIST_VIOLATION',
'FORWARDING_RULE_VIOLATION',
'GROUP_VIOLATION',
'IAP_VIOLATION',
'INSTANCE_NETWORK_INTERFACE_VIOLATION') NOT NULL,
`violation_data` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
| 1 | 28,557 | create_tables isn't used in 2.0 (should probably be deleted?) | forseti-security-forseti-security | py |
@@ -29,6 +29,7 @@ namespace OpenTelemetry.Exporter
internal Func<DateTimeOffset> GetUtcNowDateTimeOffset = () => DateTimeOffset.UtcNow;
private int scrapeResponseCacheDurationMilliseconds = 10 * 1000;
+ private IReadOnlyCollection<string> httpListenerPrefixes = new string[] { "http://*:80/" };
#if NETCOREAPP3_1_OR_GREATER
/// <summary> | 1 | // <copyright file="PrometheusExporterOptions.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using OpenTelemetry.Internal;
namespace OpenTelemetry.Exporter
{
/// <summary>
/// <see cref="PrometheusExporter"/> options.
/// </summary>
public class PrometheusExporterOptions
{
internal const string DefaultScrapeEndpointPath = "/metrics";
internal Func<DateTimeOffset> GetUtcNowDateTimeOffset = () => DateTimeOffset.UtcNow;
private int scrapeResponseCacheDurationMilliseconds = 10 * 1000;
#if NETCOREAPP3_1_OR_GREATER
/// <summary>
/// Gets or sets a value indicating whether or not an http listener
/// should be started. Default value: False.
/// </summary>
public bool StartHttpListener { get; set; }
#else
/// <summary>
/// Gets or sets a value indicating whether or not an http listener
/// should be started. Default value: True.
/// </summary>
public bool StartHttpListener { get; set; } = true;
#endif
/// <summary>
/// Gets or sets the prefixes to use for the http listener. Default
/// value: http://*:80/.
/// </summary>
public IReadOnlyCollection<string> HttpListenerPrefixes { get; set; } = new string[] { "http://*:80/" };
/// <summary>
/// Gets or sets the path to use for the scraping endpoint. Default value: /metrics.
/// </summary>
public string ScrapeEndpointPath { get; set; } = DefaultScrapeEndpointPath;
/// <summary>
/// Gets or sets the cache duration in milliseconds for scrape responses. Default value: 10,000 (10 seconds).
/// </summary>
/// <remarks>
/// Note: Specify 0 to disable response caching.
/// </remarks>
public int ScrapeResponseCacheDurationMilliseconds
{
get => this.scrapeResponseCacheDurationMilliseconds;
set
{
Guard.Range(value, nameof(value), min: 0);
this.scrapeResponseCacheDurationMilliseconds = value;
}
}
}
}
| 1 | 23,134 | Now I start to wonder, do we want to have `80` as the default or `9090` (or depending on whether we are exposing it via `PrometheusExporterMiddleware` vs. `PrometheusExporterHttpServer`)? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -980,7 +980,9 @@ TEST(Parser, UserOperation) {
{
GQLParser parser;
std::string query = "CREATE USER IF NOT EXISTS user1 WITH PASSWORD \"aaa\" , "
- "FIRSTNAME \"a\", LASTNAME \"a\", EMAIL \"a\", PHONE \"111\"";
+ "ACCOUNT LOCK, MAX_QUERIES_PER_HOUR 1, "
+ "MAX_UPDATES_PER_HOUR 2, MAX_CONNECTIONS_PER_HOUR 3, "
+ "MAX_USER_CONNECTIONS 4";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
auto& sentence = result.value(); | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include <gtest/gtest.h>
#include "base/Base.h"
#include "parser/GQLParser.h"
// TODO(dutor) Inspect the internal structures to check on the syntax and semantics
namespace nebula {
TEST(Parser, Go) {
{
GQLParser parser;
std::string query = "GO FROM 1 OVER friend";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1 OVER friend;";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO 2 STEPS FROM 1 OVER friend";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO UPTO 2 STEPS FROM 1 OVER friend";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1 OVER friend";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1 OVER friend REVERSELY";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1 OVER friend YIELD person.name";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1 OVER friend "
"YIELD $^.manager.name,$^.manager.age";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1 OVER friend "
"YIELD $$.manager.name,$$.manager.age";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
// Test wrong syntax
{
GQLParser parser;
std::string query = "GO FROM 1 OVER friend "
"YIELD $^[manager].name,$^[manager].age";
auto result = parser.parse(query);
ASSERT_FALSE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1,2,3 OVER friend";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM $-.id OVER friend";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM $-.col1 OVER friend";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM $-.id OVER friend";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1,2,3 OVER friend WHERE person.name == \"dutor\"";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, SpaceOperation) {
{
GQLParser parser;
std::string query = "CREATE SPACE default_space(partition_num=9, replica_factor=3)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "USE default_space";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "DESC SPACE default_space";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "DESCRIBE SPACE default_space";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "SHOW CREATE SPACE default_space";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "DROP SPACE default_space";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, TagOperation) {
{
GQLParser parser;
std::string query = "CREATE TAG person(name string, age int, "
"married bool, salary double, create_time timestamp)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
// Test empty prop
{
GQLParser parser;
std::string query = "CREATE TAG person()";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "CREATE TAG man(name string, age int, "
"married bool, salary double, create_time timestamp)"
"ttl_duration = 100";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "CREATE TAG woman(name string, age int, "
"married bool, salary double, create_time timestamp)"
"ttl_duration = 100, ttl_col = create_time";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "ALTER TAG person ADD (col1 int, col2 string), "
"CHANGE (married int, salary int), "
"DROP (age, create_time)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "ALTER TAG man ttl_duration = 200";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "ALTER TAG woman ttl_duration = 50, ttl_col = age";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "ALTER TAG woman ADD (col6 int) ttl_duration = 200";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "DESCRIBE TAG person";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "DESC TAG person";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "SHOW CREATE TAG person";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "DROP TAG person";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, EdgeOperation) {
{
GQLParser parser;
std::string query = "CREATE EDGE e1(name string, age int, "
"married bool, salary double, create_time timestamp)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
// Test empty prop
{
GQLParser parser;
std::string query = "CREATE EDGE e1()";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "CREATE EDGE man(name string, age int, "
"married bool, salary double, create_time timestamp)"
"ttl_duration = 100";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "CREATE EDGE woman(name string, age int, "
"married bool, salary double, create_time timestamp)"
"ttl_duration = 100, ttl_col = create_time";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "ALTER EDGE e1 ADD (col1 int, col2 string), "
"CHANGE (married int, salary int), "
"DROP (age, create_time)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "ALTER EDGE man ttl_duration = 200";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "ALTER EDGE woman ttl_duration = 50, ttl_col = age";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "ALTER EDGE woman ADD (col6 int) ttl_duration = 200";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "DESCRIBE EDGE e1";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "DESC EDGE e1";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "SHOW CREATE EDGE e1";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "DROP EDGE e1";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
// Column space format test, expected SyntaxError
TEST(Parser, ColumnSpacesTest) {
{
GQLParser parser;
std::string query = "CREATE TAG person(name, age, married bool)";
auto result = parser.parse(query);
ASSERT_FALSE(result.ok());
}
{
GQLParser parser;
std::string query = "CREATE TAG person(name, age, married)";
auto result = parser.parse(query);
ASSERT_FALSE(result.ok());
}
{
GQLParser parser;
std::string query = "CREATE TAG man(name string, age)"
"ttl_duration = 100";
auto result = parser.parse(query);
ASSERT_FALSE(result.ok());
}
{
GQLParser parser;
std::string query = "ALTER TAG person ADD (col1 int, col2 string), "
"CHANGE (married int, salary int), "
"DROP (age int, create_time timestamp)";
auto result = parser.parse(query);
ASSERT_FALSE(result.ok());
}
{
GQLParser parser;
std::string query = "ALTER TAG person ADD (col1, col2), "
"CHANGE (married int, salary int), "
"DROP (age, create_time)";
auto result = parser.parse(query);
ASSERT_FALSE(result.ok());
}
{
GQLParser parser;
std::string query = "ALTER TAG person ADD (col1 int, col2 string), "
"CHANGE (married, salary), "
"DROP (age, create_time)";
auto result = parser.parse(query);
ASSERT_FALSE(result.ok());
}
{
GQLParser parser;
std::string query = "CREATE EDGE man(name, age, married bool) "
"ttl_duration = 100";
auto result = parser.parse(query);
ASSERT_FALSE(result.ok());
}
{
GQLParser parser;
std::string query = "ALTER EDGE woman ADD (col6) ttl_duration = 200";
auto result = parser.parse(query);
ASSERT_FALSE(result.ok());
}
{
GQLParser parser;
std::string query = "ALTER EDGE woman CHANGE (col6) ttl_duration = 200";
auto result = parser.parse(query);
ASSERT_FALSE(result.ok());
}
{
GQLParser parser;
std::string query = "ALTER EDGE woman DROP (col6 int) ttl_duration = 200";
auto result = parser.parse(query);
ASSERT_FALSE(result.ok());
}
}
TEST(Parser, Set) {
{
GQLParser parser;
std::string query = "GO FROM 1 OVER friend INTERSECT "
"GO FROM 2 OVER friend";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1 OVER friend UNION "
"GO FROM 2 OVER friend";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1 OVER friend MINUS "
"GO FROM 2 OVER friend";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1 OVER friend MINUS "
"GO FROM 2 OVER friend UNION "
"GO FROM 2 OVER friend INTERSECT "
"GO FROM 3 OVER friend";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "(GO FROM 1 OVER friend | "
"GO FROM 2 OVER friend) UNION "
"GO FROM 3 OVER friend";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
// pipe have priority over set
std::string query = "GO FROM 1 OVER friend | "
"GO FROM 2 OVER friend UNION "
"GO FROM 3 OVER friend";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "(GO FROM 1 OVER friend UNION "
"GO FROM 2 OVER friend) | "
"GO FROM $- OVER friend";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, Pipe) {
{
GQLParser parser;
std::string query = "GO FROM 1 OVER friend | "
"GO FROM 2 OVER friend | "
"GO FROM 3 OVER friend";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1 OVER friend MINUS "
"GO FROM 2 OVER friend | "
"GO FROM 3 OVER friend";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, InsertVertex) {
{
GQLParser parser;
std::string query = "INSERT VERTEX person(name,age,married,salary,create_time) "
"VALUES 12345:(\"dutor\", 30, true, 3.14, 1551331900)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
// Test one vertex multi tags
{
GQLParser parser;
std::string query = "INSERT VERTEX person(name, age, id), student(name, number, id) "
"VALUES 12345:(\"zhangsan\", 18, 1111, \"zhangsan\", 20190527, 1111)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
// Test multi vertex multi tags
{
GQLParser parser;
std::string query = "INSERT VERTEX person(name, age, id), student(name, number, id) "
"VALUES 12345:(\"zhangsan\", 18, 1111, \"zhangsan\", 20190527, 1111),"
"12346:(\"lisi\", 20, 1112, \"lisi\", 20190413, 1112)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
// Test wrong syntax
{
GQLParser parser;
std::string query = "INSERT VERTEX person(name, age, id), student(name, number) "
"VALUES 12345:(\"zhangsan\", 18, 1111), ( \"zhangsan\", 20190527),"
"12346:(\"lisi\", 20, 1112), (\"lisi\", 20190413)";
auto result = parser.parse(query);
ASSERT_FALSE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "INSERT VERTEX person(name,age,married,salary,create_time) "
"VALUES -12345:(\"dutor\", 30, true, 3.14, 1551331900)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "INSERT VERTEX person(name,age,married,salary,create_time) "
"VALUES hash(\"dutor\"):(\"dutor\", 30, true, 3.14, 1551331900)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "INSERT VERTEX person(name,age,married,salary,create_time) "
"VALUES uuid(\"dutor\"):(\"dutor\", 30, true, 3.14, 1551331900)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
// Test insert empty value
{
GQLParser parser;
std::string query = "INSERT VERTEX person() "
"VALUES 12345:()";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
// Test insert prop unterminated ""
{
GQLParser parser;
std::string query = "INSERT VERTEX person(name, age) "
"VALUES 12345:(\"dutor, 30)";
auto result = parser.parse(query);
ASSERT_TRUE(result.status().isSyntaxError());
}
// Test insert prop unterminated ''
{
GQLParser parser;
std::string query = "INSERT VERTEX person(name, age) "
"VALUES 12345:(\'dutor, 30)";
auto result = parser.parse(query);
ASSERT_TRUE(result.status().isSyntaxError());
}
{
GQLParser parser;
std::string query = "INSERT VERTEX person(name, age) "
"VALUES hash(\"dutor\"):(\'dutor, 30)";
auto result = parser.parse(query);
ASSERT_TRUE(result.status().isSyntaxError());
}
{
GQLParser parser;
std::string query = "INSERT VERTEX person(name, age) "
"VALUES uuid(\"dutor\"):(\'dutor, 30)";
auto result = parser.parse(query);
ASSERT_TRUE(result.status().isSyntaxError());
}
}
TEST(Parser, UpdateVertex) {
{
GQLParser parser;
std::string query = "UPDATE VERTEX 12345 "
"SET person.name=\"dutor\", person.age=30, "
"job.salary=10000, person.create_time=1551331999";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "UPDATE VERTEX 12345 "
"SET person.name=\"dutor\", person.age=$^.person.age + 1, "
"person.married=true "
"WHEN $^.job.salary > 10000 && $^.person.age > 30";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "UPDATE VERTEX 12345 "
"SET person.name=\"dutor\", person.age=31, person.married=true, "
"job.salary=1.1 * $^.person.create_time / 31536000 "
"YIELD $^.person.name AS Name, job.name AS Title, "
"$^.job.salary AS Salary";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "UPDATE VERTEX 12345 "
"SET person.name=\"dutor\", person.age=30, person.married=true "
"WHEN $^.job.salary > 10000 && $^.job.name == \"CTO\" || "
"$^.person.age < 30"
"YIELD $^.person.name AS Name, $^.job.salary AS Salary, "
"$^.person.create_time AS Time";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "UPSERT VERTEX 12345 "
"SET person.name=\"dutor\", person.age = 30, job.name =\"CTO\" "
"WHEN $^.job.salary > 10000 "
"YIELD $^.person.name AS Name, $^.job.salary AS Salary, "
"$^.person.create_time AS Time";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, InsertEdge) {
{
GQLParser parser;
std::string query = "INSERT EDGE transfer(amount, time) "
"VALUES 12345->-54321:(3.75, 1537408527)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "INSERT EDGE transfer(amount, time) "
"VALUES hash(\"from\")->hash(\"to\"):(3.75, 1537408527)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "INSERT EDGE transfer(amount, time) "
"VALUES uuid(\"from\")->uuid(\"to\"):(3.75, 1537408527)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
// multi edge
{
GQLParser parser;
std::string query = "INSERT EDGE transfer(amount, time) "
"VALUES 12345->54321@1537408527:(3.75, 1537408527),"
"56789->98765@1537408527:(3.5, 1537408527)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
// Test insert empty value
{
GQLParser parser;
std::string query = "INSERT EDGE transfer() "
"VALUES 12345->54321@1537408527:()";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "INSERT EDGE NO OVERWRITE transfer(amount, time) "
"VALUES -12345->54321:(3.75, 1537408527)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "INSERT EDGE transfer(amount, time) "
"VALUES 12345->54321@1537408527:(3.75, 1537408527)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "INSERT EDGE NO OVERWRITE transfer(amount, time) "
"VALUES 12345->-54321@1537408527:(3.75, 1537408527)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
// Test insert empty value
{
GQLParser parser;
std::string query = "INSERT EDGE NO OVERWRITE transfer() "
"VALUES 12345->54321@1537408527:()";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, UpdateEdge) {
{
GQLParser parser;
std::string query = "UPDATE EDGE 12345 -> 54321 OF transfer "
"SET amount=3.14, time=1537408527";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "UPDATE EDGE 12345 -> 54321@789 OF transfer "
"SET amount=3.14,time=1537408527 "
"WHEN transfer.amount > 3.14 && $^.person.name == \"dutor\"";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "UPDATE EDGE 12345 -> 54321 OF transfer "
"SET amount = 3.14 + $^.job.salary, time = 1537408527 "
"WHEN transfer.amount > 3.14 || $^.job.salary >= 10000 "
"YIELD transfer.amount, transfer.time AS Time, "
"$^.person.name AS PayFrom";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "UPSERT EDGE 12345 -> 54321 @789 OF transfer "
"SET amount=$^.job.salary + 3.14, time=1537408527 "
"WHEN transfer.amount > 3.14 && $^.job.salary >= 10000 "
"YIELD transfer.amount,transfer.time, $^.person.name AS Name";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, DeleteVertex) {
{
GQLParser parser;
std::string query = "DELETE VERTEX 12345";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "DELETE VERTEX hash(\"zhangsan\")";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, DeleteEdge) {
{
GQLParser parser;
std::string query = "DELETE EDGE 12345 -> 54321";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "DELETE EDGE 123 -> 321,456 -> 654,789 -> 987";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "DELETE EDGE 12345 -> 54321 WHERE amount > 3.14";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "DELETE EDGE 123 -> 321,456 -> 654,789 -> 987 WHERE amount > 3.14";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, FetchVertex) {
{
GQLParser parser;
std::string query = "FETCH PROP ON person 1";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "FETCH PROP ON person 1, 2, 3";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "FETCH PROP ON person hash(\"dutor\")";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "FETCH PROP ON person hash(\"dutor\"), hash(\"darion\")";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "FETCH PROP ON person hash(\"dutor\") "
"YIELD person.name, person.age";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "FETCH PROP ON person hash(\"dutor\"), hash(\"darion\") "
"YIELD person.name, person.age";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1 over edu | "
"FETCH PROP ON person $- YIELD person.name, person.age";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "$var = GO FROM 1 over e1; "
"FETCH PROP ON person $var.id YIELD person.name, person.age";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "FETCH PROP ON person 1,2,3 "
"YIELD DISTINCT person.name, person.age";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "FETCH PROP ON person uuid(\"dutor\")";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "FETCH PROP ON person uuid(\"dutor\"), uuid(\"darion\")";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "FETCH PROP ON person uuid(\"dutor\") "
"YIELD person.name, person.age";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "FETCH PROP ON person uuid(\"dutor\"), uuid(\"darion\") "
"YIELD person.name, person.age";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, FetchEdge) {
{
GQLParser parser;
std::string query = "FETCH PROP ON transfer 12345 -> -54321";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "FETCH PROP ON transfer 12345 -> -54321 "
"YIELD transfer.time";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 12345 OVER transfer "
"YIELD transfer.time";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 12345 OVER transfer "
"YIELD transfer._src AS s, serve._dst AS d | "
"FETCH PROP ON transfer $-.s -> $-.d YIELD transfer.amount";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "$var = GO FROM 12345 OVER transfer "
"YIELD transfer._src AS s, edu._dst AS d; "
"FETCH PROP ON service $var.s -> $var.d YIELD service.amount";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, Find) {
{
GQLParser parser;
std::string query = "FIND name FROM person";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "FIND name, salary, age FROM person";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "FIND name, salary, age FROM person WHERE gender == \"man\"";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "FIND amount, time FROM transfer WHERE amount > 1000";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, AdminOperation) {
{
GQLParser parser;
std::string query = "ADD HOSTS 127.0.0.1:1000, 127.0.0.1:9000";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "SHOW HOSTS";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "REMOVE HOSTS 127.0.0.1:1000, 127.0.0.1:9000";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "SHOW SPACES";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "SHOW TAGS";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "SHOW EDGES";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "SHOW CREATE SPACE default_space";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "SHOW CREATE TAG person";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "SHOW CREATE EDGE e1";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, UserOperation) {
{
GQLParser parser;
std::string query = "CREATE USER user1 WITH PASSWORD \"aaa\" ";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
auto& sentence = result.value();
EXPECT_EQ(query, sentence->toString());
}
{
GQLParser parser;
std::string query = "CREATE USER IF NOT EXISTS user1 WITH PASSWORD \"aaa\" , "
"FIRSTNAME \"a\", LASTNAME \"a\", EMAIL \"a\", PHONE \"111\"";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
auto& sentence = result.value();
EXPECT_EQ(query, sentence->toString());
}
{
GQLParser parser;
std::string query = "ALTER USER user1 WITH FIRSTNAME \"a\","
" LASTNAME \"a\", EMAIL \"a\", PHONE \"111\"";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
auto& sentence = result.value();
EXPECT_EQ(query, sentence->toString());
}
{
GQLParser parser;
std::string query = "DROP USER user1";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
auto& sentence = result.value();
EXPECT_EQ(query, sentence->toString());
}
{
GQLParser parser;
std::string query = "DROP USER IF EXISTS user1";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
auto& sentence = result.value();
EXPECT_EQ(query, sentence->toString());
}
{
GQLParser parser;
std::string query = "CHANGE PASSWORD account FROM \"old password\" TO \"new password\"";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
auto& sentence = result.value();
EXPECT_EQ(query, sentence->toString());
}
{
GQLParser parser;
std::string query = "GRANT ROLE ADMIN ON spacename TO account";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
auto& sentence = result.value();
EXPECT_EQ(query, sentence->toString());
}
{
GQLParser parser;
std::string query = "REVOKE ROLE ADMIN ON spacename FROM account";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
auto& sentence = result.value();
EXPECT_EQ(query, sentence->toString());
}
{
GQLParser parser;
std::string query = "SHOW ROLES IN spacename";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
auto& sentence = result.value();
EXPECT_EQ(query, sentence->toString());
}
{
GQLParser parser;
std::string query = "SHOW USER account";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
auto& sentence = result.value();
EXPECT_EQ(query, sentence->toString());
}
{
GQLParser parser;
std::string query = "SHOW USERS";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
auto& sentence = result.value();
EXPECT_EQ(query, sentence->toString());
}
}
TEST(Parser, UnreservedKeywords) {
{
GQLParser parser;
std::string query = "CREATE TAG tag1(space string, spaces string, "
"email string, password string, roles string)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "CREATE EDGE edge1(space string, spaces string, "
"email string, password string, roles string)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 123 OVER guest WHERE $-.EMAIL";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM UUID(\"tom\") OVER guest WHERE $-.EMAIL";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 123 OVER like YIELD $$.tag1.EMAIL, like.users,"
"like._src, like._dst, like.type, $^.tag2.SPACE "
"| ORDER BY $-.SPACE";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM UUID(\"tom\") OVER like YIELD $$.tag1.EMAIL, like.users,"
"like._src, like._dst, like.type, $^.tag2.SPACE "
"| ORDER BY $-.SPACE";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "$var = GO FROM 123 OVER like;GO FROM $var.SPACE OVER like";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "$var = GO FROM UUID(\"tom\") OVER like;GO FROM $var.SPACE OVER like";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, Annotation) {
{
GQLParser parser;
std::string query = "show spaces /* test comment....";
auto result = parser.parse(query);
ASSERT_TRUE(result.status().isSyntaxError());
}
{
GQLParser parser;
std::string query = "// test comment....";
auto result = parser.parse(query);
ASSERT_TRUE(result.status().isStatementEmpty());
}
{
GQLParser parser;
std::string query = "# test comment....";
auto result = parser.parse(query);
ASSERT_TRUE(result.status().isStatementEmpty());
}
{
GQLParser parser;
std::string query = "-- test comment....";
auto result = parser.parse(query);
ASSERT_TRUE(result.status().isStatementEmpty());
}
{
GQLParser parser;
std::string query = "/* test comment....*/";
auto result = parser.parse(query);
ASSERT_TRUE(result.status().isStatementEmpty());
}
{
GQLParser parser;
std::string query = "CREATE TAG TAG1(space string) // test....";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "CREATE TAG TAG1(space string) -- test....";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "CREATE TAG TAG1(space string) # test....";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "CREATE TAG TAG1/* tag name */(space string)";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "CREATE TAG TAG1/* tag name */(space string) // test....";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, DownloadAndIngest) {
{
GQLParser parser;
std::string query = "DOWNLOAD HDFS \"hdfs://127.0.0.1:9090/data\"";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "INGEST";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, Agg) {
{
GQLParser parser;
std::string query = "ORDER BY $-.id";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1 over friend "
"YIELD friend.name as name | "
"ORDER BY name";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1 over friend "
"YIELD friend.name as name | "
"ORDER BY $-.name";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1 over friend "
"YIELD friend.name as name | "
"ORDER BY $-.name ASC";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1 over friend "
"YIELD friend.name as name | "
"ORDER BY $-.name DESC";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1 over friend "
"YIELD friend.name as name, friend.age as age | "
"ORDER BY $-.name ASC, $-.age DESC";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1 over friend "
"YIELD friend.name as name, friend.age as age | "
"ORDER BY name ASC, age DESC";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, ReentrantRecoveryFromFailure) {
GQLParser parser;
{
std::string query = "USE dumy tag_name";
ASSERT_FALSE(parser.parse(query).ok());
}
{
std::string query = "USE space_name";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, IllegalCharacter) {
GQLParser parser;
{
std::string query = "USE space;";
ASSERT_FALSE(parser.parse(query).ok());
}
{
std::string query = "USE space_name;USE space";
ASSERT_FALSE(parser.parse(query).ok());
}
}
TEST(Parser, Distinct) {
{
GQLParser parser;
std::string query = "GO FROM 1 over friend "
"YIELD DISTINCT friend.name as name, friend.age as age";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
// syntax error
std::string query = "GO FROM 1 over friend "
"YIELD friend.name as name, DISTINCT friend.age as age";
auto result = parser.parse(query);
ASSERT_TRUE(!result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GO FROM 1 OVER like "
"| GO FROM $-.id OVER like | GO FROM $-.id OVER serve "
"YIELD DISTINCT serve._dst, $$.team.name";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, ConfigOperation) {
{
GQLParser parser;
std::string query = "SHOW VARIABLES";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "SHOW VARIABLES GRAPH";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "UPDATE VARIABLES storage:name=value";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GET VARIABLES Meta:name";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "UPDATE VARIABLES load_data_interval_secs=120";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
{
GQLParser parser;
std::string query = "GET VARIABLES load_data_interval_secs";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
TEST(Parser, BalanceOperation) {
{
GQLParser parser;
std::string query = "BALANCE LEADER";
auto result = parser.parse(query);
ASSERT_TRUE(result.ok()) << result.status();
}
}
} // namespace nebula
| 1 | 18,243 | Try an illegal case and check that the result is a syntax error. | vesoft-inc-nebula | cpp |
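A minimal sketch of what this comment asks for, modeled on the Annotation tests above that already assert `isSyntaxError()`; the exact illegal query ("BALANCE LEADER 1") is a hypothetical choice, not taken from the project:

{
    GQLParser parser;
    // Hypothetical illegal input: a stray token after BALANCE LEADER.
    std::string query = "BALANCE LEADER 1";
    auto result = parser.parse(query);
    ASSERT_TRUE(result.status().isSyntaxError());
}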
@@ -23,6 +23,7 @@ void MemoryDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
added_label_.Reshape(batch_size_, 1, 1, 1);
data_ = NULL;
labels_ = NULL;
+ needs_reshape_ = false;
added_data_.cpu_data();
added_label_.cpu_data();
} | 1 | #include <vector>
#include "caffe/data_layers.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
namespace caffe {
template <typename Dtype>
void MemoryDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
batch_size_ = this->layer_param_.memory_data_param().batch_size();
channels_ = this->layer_param_.memory_data_param().channels();
height_ = this->layer_param_.memory_data_param().height();
width_ = this->layer_param_.memory_data_param().width();
size_ = channels_ * height_ * width_;
CHECK_GT(batch_size_ * size_, 0) <<
"batch_size, channels, height, and width must be specified and"
" positive in memory_data_param";
top[0]->Reshape(batch_size_, channels_, height_, width_);
top[1]->Reshape(batch_size_, 1, 1, 1);
added_data_.Reshape(batch_size_, channels_, height_, width_);
added_label_.Reshape(batch_size_, 1, 1, 1);
data_ = NULL;
labels_ = NULL;
added_data_.cpu_data();
added_label_.cpu_data();
}
template <typename Dtype>
void MemoryDataLayer<Dtype>::AddDatumVector(const vector<Datum>& datum_vector) {
CHECK(!has_new_data_) <<
"Can't add Datum when earlier ones haven't been consumed"
<< " by the upper layers";
size_t num = datum_vector.size();
CHECK_GT(num, 0) << "There is no datum to add";
CHECK_LE(num, batch_size_) <<
"The number of added datum must be no greater than the batch size";
// Apply data transformations (mirror, scale, crop...)
this->data_transformer_.Transform(datum_vector, &added_data_);
// Copy Labels
Dtype* top_label = added_label_.mutable_cpu_data();
for (int item_id = 0; item_id < num; ++item_id) {
top_label[item_id] = datum_vector[item_id].label();
}
// num_images == batch_size_
Dtype* top_data = added_data_.mutable_cpu_data();
Reset(top_data, top_label, batch_size_);
has_new_data_ = true;
}
template <typename Dtype>
void MemoryDataLayer<Dtype>::Reset(Dtype* data, Dtype* labels, int n) {
CHECK(data);
CHECK(labels);
CHECK_EQ(n % batch_size_, 0) << "n must be a multiple of batch size";
data_ = data;
labels_ = labels;
n_ = n;
pos_ = 0;
}
template <typename Dtype>
void MemoryDataLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
  CHECK(data_) << "MemoryDataLayer needs to be initialized by calling Reset";
top[0]->set_cpu_data(data_ + pos_ * size_);
top[1]->set_cpu_data(labels_ + pos_);
pos_ = (pos_ + batch_size_) % n_;
has_new_data_ = false;
}
INSTANTIATE_CLASS(MemoryDataLayer);
REGISTER_LAYER_CLASS(MEMORY_DATA, MemoryDataLayer);
} // namespace caffe
| 1 | 31,165 | Initialize `needs_reshape_` with `true` and call the `Reshape` method. | BVLC-caffe | cpp |
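A hedged sketch of the suggested change as it might land in `DataLayerSetUp`, replacing the patch's `needs_reshape_ = false;`. That `MemoryDataLayer` gains a `Reshape` override is an assumption here; this snapshot of the file defines no such method:

  needs_reshape_ = true;  // start with a reshape pending, per the review
  Reshape(bottom, top);   // assumption: a MemoryDataLayer::Reshape override exists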
@@ -936,7 +936,10 @@ EOM
# @private
RANDOM_ORDERING = lambda do |list|
Kernel.srand RSpec.configuration.seed
- ordering = list.sort_by { Kernel.rand(list.size) }
+ orders = list.map {|x| !(x.respond_to?(:order)) || x.order.nil? || x.order == :random ? Kernel.rand(list.size) : 1}
+ zipped = list.zip(orders)
+
+ ordering = zipped.sort_by { |x| x[1] }.map { |x| x.first }
Kernel.srand # reset random generation
ordering
end | 1 | require 'fileutils'
require 'rspec/core/backtrace_cleaner'
require 'rspec/core/ruby_project'
module RSpec
module Core
# Stores runtime configuration information.
#
# Configuration options are loaded from `~/.rspec`, `.rspec`,
# `.rspec-local`, command line switches, and the `SPEC_OPTS` environment
# variable (listed in lowest to highest precedence; for example, an option
# in `~/.rspec` can be overridden by an option in `.rspec-local`).
#
# @example Standard settings
# RSpec.configure do |c|
# c.drb = true
# c.drb_port = 1234
# c.default_path = 'behavior'
# end
#
# @example Hooks
# RSpec.configure do |c|
# c.before(:suite) { establish_connection }
# c.before(:each) { log_in_as :authorized }
# c.around(:each) { |ex| Database.transaction(&ex) }
# end
#
# @see RSpec.configure
# @see Hooks
class Configuration
include RSpec::Core::Hooks
class MustBeConfiguredBeforeExampleGroupsError < StandardError; end
# @private
def self.define_reader(name)
define_method(name) do
variable = instance_variable_defined?("@#{name}") ? instance_variable_get("@#{name}") : nil
value_for(name, variable)
end
end
# @private
def self.deprecate_alias_key
RSpec.warn_deprecation <<-MESSAGE
The :alias option to add_setting is deprecated. Use :alias_with on the original setting instead.
Called from #{caller(0)[5]}
MESSAGE
end
# @private
def self.define_aliases(name, alias_name)
alias_method alias_name, name
alias_method "#{alias_name}=", "#{name}="
define_predicate_for alias_name
end
# @private
def self.define_predicate_for(*names)
names.each {|name| alias_method "#{name}?", name}
end
# @private
#
# Invoked by the `add_setting` instance method. Use that method on a
# `Configuration` instance rather than this class method.
def self.add_setting(name, opts={})
raise "Use the instance add_setting method if you want to set a default" if opts.has_key?(:default)
if opts[:alias]
deprecate_alias_key
define_aliases(opts[:alias], name)
else
attr_writer name
define_reader name
define_predicate_for name
end
[opts[:alias_with]].flatten.compact.each do |alias_name|
define_aliases(name, alias_name)
end
end
# @macro [attach] add_setting
# @attribute $1
# Path to use if no path is provided to the `rspec` command (default:
# `"spec"`). Allows you to just type `rspec` instead of `rspec spec` to
# run all the examples in the `spec` directory.
add_setting :default_path
# Run examples over DRb (default: `false`). RSpec doesn't supply the DRb
# server, but you can use tools like spork.
add_setting :drb
# The drb_port (default: nil).
add_setting :drb_port
# Default: `$stderr`.
add_setting :error_stream
# Clean up and exit after the first failure (default: `false`).
add_setting :fail_fast
# The exit code to return if there are any failures (default: 1).
add_setting :failure_exit_code
# Determines the order in which examples are run (default: OS standard
# load order for files, declaration order for groups and examples).
define_reader :order
# Indicates files configured to be required
define_reader :requires
# Returns dirs that have been prepended to the load path by #lib=
define_reader :libs
# Default: `$stdout`.
# Also known as `output` and `out`
add_setting :output_stream, :alias_with => [:output, :out]
# Load files matching this pattern (default: `'**/*_spec.rb'`)
add_setting :pattern, :alias_with => :filename_pattern
# Report the times for the slowest examples (default: `false`).
# Use this to specify the number of examples to include in the profile.
add_setting :profile_examples
# Run all examples if none match the configured filters (default: `false`).
add_setting :run_all_when_everything_filtered
# Allow user to configure their own success/pending/failure colors
# @param [Symbol] should be one of the following: [:black, :white, :red, :green, :yellow, :blue, :magenta, :cyan]
add_setting :success_color
add_setting :pending_color
add_setting :failure_color
add_setting :default_color
add_setting :fixed_color
add_setting :detail_color
# Seed for random ordering (default: generated randomly each run).
#
# When you run specs with `--order random`, RSpec generates a random seed
# for the randomization and prints it to the `output_stream` (assuming
# you're using RSpec's built-in formatters). If you discover an ordering
# dependency (i.e. examples fail intermittently depending on order), set
# this (on Configuration or on the command line with `--seed`) to run
# using the same seed while you debug the issue.
#
# We recommend, actually, that you use the command line approach so you
# don't accidentally leave the seed encoded.
define_reader :seed
# When a block passed to pending fails (as expected), display the failure
# without reporting it as a failure (default: false).
add_setting :show_failures_in_pending_blocks
# Convert symbols to hashes with the symbol as a key with a value of
# `true` (default: false).
#
# This allows you to tag a group or example like this:
#
# describe "something slow", :slow do
# # ...
# end
#
# ... instead of having to type:
#
# describe "something slow", :slow => true do
# # ...
# end
add_setting :treat_symbols_as_metadata_keys_with_true_values
# @private
add_setting :tty
# @private
add_setting :include_or_extend_modules
# @private
add_setting :files_to_run
# @private
add_setting :expecting_with_rspec
# @private
attr_accessor :filter_manager
attr_reader :backtrace_cleaner
def initialize
@expectation_frameworks = []
@include_or_extend_modules = []
@mock_framework = nil
@files_to_run = []
@formatters = []
@color = false
@pattern = '**/*_spec.rb'
@failure_exit_code = 1
@backtrace_cleaner = BacktraceCleaner.new
@default_path = 'spec'
@filter_manager = FilterManager.new
@preferred_options = {}
@seed = srand % 0xFFFF
@failure_color = :red
@success_color = :green
@pending_color = :yellow
@default_color = :white
@fixed_color = :blue
@detail_color = :cyan
@profile_examples = false
@requires = []
@libs = []
end
# @private
#
# Used to set higher priority option values from the command line.
def force(hash)
if hash.has_key?(:seed)
hash[:order], hash[:seed] = order_and_seed_from_seed(hash[:seed])
elsif hash.has_key?(:order)
set_order_and_seed(hash)
end
@preferred_options.merge!(hash)
self.warnings = value_for :warnings, nil
end
# @private
def reset
@reporter = nil
@formatters.clear
end
# @overload add_setting(name)
# @overload add_setting(name, opts)
# @option opts [Symbol] :default
#
# set a default value for the generated getter and predicate methods:
#
# add_setting(:foo, :default => "default value")
#
# @option opts [Symbol] :alias_with
#
# Use `:alias_with` to alias the setter, getter, and predicate to another
# name, or names:
#
# add_setting(:foo, :alias_with => :bar)
# add_setting(:foo, :alias_with => [:bar, :baz])
#
# Adds a custom setting to the RSpec.configuration object.
#
# RSpec.configuration.add_setting :foo
#
# Used internally and by extension frameworks like rspec-rails, so they
# can add config settings that are domain specific. For example:
#
# RSpec.configure do |c|
# c.add_setting :use_transactional_fixtures,
# :default => true,
# :alias_with => :use_transactional_examples
# end
#
# `add_setting` creates three methods on the configuration object, a
# setter, a getter, and a predicate:
#
# RSpec.configuration.foo=(value)
# RSpec.configuration.foo
# RSpec.configuration.foo? # returns true if foo returns anything but nil or false
def add_setting(name, opts={})
default = opts.delete(:default)
(class << self; self; end).class_eval do
add_setting(name, opts)
end
send("#{name}=", default) if default
end
# Returns the configured mock framework adapter module
def mock_framework
mock_with :rspec unless @mock_framework
@mock_framework
end
# Delegates to mock_framework=(framework)
def mock_framework=(framework)
mock_with framework
end
# The patterns to discard from backtraces. Deprecated, use
# Configuration#backtrace_exclusion_patterns instead
#
# Defaults to RSpec::Core::BacktraceCleaner::DEFAULT_EXCLUSION_PATTERNS
#
# One can replace the list by using the setter or modify it through the
# getter
#
# To override this behaviour and display a full backtrace, use
# `--backtrace`on the command line, in a `.rspec` file, or in the
# `rspec_options` attribute of RSpec's rake task.
def backtrace_clean_patterns
RSpec.deprecate("RSpec::Core::Configuration#backtrace_clean_patterns",
"RSpec::Core::Configuration#backtrace_exclusion_patterns")
@backtrace_cleaner.exclusion_patterns
end
def backtrace_clean_patterns=(patterns)
RSpec.deprecate("RSpec::Core::Configuration#backtrace_clean_patterns",
"RSpec::Core::Configuration#backtrace_exclusion_patterns")
@backtrace_cleaner.exclusion_patterns = patterns
end
# The patterns to always include to backtraces.
#
# Defaults to [Regexp.new Dir.getwd] if the current working directory
# matches any of the exclusion patterns. Otherwise it defaults to empty.
#
# One can replace the list by using the setter or modify it through the
# getter
def backtrace_inclusion_patterns
@backtrace_cleaner.inclusion_patterns
end
def backtrace_inclusion_patterns=(patterns)
@backtrace_cleaner.inclusion_patterns = patterns
end
# The patterns to discard from backtraces.
#
# Defaults to RSpec::Core::BacktraceCleaner::DEFAULT_EXCLUSION_PATTERNS
#
# One can replace the list by using the setter or modify it through the
# getter
#
# To override this behaviour and display a full backtrace, use
# `--backtrace`on the command line, in a `.rspec` file, or in the
# `rspec_options` attribute of RSpec's rake task.
def backtrace_exclusion_patterns
@backtrace_cleaner.exclusion_patterns
end
def backtrace_exclusion_patterns=(patterns)
@backtrace_cleaner.exclusion_patterns = patterns
end
# Sets the mock framework adapter module.
#
# `framework` can be a Symbol or a Module.
#
# Given any of `:rspec`, `:mocha`, `:flexmock`, or `:rr`, configures the
# named framework.
#
# Given `:nothing`, configures no framework. Use this if you don't use
# any mocking framework to save a little bit of overhead.
#
# Given a Module, includes that module in every example group. The module
# should adhere to RSpec's mock framework adapter API:
#
# setup_mocks_for_rspec
# - called before each example
#
# verify_mocks_for_rspec
# - called after each example. Framework should raise an exception
# when expectations fail
#
# teardown_mocks_for_rspec
# - called after verify_mocks_for_rspec (even if there are errors)
#
# If the module responds to `configuration` and `mock_with` receives a block,
# it will yield the configuration object to the block e.g.
#
# config.mock_with OtherMockFrameworkAdapter do |mod_config|
# mod_config.custom_setting = true
# end
def mock_with(framework)
framework_module = case framework
when Module
framework
when String, Symbol
require case framework.to_s
when /rspec/i
'rspec/core/mocking/with_rspec'
when /mocha/i
'rspec/core/mocking/with_mocha'
when /rr/i
'rspec/core/mocking/with_rr'
when /flexmock/i
'rspec/core/mocking/with_flexmock'
else
'rspec/core/mocking/with_absolutely_nothing'
end
RSpec::Core::MockFrameworkAdapter
end
new_name, old_name = [framework_module, @mock_framework].map do |mod|
mod.respond_to?(:framework_name) ? mod.framework_name : :unnamed
end
unless new_name == old_name
assert_no_example_groups_defined(:mock_framework)
end
if block_given?
raise "#{framework_module} must respond to `configuration` so that mock_with can yield it." unless framework_module.respond_to?(:configuration)
yield framework_module.configuration
end
@mock_framework = framework_module
end
# Returns the configured expectation framework adapter module(s)
def expectation_frameworks
expect_with :rspec if @expectation_frameworks.empty?
@expectation_frameworks
end
# Delegates to expect_with(framework)
def expectation_framework=(framework)
expect_with(framework)
end
# Sets the expectation framework module(s) to be included in each example
# group.
#
# `frameworks` can be `:rspec`, `:stdlib`, a custom module, or any
# combination thereof:
#
# config.expect_with :rspec
# config.expect_with :stdlib
# config.expect_with :rspec, :stdlib
# config.expect_with OtherExpectationFramework
#
# RSpec will translate `:rspec` and `:stdlib` into the appropriate
# modules.
#
# ## Configuration
#
# If the module responds to `configuration`, `expect_with` will
# yield the `configuration` object if given a block:
#
# config.expect_with OtherExpectationFramework do |custom_config|
# custom_config.custom_setting = true
# end
def expect_with(*frameworks)
modules = frameworks.map do |framework|
case framework
when Module
framework
when :rspec
require 'rspec/expectations'
self.expecting_with_rspec = true
::RSpec::Matchers
when :stdlib
require 'test/unit/assertions'
::Test::Unit::Assertions
else
raise ArgumentError, "#{framework.inspect} is not supported"
end
end
if (modules - @expectation_frameworks).any?
assert_no_example_groups_defined(:expect_with)
end
if block_given?
raise "expect_with only accepts a block with a single argument. Call expect_with #{modules.length} times, once with each argument, instead." if modules.length > 1
raise "#{modules.first} must respond to `configuration` so that expect_with can yield it." unless modules.first.respond_to?(:configuration)
yield modules.first.configuration
end
@expectation_frameworks.push(*modules)
end
def full_backtrace?
@backtrace_cleaner.full_backtrace?
end
def full_backtrace=(true_or_false)
@backtrace_cleaner.full_backtrace = true_or_false
end
def color(output=output_stream)
# rspec's built-in formatters all call this with the output argument,
# but defaulting to output_stream for backward compatibility with
# formatters in extension libs
return false unless output_to_tty?(output)
value_for(:color, @color)
end
def color=(bool)
if bool
if RSpec.windows_os? and not ENV['ANSICON']
warn "You must use ANSICON 1.31 or later (http://adoxa.3eeweb.com/ansicon/) to use colour on Windows"
@color = false
else
@color = true
end
end
end
# TODO - deprecate color_enabled - probably not until the last 2.x
# release before 3.0
alias_method :color_enabled, :color
alias_method :color_enabled=, :color=
define_predicate_for :color_enabled, :color
def libs=(libs)
libs.map do |lib|
@libs.unshift lib
$LOAD_PATH.unshift lib
end
end
def requires=(paths)
RSpec.deprecate("RSpec::Core::Configuration#requires=(paths)",
"paths.each {|path| require path}")
paths.map {|path| require path}
@requires += paths
end
def debug=(bool)
return unless bool
begin
require 'ruby-debug'
Debugger.start
rescue LoadError => e
raise <<-EOM
#{'*'*50}
#{e.message}
If you have it installed as a ruby gem, then you need to either require
'rubygems' or configure the RUBYOPT environment variable with the value
'rubygems'.
#{e.backtrace.join("\n")}
#{'*'*50}
EOM
end
end
def debug?
!!defined?(Debugger)
end
# Run examples defined on `line_numbers` in all files to run.
def line_numbers=(line_numbers)
filter_run :line_numbers => line_numbers.map{|l| l.to_i}
end
def line_numbers
filter.fetch(:line_numbers,[])
end
def full_description=(description)
filter_run :full_description => Regexp.union(*Array(description).map {|d| Regexp.new(d) })
end
def full_description
filter.fetch :full_description, nil
end
# @overload add_formatter(formatter)
#
# Adds a formatter to the formatters collection. `formatter` can be a
# string representing any of the built-in formatters (see
# `built_in_formatter`), or a custom formatter class.
#
# ### Note
#
# For internal purposes, `add_formatter` also accepts the name of a class
# and path to a file that contains that class definition, but you should
# consider that a private api that may change at any time without notice.
def add_formatter(formatter_to_use, path=nil)
formatter_class =
built_in_formatter(formatter_to_use) ||
custom_formatter(formatter_to_use) ||
(raise ArgumentError, "Formatter '#{formatter_to_use}' unknown - maybe you meant 'documentation' or 'progress'?.")
formatters << formatter_class.new(path ? file_at(path) : output)
end
alias_method :formatter=, :add_formatter
def formatters
@formatters ||= []
end
def reporter
@reporter ||= begin
add_formatter('progress') if formatters.empty?
Reporter.new(*formatters)
end
end
# @api private
#
# Defaults `profile_examples` to 10 examples when `@profile_examples` is `true`.
#
def profile_examples
profile = value_for(:profile_examples, @profile_examples)
if profile && !profile.is_a?(Integer)
10
else
profile
end
end
# @private
def files_or_directories_to_run=(*files)
files = files.flatten
files << default_path if (command == 'rspec' || Runner.running_in_drb?) && default_path && files.empty?
self.files_to_run = get_files_to_run(files)
end
# Creates a method that delegates to `example` including the submitted
# `args`. Used internally to add variants of `example` like `pending`:
#
# @example
# alias_example_to :pending, :pending => true
#
# # This lets you do this:
#
# describe Thing do
# pending "does something" do
# thing = Thing.new
# end
# end
#
# # ... which is the equivalent of
#
# describe Thing do
# it "does something", :pending => true do
# thing = Thing.new
# end
# end
def alias_example_to(new_name, *args)
extra_options = build_metadata_hash_from(args)
RSpec::Core::ExampleGroup.alias_example_to(new_name, extra_options)
end
# Define an alias for it_should_behave_like that allows different
# language (like "it_has_behavior" or "it_behaves_like") to be
# employed when including shared examples.
#
# Example:
#
# alias_it_behaves_like_to(:it_has_behavior, 'has behavior:')
#
# allows the user to include a shared example group like:
#
# describe Entity do
# it_has_behavior 'sortability' do
# let(:sortable) { Entity.new }
# end
# end
#
# which is reported in the output as:
#
# Entity
# has behavior: sortability
# # sortability examples here
def alias_it_behaves_like_to(new_name, report_label = '')
RSpec::Core::ExampleGroup.alias_it_behaves_like_to(new_name, report_label)
end
alias_method :alias_it_should_behave_like_to, :alias_it_behaves_like_to
# Adds key/value pairs to the `inclusion_filter`. If the
# `treat_symbols_as_metadata_keys_with_true_values` config option is set
# to true and `args` includes any symbols that are not part of a hash,
# each symbol is treated as a key in the hash with the value `true`.
#
# ### Note
#
# Filters set using this method can be overridden from the command line
# or config files (e.g. `.rspec`).
#
# @example
# # given this declaration
# describe "something", :foo => 'bar' do
# # ...
# end
#
# # any of the following will include that group
# config.filter_run_including :foo => 'bar'
# config.filter_run_including :foo => /^ba/
# config.filter_run_including :foo => lambda {|v| v == 'bar'}
# config.filter_run_including :foo => lambda {|v,m| m[:foo] == 'bar'}
#
# # given a proc with an arity of 1, the lambda is passed the value related to the key, e.g.
# config.filter_run_including :foo => lambda {|v| v == 'bar'}
#
# # given a proc with an arity of 2, the lambda is passed the value related to the key,
# # and the metadata itself e.g.
# config.filter_run_including :foo => lambda {|v,m| m[:foo] == 'bar'}
#
# # with treat_symbols_as_metadata_keys_with_true_values = true
# filter_run_including :foo # same as filter_run_including :foo => true
def filter_run_including(*args)
filter_manager.include_with_low_priority build_metadata_hash_from(args)
end
alias_method :filter_run, :filter_run_including
# Clears and reassigns the `inclusion_filter`. Set to `nil` if you don't
# want any inclusion filter at all.
#
# ### Warning
#
# This overrides any inclusion filters/tags set on the command line or in
# configuration files.
def inclusion_filter=(filter)
filter_manager.include! build_metadata_hash_from([filter])
end
alias_method :filter=, :inclusion_filter=
# Returns the `inclusion_filter`. If none has been set, returns an empty
# hash.
def inclusion_filter
filter_manager.inclusions
end
alias_method :filter, :inclusion_filter
# Adds key/value pairs to the `exclusion_filter`. If the
# `treat_symbols_as_metadata_keys_with_true_values` config option is set
    # to true and `args` includes any symbols that are not part of a hash,
# each symbol is treated as a key in the hash with the value `true`.
#
# ### Note
#
# Filters set using this method can be overridden from the command line
# or config files (e.g. `.rspec`).
#
# @example
# # given this declaration
# describe "something", :foo => 'bar' do
# # ...
# end
#
# # any of the following will exclude that group
# config.filter_run_excluding :foo => 'bar'
# config.filter_run_excluding :foo => /^ba/
# config.filter_run_excluding :foo => lambda {|v| v == 'bar'}
# config.filter_run_excluding :foo => lambda {|v,m| m[:foo] == 'bar'}
#
# # given a proc with an arity of 1, the lambda is passed the value related to the key, e.g.
# config.filter_run_excluding :foo => lambda {|v| v == 'bar'}
#
# # given a proc with an arity of 2, the lambda is passed the value related to the key,
# # and the metadata itself e.g.
# config.filter_run_excluding :foo => lambda {|v,m| m[:foo] == 'bar'}
#
# # with treat_symbols_as_metadata_keys_with_true_values = true
# filter_run_excluding :foo # same as filter_run_excluding :foo => true
def filter_run_excluding(*args)
filter_manager.exclude_with_low_priority build_metadata_hash_from(args)
end
# Clears and reassigns the `exclusion_filter`. Set to `nil` if you don't
# want any exclusion filter at all.
#
# ### Warning
#
# This overrides any exclusion filters/tags set on the command line or in
# configuration files.
def exclusion_filter=(filter)
filter_manager.exclude! build_metadata_hash_from([filter])
end
# Returns the `exclusion_filter`. If none has been set, returns an empty
# hash.
def exclusion_filter
filter_manager.exclusions
end
# Tells RSpec to include `mod` in example groups. Methods defined in
# `mod` are exposed to examples (not example groups). Use `filters` to
# constrain the groups in which to include the module.
#
# @example
#
# module AuthenticationHelpers
# def login_as(user)
# # ...
# end
# end
#
# module UserHelpers
# def users(username)
# # ...
# end
# end
#
# RSpec.configure do |config|
# config.include(UserHelpers) # included in all modules
# config.include(AuthenticationHelpers, :type => :request)
# end
#
# describe "edit profile", :type => :request do
# it "can be viewed by owning user" do
# login_as users(:jdoe)
# get "/profiles/jdoe"
# assert_select ".username", :text => 'jdoe'
# end
# end
#
# @see #extend
def include(mod, *filters)
include_or_extend_modules << [:include, mod, build_metadata_hash_from(filters)]
end
# Tells RSpec to extend example groups with `mod`. Methods defined in
# `mod` are exposed to example groups (not examples). Use `filters` to
# constrain the groups to extend.
#
# Similar to `include`, but behavior is added to example groups, which
# are classes, rather than the examples, which are instances of those
# classes.
#
# @example
#
# module UiHelpers
# def run_in_browser
# # ...
# end
# end
#
# RSpec.configure do |config|
# config.extend(UiHelpers, :type => :request)
# end
#
# describe "edit profile", :type => :request do
# run_in_browser
#
# it "does stuff in the client" do
# # ...
# end
# end
#
# @see #include
def extend(mod, *filters)
include_or_extend_modules << [:extend, mod, build_metadata_hash_from(filters)]
end
# @private
#
# Used internally to extend a group with modules using `include` and/or
# `extend`.
def configure_group(group)
include_or_extend_modules.each do |include_or_extend, mod, filters|
next unless filters.empty? || group.any_apply?(filters)
send("safe_#{include_or_extend}", mod, group)
end
end
# @private
def safe_include(mod, host)
host.send(:include,mod) unless host < mod
end
# @private
def setup_load_path_and_require(paths)
directories = ['lib', default_path].select { |p| File.directory? p }
RSpec::Core::RubyProject.add_to_load_path(*directories)
paths.each {|path| require path}
@requires += paths
end
# @private
if RUBY_VERSION.to_f >= 1.9
def safe_extend(mod, host)
host.extend(mod) unless (class << host; self; end) < mod
end
else
def safe_extend(mod, host)
host.extend(mod) unless (class << host; self; end).included_modules.include?(mod)
end
end
# @private
def configure_mock_framework
RSpec::Core::ExampleGroup.send(:include, mock_framework)
end
# @private
def configure_expectation_framework
expectation_frameworks.each do |framework|
RSpec::Core::ExampleGroup.send(:include, framework)
end
end
# @private
def load_spec_files
files_to_run.uniq.each {|f| load File.expand_path(f) }
raise_if_rspec_1_is_loaded
end
# @private
DEFAULT_FORMATTER = lambda { |string| string }
# Formats the docstring output using the block provided.
#
# @example
# # This will strip the descriptions of both examples and example groups.
# RSpec.configure do |config|
# config.format_docstrings { |s| s.strip }
# end
def format_docstrings(&block)
@format_docstrings_block = block_given? ? block : DEFAULT_FORMATTER
end
# @private
def format_docstrings_block
@format_docstrings_block ||= DEFAULT_FORMATTER
end
# @api
#
# Sets the seed value and sets `order='rand'`
def seed=(seed)
order_and_seed_from_seed(seed)
end
# @api
#
# Sets the order and, if order is `'rand:<seed>'`, also sets the seed.
def order=(type)
order_and_seed_from_order(type)
end
def randomize?
order.to_s.match(/rand/)
end
# @private
DEFAULT_ORDERING = lambda { |list| list }
# @private
RANDOM_ORDERING = lambda do |list|
Kernel.srand RSpec.configuration.seed
ordering = list.sort_by { Kernel.rand(list.size) }
Kernel.srand # reset random generation
ordering
end
# Sets a strategy by which to order examples.
#
# @example
# RSpec.configure do |config|
# config.order_examples do |examples|
# examples.reverse
# end
# end
#
# @see #order_groups
# @see #order_groups_and_examples
# @see #order=
# @see #seed=
def order_examples(&block)
@example_ordering_block = block
@order = "custom" unless built_in_orderer?(block)
end
# @private
def example_ordering_block
@example_ordering_block ||= DEFAULT_ORDERING
end
# Sets a strategy by which to order groups.
#
# @example
# RSpec.configure do |config|
# config.order_groups do |groups|
# groups.reverse
# end
# end
#
# @see #order_examples
# @see #order_groups_and_examples
# @see #order=
# @see #seed=
def order_groups(&block)
@group_ordering_block = block
@order = "custom" unless built_in_orderer?(block)
end
# @private
def group_ordering_block
@group_ordering_block ||= DEFAULT_ORDERING
end
# Sets a strategy by which to order groups and examples.
#
# @example
# RSpec.configure do |config|
# config.order_groups_and_examples do |groups_or_examples|
# groups_or_examples.reverse
# end
# end
#
# @see #order_groups
# @see #order_examples
# @see #order=
# @see #seed=
def order_groups_and_examples(&block)
order_groups(&block)
order_examples(&block)
end
# Set Ruby warnings on or off
def warnings= value
$VERBOSE = !!value
end
def warnings
$VERBOSE
end
private
def get_files_to_run(paths)
paths.map do |path|
path = path.gsub(File::ALT_SEPARATOR, File::SEPARATOR) if File::ALT_SEPARATOR
File.directory?(path) ? gather_directories(path) : extract_location(path)
end.flatten.sort
end
def gather_directories(path)
stripped = "{#{pattern.gsub(/\s*,\s*/, ',')}}"
files = pattern =~ /^#{Regexp.escape path}/ ? Dir[stripped] : Dir["#{path}/#{stripped}"]
files.sort
end
def extract_location(path)
if path =~ /^(.*?)((?:\:\d+)+)$/
path, lines = $1, $2[1..-1].split(":").map{|n| n.to_i}
filter_manager.add_location path, lines
end
path
end
def command
$0.split(File::SEPARATOR).last
end
def value_for(key, default=nil)
@preferred_options.has_key?(key) ? @preferred_options[key] : default
end
def assert_no_example_groups_defined(config_option)
if RSpec.world.example_groups.any?
raise MustBeConfiguredBeforeExampleGroupsError.new(
"RSpec's #{config_option} configuration option must be configured before " +
"any example groups are defined, but you have already defined a group."
)
end
end
def raise_if_rspec_1_is_loaded
if defined?(Spec) && defined?(Spec::VERSION::MAJOR) && Spec::VERSION::MAJOR == 1
raise <<-MESSAGE
#{'*'*80}
You are running rspec-2, but it seems as though rspec-1 has been loaded as
well. This is likely due to a statement like this somewhere in the specs:
require 'spec'
Please locate that statement, remove it, and try again.
#{'*'*80}
MESSAGE
end
end
def output_to_tty?(output=output_stream)
tty? || (output.respond_to?(:tty?) && output.tty?)
end
def built_in_formatter(key)
case key.to_s
when 'd', 'doc', 'documentation', 's', 'n', 'spec', 'nested'
require 'rspec/core/formatters/documentation_formatter'
RSpec::Core::Formatters::DocumentationFormatter
when 'h', 'html'
require 'rspec/core/formatters/html_formatter'
RSpec::Core::Formatters::HtmlFormatter
when 't', 'textmate'
require 'rspec/core/formatters/text_mate_formatter'
RSpec::Core::Formatters::TextMateFormatter
when 'p', 'progress'
require 'rspec/core/formatters/progress_formatter'
RSpec::Core::Formatters::ProgressFormatter
when 'j', 'json'
require 'rspec/core/formatters/json_formatter'
RSpec::Core::Formatters::JsonFormatter
end
end
def custom_formatter(formatter_ref)
if Class === formatter_ref
formatter_ref
elsif string_const?(formatter_ref)
begin
eval(formatter_ref)
rescue NameError
require path_for(formatter_ref)
eval(formatter_ref)
end
end
end
def string_const?(str)
str.is_a?(String) && /\A[A-Z][a-zA-Z0-9_:]*\z/ =~ str
end
def path_for(const_ref)
underscore_with_fix_for_non_standard_rspec_naming(const_ref)
end
def underscore_with_fix_for_non_standard_rspec_naming(string)
underscore(string).sub(%r{(^|/)r_spec($|/)}, '\\1rspec\\2')
end
# activesupport/lib/active_support/inflector/methods.rb, line 48
def underscore(camel_cased_word)
word = camel_cased_word.to_s.dup
word.gsub!(/::/, '/')
word.gsub!(/([A-Z]+)([A-Z][a-z])/,'\1_\2')
word.gsub!(/([a-z\d])([A-Z])/,'\1_\2')
word.tr!("-", "_")
word.downcase!
word
end
def file_at(path)
FileUtils.mkdir_p(File.dirname(path))
File.new(path, 'w')
end
def order_and_seed_from_seed(value)
order_groups_and_examples(&RANDOM_ORDERING)
@order, @seed = 'rand', value.to_i
[@order, @seed]
end
def set_order_and_seed(hash)
hash[:order], seed = order_and_seed_from_order(hash[:order])
hash[:seed] = seed if seed
end
def order_and_seed_from_order(type)
order, seed = type.to_s.split(':')
@order = order
@seed = seed = seed.to_i if seed
if randomize?
order_groups_and_examples(&RANDOM_ORDERING)
elsif order == 'default'
@order, @seed = nil, nil
order_groups_and_examples(&DEFAULT_ORDERING)
end
return order, seed
end
def built_in_orderer?(block)
[DEFAULT_ORDERING, RANDOM_ORDERING].include?(block)
end
end
end
end
| 1 | 9,151 | This is a _very_ complicated line. I'm not a fan of ternaries to begin with (although I allow them in very simple situations), but this ternary has compound conditionals and really needs to be broken up. That said: if I'm reading this right, this logic can make this lambda (which is called `RANDOM_ORDERING`) not random, basically doing the opposite of what it says. In general, it's good to avoid making methods or lambdas do the opposite of what they say. | rspec-rspec-core | rb |
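One hedged way to break the ternary up, as the reviewer asks, while keeping the patch's behavior (items that opt out of random ordering all share the fixed sort key 1); `randomly_ordered` is a hypothetical local name, not part of rspec-core:

RANDOM_ORDERING = lambda do |list|
  Kernel.srand RSpec.configuration.seed

  # True when the item has no explicit order, or explicitly asks for :random.
  randomly_ordered = lambda do |item|
    !item.respond_to?(:order) || item.order.nil? || item.order == :random
  end

  # sort_by evaluates the block once per item, so this is equivalent to the
  # map/zip/sort_by version in the patch.
  ordering = list.sort_by do |item|
    randomly_ordered.call(item) ? Kernel.rand(list.size) : 1
  end

  Kernel.srand # reset random generation
  ordering
end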
@@ -52,7 +52,8 @@ if (TYPO3_MODE == 'BE') {
'',
[
// An array holding the controller-action-combinations that are accessible
- 'Administration' => 'index,setSite,setCore,noSiteAvailable'
+ 'Administration' => 'index,setSite,setCore,noSiteAvailable',
+ 'Backend\\Web\\Info\\ApacheSolrDocument' => 'index'
],
[
'access' => 'admin', | 1 | <?php
if (!defined('TYPO3_MODE')) {
die('Access denied.');
}
# ----- # ----- # ----- # ----- # ----- # ----- # ----- # ----- # ----- #
// add search plugin to content element wizard
if (TYPO3_MODE == 'BE') {
$TBE_MODULES_EXT['xMOD_db_new_content_el']['addElClasses']['ApacheSolrForTypo3\\Solr\\Backend\\ContentElementWizardIconProvider'] =
\TYPO3\CMS\Core\Utility\ExtensionManagementUtility::extPath($_EXTKEY) . 'Classes/Backend/ContentElementWizardIconProvider.php';
}
# ----- # ----- # ----- # ----- # ----- # ----- # ----- # ----- # ----- #
$extIconPath = 'EXT:solr/Resources/Public/Images/Icons/';
if (TYPO3_MODE === 'BE') {
$modulePrefix = 'extensions-solr-module';
$bitmapProvider = \TYPO3\CMS\Core\Imaging\IconProvider\BitmapIconProvider::class;
$svgProvider = \TYPO3\CMS\Core\Imaging\IconProvider\SvgIconProvider::class;
// register all module icons with extensions-solr-module-modulename
$iconRegistry = \TYPO3\CMS\Core\Utility\GeneralUtility::makeInstance(\TYPO3\CMS\Core\Imaging\IconRegistry::class);
$iconRegistry->registerIcon($modulePrefix . '-administration', $svgProvider,
['source' => $extIconPath . 'ModuleAdministration.svg']);
$iconRegistry->registerIcon($modulePrefix . '-overview', $bitmapProvider,
['source' => $extIconPath . 'Search.png']);
$iconRegistry->registerIcon($modulePrefix . '-indexqueue', $bitmapProvider,
['source' => $extIconPath . 'IndexQueue.png']);
$iconRegistry->registerIcon($modulePrefix . '-indexmaintenance', $bitmapProvider,
['source' => $extIconPath . 'IndexMaintenance.png']);
$iconRegistry->registerIcon($modulePrefix . '-indexfields', $bitmapProvider,
['source' => $extIconPath . 'IndexFields.png']);
$iconRegistry->registerIcon($modulePrefix . '-stopwords', $bitmapProvider,
['source' => $extIconPath . 'StopWords.png']);
$iconRegistry->registerIcon($modulePrefix . '-synonyms', $bitmapProvider,
['source' => $extIconPath . 'Synonyms.png']);
$iconRegistry->registerIcon($modulePrefix . '-searchstatistics', $bitmapProvider,
['source' => $extIconPath . 'SearchStatistics.png']);
$iconRegistry->registerIcon($modulePrefix . '-initsolrconnections', $svgProvider,
['source' => $extIconPath . 'InitSolrConnections.svg']);
// register plugin icon
$iconRegistry->registerIcon('extensions-solr-plugin-contentelement', $svgProvider,
['source' => $extIconPath . 'ContentElement.svg']);
}
if (TYPO3_MODE == 'BE') {
\TYPO3\CMS\Extbase\Utility\ExtensionUtility::registerModule(
'ApacheSolrForTypo3.' . $_EXTKEY,
'tools',
'administration',
'',
[
// An array holding the controller-action-combinations that are accessible
'Administration' => 'index,setSite,setCore,noSiteAvailable'
],
[
'access' => 'admin',
'icon' => 'EXT:solr/Resources/Public/Images/Icons/ModuleAdministration.svg',
'labels' => 'LLL:EXT:' . $_EXTKEY . '/Resources/Private/Language/locallang.xlf',
]
);
ApacheSolrForTypo3\Solr\Backend\SolrModule\AdministrationModuleManager::registerModule(
'ApacheSolrForTypo3.' . $_EXTKEY,
'Overview',
['index']
);
ApacheSolrForTypo3\Solr\Backend\SolrModule\AdministrationModuleManager::registerModule(
'ApacheSolrForTypo3.' . $_EXTKEY,
'IndexQueue',
['index,initializeIndexQueue,resetLogErrors,clearIndexQueue']
);
ApacheSolrForTypo3\Solr\Backend\SolrModule\AdministrationModuleManager::registerModule(
'ApacheSolrForTypo3.' . $_EXTKEY,
'IndexMaintenance',
['index,cleanUpIndex,emptyIndex,reloadIndexConfiguration']
);
ApacheSolrForTypo3\Solr\Backend\SolrModule\AdministrationModuleManager::registerModule(
'ApacheSolrForTypo3.' . $_EXTKEY,
'IndexFields',
['index']
);
ApacheSolrForTypo3\Solr\Backend\SolrModule\AdministrationModuleManager::registerModule(
'ApacheSolrForTypo3.' . $_EXTKEY,
'SearchStatistics',
['index']
);
ApacheSolrForTypo3\Solr\Backend\SolrModule\AdministrationModuleManager::registerModule(
'ApacheSolrForTypo3.' . $_EXTKEY,
'StopWords',
['index,saveStopWords']
);
ApacheSolrForTypo3\Solr\Backend\SolrModule\AdministrationModuleManager::registerModule(
'ApacheSolrForTypo3.' . $_EXTKEY,
'Synonyms',
['index,addSynonyms,deleteSynonyms']
);
// registering reports
$GLOBALS['TYPO3_CONF_VARS']['SC_OPTIONS']['reports']['tx_reports']['status']['providers']['solr'] = [
\ApacheSolrForTypo3\Solr\Report\SchemaStatus::class,
\ApacheSolrForTypo3\Solr\Report\SolrConfigStatus::class,
\ApacheSolrForTypo3\Solr\Report\SolrConfigurationStatus::class,
\ApacheSolrForTypo3\Solr\Report\SolrStatus::class,
\ApacheSolrForTypo3\Solr\Report\SolrVersionStatus::class,
\ApacheSolrForTypo3\Solr\Report\AccessFilterPluginInstalledStatus::class,
\ApacheSolrForTypo3\Solr\Report\AllowUrlFOpenStatus::class,
\ApacheSolrForTypo3\Solr\Report\FilterVarStatus::class
];
// Index Inspector
\TYPO3\CMS\Core\Utility\ExtensionManagementUtility::insertModuleFunction(
'web_info',
\ApacheSolrForTypo3\Solr\Backend\IndexInspector\IndexInspector::class,
null,
'LLL:EXT:solr/Resources/Private/Language/locallang.xlf:module_indexinspector'
);
// register Clear Cache Menu hook
$GLOBALS['TYPO3_CONF_VARS']['SC_OPTIONS']['additionalBackendItems']['cacheActions']['clearSolrConnectionCache'] = \ApacheSolrForTypo3\Solr\ConnectionManager::class;
}
if ((TYPO3_MODE === 'BE') || (TYPO3_MODE === 'FE' && isset($_POST['TSFE_EDIT']))) {
// the order of registering the garbage collector and the record monitor is important!
    // for certain scenarios items must be removed by GC first, and then be re-added to the Index Queue
// hooking into TCE Main to monitor record updates that may require deleting documents from the index
$GLOBALS['TYPO3_CONF_VARS']['SC_OPTIONS']['t3lib/class.t3lib_tcemain.php']['processCmdmapClass'][] = \ApacheSolrForTypo3\Solr\GarbageCollector::class;
$GLOBALS['TYPO3_CONF_VARS']['SC_OPTIONS']['t3lib/class.t3lib_tcemain.php']['processDatamapClass'][] = \ApacheSolrForTypo3\Solr\GarbageCollector::class;
// hooking into TCE Main to monitor record updates that may require reindexing by the index queue
$GLOBALS['TYPO3_CONF_VARS']['SC_OPTIONS']['t3lib/class.t3lib_tcemain.php']['processCmdmapClass'][] = \ApacheSolrForTypo3\Solr\IndexQueue\RecordMonitor::class;
$GLOBALS['TYPO3_CONF_VARS']['SC_OPTIONS']['t3lib/class.t3lib_tcemain.php']['processDatamapClass'][] = \ApacheSolrForTypo3\Solr\IndexQueue\RecordMonitor::class;
}
# ----- # ----- # ----- # ----- # ----- # ----- # ----- # ----- # ----- #
// register click menu item to initialize the Solr connections for a single site
// visible for admin users only
\TYPO3\CMS\Core\Utility\ExtensionManagementUtility::addUserTSConfig('
[adminUser = 1]
options.contextMenu.table.pages.items.850 = ITEM
options.contextMenu.table.pages.items.850 {
name = Tx_Solr_initializeSolrConnections
label = Initialize Solr Connections
iconName = extensions-solr-module-initsolrconnections
displayCondition = getRecord|is_siteroot = 1
callbackAction = initializeSolrConnections
}
options.contextMenu.table.pages.items.851 = DIVIDER
[global]
');
\TYPO3\CMS\Core\Utility\ExtensionManagementUtility::registerExtDirectComponent(
'TYPO3.Solr.ContextMenuActionController',
\ApacheSolrForTypo3\Solr\ContextMenuActionController::class,
'web',
'admin'
);
// include JS in backend
$GLOBALS['TYPO3_CONF_VARS']['typo3/backend.php']['additionalBackendItems']['Solr.ContextMenuInitializeSolrConnectionsAction'] = \TYPO3\CMS\Core\Utility\ExtensionManagementUtility::extPath('solr') . 'Classes/BackendItem/ContextMenuActionJavascriptRegistration.php';
# ----- # ----- # ----- # ----- # ----- # ----- # ----- # ----- # ----- #
// replace the built-in search content element
\TYPO3\CMS\Core\Utility\ExtensionManagementUtility::addPiFlexFormValue(
'*',
'FILE:EXT:' . $_EXTKEY . '/Configuration/FlexForms/Results.xml',
'search'
);
$TCA['tt_content']['types']['search']['showitem'] =
'--palette--;LLL:EXT:cms/locallang_ttc.xml:palette.general;general,
--palette--;LLL:EXT:cms/locallang_ttc.xml:palette.header;header,
--div--;LLL:EXT:cms/locallang_ttc.xml:tabs.plugin,
pi_flexform;;;;1-1-1,
--div--;LLL:EXT:cms/locallang_ttc.xml:tabs.access,
--palette--;LLL:EXT:cms/locallang_ttc.xml:palette.visibility;visibility,
--palette--;LLL:EXT:cms/locallang_ttc.xml:palette.access;access,
--div--;LLL:EXT:cms/locallang_ttc.xml:tabs.appearance,
--palette--;LLL:EXT:cms/locallang_ttc.xml:palette.frames;frames,
--div--;LLL:EXT:cms/locallang_ttc.xml:tabs.behaviour,
--div--;LLL:EXT:cms/locallang_ttc.xml:tabs.extended';
| 1 | 6,177 | We should remove this | TYPO3-Solr-ext-solr | php |
@@ -205,6 +205,8 @@ type Config struct {
sourceToRawConfig map[Source]map[string]string
rawValues map[string]string
Err error
+
+ IptablesNATOutgoingInterfaceFilter string `config:"string;"`
}
type ProtoPort struct { | 1 | // Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"errors"
"fmt"
"net"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/libcalico-go/lib/apiconfig"
"github.com/projectcalico/libcalico-go/lib/names"
"github.com/projectcalico/libcalico-go/lib/numorstring"
)
var (
IfaceListRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}(,[a-zA-Z0-9_-]{1,15})*$`)
AuthorityRegexp = regexp.MustCompile(`^[^:/]+:\d+$`)
HostnameRegexp = regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`)
StringRegexp = regexp.MustCompile(`^.*$`)
)
const (
maxUint = ^uint(0)
maxInt = int(maxUint >> 1)
minInt = -maxInt - 1
)
// Source of a config value. Values from higher-numbered sources override
// those from lower-numbered sources. Note: some parameters (such as those
// needed to connect to the datastore) can only be set from a local source.
type Source uint8
const (
Default = iota
DatastoreGlobal
DatastorePerHost
ConfigFile
EnvironmentVariable
)
var SourcesInDescendingOrder = []Source{EnvironmentVariable, ConfigFile, DatastorePerHost, DatastoreGlobal}
func (source Source) String() string {
switch source {
case Default:
return "<default>"
case DatastoreGlobal:
return "datastore (global)"
case DatastorePerHost:
return "datastore (per-host)"
case ConfigFile:
return "config file"
case EnvironmentVariable:
return "environment variable"
}
return fmt.Sprintf("<unknown(%v)>", uint8(source))
}
func (source Source) Local() bool {
switch source {
case Default, ConfigFile, EnvironmentVariable:
return true
default:
return false
}
}
// Config contains the best, parsed config values loaded from the various sources.
// We use tags to control the parsing and validation.
type Config struct {
// Configuration parameters.
UseInternalDataplaneDriver bool `config:"bool;true"`
DataplaneDriver string `config:"file(must-exist,executable);calico-iptables-plugin;non-zero,die-on-fail,skip-default-validation"`
DatastoreType string `config:"oneof(kubernetes,etcdv3);etcdv3;non-zero,die-on-fail,local"`
FelixHostname string `config:"hostname;;local,non-zero"`
EtcdAddr string `config:"authority;127.0.0.1:2379;local"`
EtcdScheme string `config:"oneof(http,https);http;local"`
EtcdKeyFile string `config:"file(must-exist);;local"`
EtcdCertFile string `config:"file(must-exist);;local"`
EtcdCaFile string `config:"file(must-exist);;local"`
EtcdEndpoints []string `config:"endpoint-list;;local"`
TyphaAddr string `config:"authority;;local"`
TyphaK8sServiceName string `config:"string;;local"`
TyphaK8sNamespace string `config:"string;kube-system;non-zero,local"`
TyphaReadTimeout time.Duration `config:"seconds;30;local"`
TyphaWriteTimeout time.Duration `config:"seconds;10;local"`
// Client-side TLS config for Felix's communication with Typha. If any of these are
// specified, they _all_ must be - except that either TyphaCN or TyphaURISAN may be left
// unset. Felix will then initiate a secure (TLS) connection to Typha. Typha must present
// a certificate signed by a CA in TyphaCAFile, and with CN matching TyphaCN or URI SAN
// matching TyphaURISAN.
TyphaKeyFile string `config:"file(must-exist);;local"`
TyphaCertFile string `config:"file(must-exist);;local"`
TyphaCAFile string `config:"file(must-exist);;local"`
TyphaCN string `config:"string;;local"`
TyphaURISAN string `config:"string;;local"`
Ipv6Support bool `config:"bool;true"`
IgnoreLooseRPF bool `config:"bool;false"`
RouteRefreshInterval time.Duration `config:"seconds;90"`
IptablesRefreshInterval time.Duration `config:"seconds;90"`
IptablesPostWriteCheckIntervalSecs time.Duration `config:"seconds;1"`
IptablesLockFilePath string `config:"file;/run/xtables.lock"`
IptablesLockTimeoutSecs time.Duration `config:"seconds;0"`
IptablesLockProbeIntervalMillis time.Duration `config:"millis;50"`
IpsetsRefreshInterval time.Duration `config:"seconds;10"`
MaxIpsetSize int `config:"int;1048576;non-zero"`
PolicySyncPathPrefix string `config:"file;;"`
NetlinkTimeoutSecs time.Duration `config:"seconds;10"`
MetadataAddr string `config:"hostname;127.0.0.1;die-on-fail"`
MetadataPort int `config:"int(0,65535);8775;die-on-fail"`
OpenstackRegion string `config:"region;;die-on-fail"`
InterfacePrefix string `config:"iface-list;cali;non-zero,die-on-fail"`
InterfaceExclude string `config:"iface-list;kube-ipvs0"`
ChainInsertMode string `config:"oneof(insert,append);insert;non-zero,die-on-fail"`
DefaultEndpointToHostAction string `config:"oneof(DROP,RETURN,ACCEPT);DROP;non-zero,die-on-fail"`
IptablesFilterAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"`
IptablesMangleAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"`
LogPrefix string `config:"string;calico-packet"`
LogFilePath string `config:"file;/var/log/calico/felix.log;die-on-fail"`
LogSeverityFile string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
LogSeverityScreen string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
LogSeveritySys string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
IpInIpEnabled bool `config:"bool;false"`
IpInIpMtu int `config:"int;1440;non-zero"`
IpInIpTunnelAddr net.IP `config:"ipv4;"`
ReportingIntervalSecs time.Duration `config:"seconds;30"`
ReportingTTLSecs time.Duration `config:"seconds;90"`
EndpointReportingEnabled bool `config:"bool;false"`
EndpointReportingDelaySecs time.Duration `config:"seconds;1"`
IptablesMarkMask uint32 `config:"mark-bitmask;0xffff0000;non-zero,die-on-fail"`
DisableConntrackInvalidCheck bool `config:"bool;false"`
HealthEnabled bool `config:"bool;false"`
HealthPort int `config:"int(0,65535);9099"`
HealthHost string `config:"string;localhost"`
PrometheusMetricsEnabled bool `config:"bool;false"`
PrometheusMetricsPort int `config:"int(0,65535);9091"`
PrometheusGoMetricsEnabled bool `config:"bool;true"`
PrometheusProcessMetricsEnabled bool `config:"bool;true"`
FailsafeInboundHostPorts []ProtoPort `config:"port-list;tcp:22,udp:68,tcp:179,tcp:2379,tcp:2380,tcp:6666,tcp:6667;die-on-fail"`
FailsafeOutboundHostPorts []ProtoPort `config:"port-list;udp:53,udp:67,tcp:179,tcp:2379,tcp:2380,tcp:6666,tcp:6667;die-on-fail"`
KubeNodePortRanges []numorstring.Port `config:"portrange-list;30000:32767"`
NATPortRange numorstring.Port `config:"portrange;"`
UsageReportingEnabled bool `config:"bool;true"`
UsageReportingInitialDelaySecs time.Duration `config:"seconds;300"`
UsageReportingIntervalSecs time.Duration `config:"seconds;86400"`
ClusterGUID string `config:"string;baddecaf"`
ClusterType string `config:"string;"`
CalicoVersion string `config:"string;"`
ExternalNodesCIDRList []string `config:"cidr-list;;die-on-fail"`
DebugMemoryProfilePath string `config:"file;;"`
DebugCPUProfilePath string `config:"file;/tmp/felix-cpu-<timestamp>.pprof;"`
DebugDisableLogDropping bool `config:"bool;false"`
DebugSimulateCalcGraphHangAfter time.Duration `config:"seconds;0"`
DebugSimulateDataplaneHangAfter time.Duration `config:"seconds;0"`
// State tracking.
// nameToSource tracks where we loaded each config param from.
sourceToRawConfig map[Source]map[string]string
rawValues map[string]string
Err error
}
type ProtoPort struct {
Protocol string
Port uint16
}
// Load parses and merges the rawData from one particular source into this config object.
// If there is a config value already loaded from a higher-priority source, then
// the new value will be ignored (after validation).
func (config *Config) UpdateFrom(rawData map[string]string, source Source) (changed bool, err error) {
log.Infof("Merging in config from %v: %v", source, rawData)
// Defensively take a copy of the raw data, in case we've been handed
// a mutable map by mistake.
rawDataCopy := make(map[string]string)
for k, v := range rawData {
if v == "" {
log.WithFields(log.Fields{
"name": k,
"source": source,
}).Info("Ignoring empty configuration parameter. Use value 'none' if " +
"your intention is to explicitly disable the default value.")
continue
}
rawDataCopy[k] = v
}
config.sourceToRawConfig[source] = rawDataCopy
changed, err = config.resolve()
return
}
func (c *Config) InterfacePrefixes() []string {
return strings.Split(c.InterfacePrefix, ",")
}
func (c *Config) InterfaceExcludes() []string {
return strings.Split(c.InterfaceExclude, ",")
}
func (config *Config) OpenstackActive() bool {
if strings.Contains(strings.ToLower(config.ClusterType), "openstack") {
// OpenStack is explicitly known to be present. Newer versions of the OpenStack plugin
// set this flag.
log.Debug("Cluster type contains OpenStack")
return true
}
// If we get here, either OpenStack isn't present or we're running against an old version
// of the OpenStack plugin, which doesn't set the flag. Use heuristics based on the
// presence of the OpenStack-related parameters.
if config.MetadataAddr != "" && config.MetadataAddr != "127.0.0.1" {
log.Debug("OpenStack metadata IP set to non-default, assuming OpenStack active")
return true
}
if config.MetadataPort != 0 && config.MetadataPort != 8775 {
log.Debug("OpenStack metadata port set to non-default, assuming OpenStack active")
return true
}
for _, prefix := range config.InterfacePrefixes() {
if prefix == "tap" {
log.Debug("Interface prefix list contains 'tap', assuming OpenStack")
return true
}
}
log.Debug("No evidence this is an OpenStack deployment; disabling OpenStack special-cases")
return false
}
func (config *Config) resolve() (changed bool, err error) {
newRawValues := make(map[string]string)
nameToSource := make(map[string]Source)
for _, source := range SourcesInDescendingOrder {
valueLoop:
for rawName, rawValue := range config.sourceToRawConfig[source] {
currentSource := nameToSource[rawName]
param, ok := knownParams[strings.ToLower(rawName)]
if !ok {
if source >= currentSource {
// Stash the raw value in case it's useful for
// a plugin. Since we don't know the canonical
// name, use the raw name.
newRawValues[rawName] = rawValue
nameToSource[rawName] = source
}
log.WithField("raw name", rawName).Info(
"Ignoring unknown config param.")
continue valueLoop
}
metadata := param.GetMetadata()
name := metadata.Name
if metadata.Local && !source.Local() {
log.Warningf("Ignoring local-only configuration for %v from %v",
name, source)
continue valueLoop
}
log.Infof("Parsing value for %v: %v (from %v)",
name, rawValue, source)
var value interface{}
if strings.ToLower(rawValue) == "none" {
// Special case: we allow a value of "none" to force the value to
// the zero value for a field. The zero value often differs from
// the default value. Typically, the zero value means "turn off
// the feature".
if metadata.NonZero {
err = errors.New("Non-zero field cannot be set to none")
log.Errorf(
"Failed to parse value for %v: %v from source %v. %v",
name, rawValue, source, err)
config.Err = err
return
}
value = metadata.ZeroValue
log.Infof("Value set to 'none', replacing with zero-value: %#v.",
value)
} else {
value, err = param.Parse(rawValue)
if err != nil {
logCxt := log.WithError(err).WithField("source", source)
if metadata.DieOnParseFailure {
logCxt.Error("Invalid (required) config value.")
config.Err = err
return
} else {
logCxt.WithField("default", metadata.Default).Warn(
"Replacing invalid value with default")
value = metadata.Default
err = nil
}
}
}
log.Infof("Parsed value for %v: %v (from %v)",
name, value, source)
if source < currentSource {
log.Infof("Skipping config value for %v from %v; "+
"already have a value from %v", name,
source, currentSource)
continue
}
field := reflect.ValueOf(config).Elem().FieldByName(name)
field.Set(reflect.ValueOf(value))
newRawValues[name] = rawValue
nameToSource[name] = source
}
}
changed = !reflect.DeepEqual(newRawValues, config.rawValues)
config.rawValues = newRawValues
return
}
func (config *Config) setBy(name string, source Source) bool {
_, set := config.sourceToRawConfig[source][name]
return set
}
func (config *Config) setByConfigFileOrEnvironment(name string) bool {
return config.setBy(name, ConfigFile) || config.setBy(name, EnvironmentVariable)
}
func (config *Config) DatastoreConfig() apiconfig.CalicoAPIConfig {
// We want Felix's datastore connection to be fully configurable using the same
// CALICO_XXX_YYY (or just XXX_YYY) environment variables that work for any libcalico-go
// client - for both the etcdv3 and KDD cases. However, for the etcd case, Felix has for a
// long time supported FELIX_XXXYYY environment variables, and we want those to keep working
// too.
// To achieve that, first build a CalicoAPIConfig using libcalico-go's
// LoadClientConfigFromEnvironment - which means incorporating defaults and CALICO_XXX_YYY
// and XXX_YYY variables.
cfg, err := apiconfig.LoadClientConfigFromEnvironment()
if err != nil {
log.WithError(err).Panic("Failed to create datastore config")
}
// Now allow FELIX_XXXYYY variables or XxxYyy config file settings to override that, in the
// etcd case.
if config.setByConfigFileOrEnvironment("DatastoreType") && config.DatastoreType == "etcdv3" {
cfg.Spec.DatastoreType = apiconfig.EtcdV3
// Endpoints.
if config.setByConfigFileOrEnvironment("EtcdEndpoints") && len(config.EtcdEndpoints) > 0 {
cfg.Spec.EtcdEndpoints = strings.Join(config.EtcdEndpoints, ",")
} else if config.setByConfigFileOrEnvironment("EtcdAddr") {
cfg.Spec.EtcdEndpoints = config.EtcdScheme + "://" + config.EtcdAddr
}
// TLS.
if config.setByConfigFileOrEnvironment("EtcdKeyFile") {
cfg.Spec.EtcdKeyFile = config.EtcdKeyFile
}
if config.setByConfigFileOrEnvironment("EtcdCertFile") {
cfg.Spec.EtcdCertFile = config.EtcdCertFile
}
if config.setByConfigFileOrEnvironment("EtcdCaFile") {
cfg.Spec.EtcdCACertFile = config.EtcdCaFile
}
}
if !config.IpInIpEnabled {
// Polling k8s for node updates is expensive (because we get many superfluous
// updates) so disable if we don't need it.
log.Info("IPIP disabled, disabling node poll (if KDD is in use).")
cfg.Spec.K8sDisableNodePoll = true
}
return *cfg
}
// Validate() performs cross-field validation.
func (config *Config) Validate() (err error) {
if config.FelixHostname == "" {
err = errors.New("Failed to determine hostname")
}
if config.DatastoreType == "etcdv3" && len(config.EtcdEndpoints) == 0 {
if config.EtcdScheme == "" {
err = errors.New("EtcdEndpoints and EtcdScheme both missing")
}
if config.EtcdAddr == "" {
err = errors.New("EtcdEndpoints and EtcdAddr both missing")
}
}
// If any client-side TLS config parameters are specified, they _all_ must be - except that
// either TyphaCN or TyphaURISAN may be left unset.
if config.TyphaCAFile != "" ||
config.TyphaCertFile != "" ||
config.TyphaKeyFile != "" ||
config.TyphaCN != "" ||
config.TyphaURISAN != "" {
// Some TLS config specified.
if config.TyphaKeyFile == "" ||
config.TyphaCertFile == "" ||
config.TyphaCAFile == "" ||
(config.TyphaCN == "" && config.TyphaURISAN == "") {
err = errors.New("If any Felix-Typha TLS config parameters are specified," +
" they _all_ must be" +
" - except that either TyphaCN or TyphaURISAN may be left unset.")
}
}
if err != nil {
config.Err = err
}
return
}
var knownParams map[string]param
func loadParams() {
knownParams = make(map[string]param)
config := Config{}
kind := reflect.TypeOf(config)
metaRegexp := regexp.MustCompile(`^([^;(]+)(?:\(([^)]*)\))?;` +
`([^;]*)(?:;` +
`([^;]*))?$`)
for ii := 0; ii < kind.NumField(); ii++ {
field := kind.Field(ii)
tag := field.Tag.Get("config")
if tag == "" {
continue
}
captures := metaRegexp.FindStringSubmatch(tag)
if len(captures) == 0 {
log.Panicf("Failed to parse metadata for config param %v", field.Name)
}
log.Debugf("%v: metadata captures: %#v", field.Name, captures)
kind := captures[1] // Type: "int|oneof|bool|port-list|..."
kindParams := captures[2] // Parameters for the type: e.g. for oneof "http,https"
defaultStr := captures[3] // Default value e.g "1.0"
flags := captures[4]
var param param
var err error
switch kind {
case "bool":
param = &BoolParam{}
case "int":
min := minInt
max := maxInt
if kindParams != "" {
minAndMax := strings.Split(kindParams, ",")
min, err = strconv.Atoi(minAndMax[0])
if err != nil {
log.Panicf("Failed to parse min value for %v", field.Name)
}
max, err = strconv.Atoi(minAndMax[1])
if err != nil {
log.Panicf("Failed to parse max value for %v", field.Name)
}
}
param = &IntParam{Min: min, Max: max}
case "int32":
param = &Int32Param{}
case "mark-bitmask":
param = &MarkBitmaskParam{}
case "float":
param = &FloatParam{}
case "seconds":
param = &SecondsParam{}
case "millis":
param = &MillisParam{}
case "iface-list":
param = &RegexpParam{Regexp: IfaceListRegexp,
Msg: "invalid Linux interface name"}
case "file":
param = &FileParam{
MustExist: strings.Contains(kindParams, "must-exist"),
Executable: strings.Contains(kindParams, "executable"),
}
case "authority":
param = &RegexpParam{Regexp: AuthorityRegexp,
Msg: "invalid URL authority"}
case "ipv4":
param = &Ipv4Param{}
case "endpoint-list":
param = &EndpointListParam{}
case "port-list":
param = &PortListParam{}
case "portrange":
param = &PortRangeParam{}
case "portrange-list":
param = &PortRangeListParam{}
case "hostname":
param = &RegexpParam{Regexp: HostnameRegexp,
Msg: "invalid hostname"}
case "region":
param = &RegionParam{}
case "oneof":
options := strings.Split(kindParams, ",")
lowerCaseToCanon := make(map[string]string)
for _, option := range options {
lowerCaseToCanon[strings.ToLower(option)] = option
}
param = &OneofListParam{
lowerCaseOptionsToCanonical: lowerCaseToCanon}
case "string":
param = &RegexpParam{Regexp: StringRegexp,
Msg: "invalid string"}
case "cidr-list":
param = &CIDRListParam{}
default:
log.Panicf("Unknown type of parameter: %v", kind)
}
metadata := param.GetMetadata()
metadata.Name = field.Name
metadata.ZeroValue = reflect.ValueOf(config).FieldByName(field.Name).Interface()
if strings.Index(flags, "non-zero") > -1 {
metadata.NonZero = true
}
if strings.Index(flags, "die-on-fail") > -1 {
metadata.DieOnParseFailure = true
}
if strings.Index(flags, "local") > -1 {
metadata.Local = true
}
if defaultStr != "" {
if strings.Index(flags, "skip-default-validation") > -1 {
metadata.Default = defaultStr
} else {
// Parse the default value and save it in the metadata. Doing
// that here ensures that we syntax-check the defaults now.
defaultVal, err := param.Parse(defaultStr)
if err != nil {
log.Panicf("Invalid default value: %v", err)
}
metadata.Default = defaultVal
}
} else {
metadata.Default = metadata.ZeroValue
}
knownParams[strings.ToLower(field.Name)] = param
}
}
func (config *Config) RawValues() map[string]string {
return config.rawValues
}
func New() *Config {
if knownParams == nil {
loadParams()
}
p := &Config{
rawValues: make(map[string]string),
sourceToRawConfig: make(map[Source]map[string]string),
}
for _, param := range knownParams {
param.setDefault(p)
}
hostname, err := names.Hostname()
if err != nil {
log.Warningf("Failed to get hostname from kernel, "+
"trying HOSTNAME variable: %v", err)
hostname = strings.ToLower(os.Getenv("HOSTNAME"))
}
p.FelixHostname = hostname
return p
}
type param interface {
GetMetadata() *Metadata
Parse(raw string) (result interface{}, err error)
setDefault(*Config)
}
 | 1 | 16,676 | We should validate this, I think. To add a validator: (1) change `string` to, say, `iface-pattern`; (2) add a new `case "iface-pattern"` to the `switch` in the `loadParams()` function; (3) in that `case`, you should be able to use a `RegexpParam` like the one for `iface-list`, but with a different regexp. I think this regexp should work: `^[a-zA-Z0-9:._+-]{1,15}$`, i.e. 1-15 characters consisting of alphanumerics, colon, dot, underscore, hyphen and plus. | projectcalico-felix | c
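A minimal sketch of the three steps described above, in diff form against the config.go shown in this record. Only `RegexpParam`, the existing `iface-list` case, and the regexp come from the record itself; `IfacePatternRegexp` and the "invalid interface name pattern" message are names invented here for illustration, and the real change may differ.

@@ sketch: Config struct field tag @@
-	IptablesNATOutgoingInterfaceFilter string `config:"string;"`
+	IptablesNATOutgoingInterfaceFilter string `config:"iface-pattern;"`
@@ sketch: package-level regexps @@
 	IfaceListRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}(,[a-zA-Z0-9_-]{1,15})*$`)
+	// Single interface name pattern: 1-15 chars of alphanumerics, colon, dot, underscore, hyphen, plus.
+	IfacePatternRegexp = regexp.MustCompile(`^[a-zA-Z0-9:._+-]{1,15}$`)
@@ sketch: switch in loadParams() @@
 	case "iface-list":
 		param = &RegexpParam{Regexp: IfaceListRegexp,
 			Msg: "invalid Linux interface name"}
+	case "iface-pattern":
+		param = &RegexpParam{Regexp: IfacePatternRegexp,
+			Msg: "invalid interface name pattern"}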
@@ -59,6 +59,10 @@ func TestBalance(t *testing.T) {
require.Nil(err)
// Balance should == 30 now
require.Equal(0, state.Balance.Cmp(big.NewInt(30)))
+
+	// Subtract 40 from the balance
+ err = state.SubBalance(big.NewInt(40))
+ require.Equal(err, ErrNotEnoughBalance)
}
func TestClone(t *testing.T) { | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package state
import (
"encoding/hex"
"math/big"
"testing"
"github.com/golang/mock/gomock"
"github.com/iotexproject/go-pkgs/hash"
"github.com/stretchr/testify/require"
)
func TestEncodeDecode(t *testing.T) {
require := require.New(t)
s1 := Account{
Nonce: 0x10,
Balance: big.NewInt(20000000),
CodeHash: []byte("testing codehash"),
}
ss, err := s1.Serialize()
require.NoError(err)
require.NotEmpty(ss)
require.Equal(64, len(ss))
s2 := Account{}
require.NoError(s2.Deserialize(ss))
require.Equal(big.NewInt(20000000), s2.Balance)
require.Equal(uint64(0x10), s2.Nonce)
require.Equal(hash.ZeroHash256, s2.Root)
require.Equal([]byte("testing codehash"), s2.CodeHash)
}
func TestProto(t *testing.T) {
require := require.New(t)
raw := "1201301a200000000000000000000000000000000000000000000000000000000000000000"
ss, _ := hex.DecodeString(raw)
s1 := Account{}
require.NoError(Deserialize(&s1, ss))
d, err := Serialize(s1)
require.NoError(err)
require.Equal(raw, hex.EncodeToString(d))
}
func TestBalance(t *testing.T) {
require := require.New(t)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
state := &Account{Balance: big.NewInt(20)}
// Add 10 to the balance
err := state.AddBalance(big.NewInt(10))
require.Nil(err)
// Balance should == 30 now
require.Equal(0, state.Balance.Cmp(big.NewInt(30)))
}
func TestClone(t *testing.T) {
require := require.New(t)
ss := &Account{
Nonce: 0x10,
Balance: big.NewInt(200),
}
account := ss.Clone()
require.Equal(big.NewInt(200), account.Balance)
require.Nil(account.AddBalance(big.NewInt(100)))
require.Equal(big.NewInt(200), ss.Balance)
require.Equal(big.NewInt(200+100), account.Balance)
}
 | 1 | 19,963 | File is not `goimports`-ed with `-local github.com/iotexproject/iotex-core` (reported by `goimports`) | iotexproject-iotex-core | go
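For context, `goimports -local github.com/iotexproject/iotex-core` enforces three import groups: standard library first, then external packages, then packages from the local module in a trailing block. A sketch of the expected grouping for this file's own imports (assuming `go-pkgs` counts as external to the `iotex-core` module, this file has no local-module imports, so the complaint is presumably about grouping/ordering):

import (
	"encoding/hex"
	"math/big"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/iotexproject/go-pkgs/hash"
	"github.com/stretchr/testify/require"
)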
@@ -53,6 +53,11 @@ public class FlinkOrcWriter implements OrcRowWriter<RowData> {
writer.writeRow(row, output);
}
+ @Override
+ public List<OrcValueWriter<?>> writers() {
+ return this.writer.writers();
+ }
+
@Override
public Stream<FieldMetrics<?>> metrics() {
return writer.metrics(); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.flink.data;
import java.util.Deque;
import java.util.List;
import java.util.stream.Stream;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.iceberg.FieldMetrics;
import org.apache.iceberg.Schema;
import org.apache.iceberg.data.orc.GenericOrcWriters;
import org.apache.iceberg.orc.OrcRowWriter;
import org.apache.iceberg.orc.OrcValueWriter;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.Types;
import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch;
public class FlinkOrcWriter implements OrcRowWriter<RowData> {
private final FlinkOrcWriters.RowDataWriter writer;
private FlinkOrcWriter(RowType rowType, Schema iSchema) {
this.writer = (FlinkOrcWriters.RowDataWriter) FlinkSchemaVisitor.visit(rowType, iSchema, new WriteBuilder());
}
public static OrcRowWriter<RowData> buildWriter(RowType rowType, Schema iSchema) {
return new FlinkOrcWriter(rowType, iSchema);
}
@Override
public void write(RowData row, VectorizedRowBatch output) {
Preconditions.checkArgument(row != null, "value must not be null");
writer.writeRow(row, output);
}
@Override
public Stream<FieldMetrics<?>> metrics() {
return writer.metrics();
}
private static class WriteBuilder extends FlinkSchemaVisitor<OrcValueWriter<?>> {
private final Deque<Integer> fieldIds = Lists.newLinkedList();
private WriteBuilder() {
}
@Override
public void beforeField(Types.NestedField field) {
fieldIds.push(field.fieldId());
}
@Override
public void afterField(Types.NestedField field) {
fieldIds.pop();
}
@Override
public OrcValueWriter<RowData> record(Types.StructType iStruct,
List<OrcValueWriter<?>> results,
List<LogicalType> fieldType) {
return FlinkOrcWriters.struct(results, fieldType);
}
@Override
public OrcValueWriter<?> map(Types.MapType iMap, OrcValueWriter<?> key, OrcValueWriter<?> value,
LogicalType keyType, LogicalType valueType) {
return FlinkOrcWriters.map(key, value, keyType, valueType);
}
@Override
public OrcValueWriter<?> list(Types.ListType iList, OrcValueWriter<?> element, LogicalType elementType) {
return FlinkOrcWriters.list(element, elementType);
}
@Override
public OrcValueWriter<?> primitive(Type.PrimitiveType iPrimitive, LogicalType flinkPrimitive) {
switch (iPrimitive.typeId()) {
case BOOLEAN:
return GenericOrcWriters.booleans();
case INTEGER:
switch (flinkPrimitive.getTypeRoot()) {
case TINYINT:
return GenericOrcWriters.bytes();
case SMALLINT:
return GenericOrcWriters.shorts();
}
return GenericOrcWriters.ints();
case LONG:
return GenericOrcWriters.longs();
case FLOAT:
Preconditions.checkArgument(fieldIds.peek() != null,
String.format("[BUG] Cannot find field id for primitive field with type %s. This is likely because id " +
"information is not properly pushed during schema visiting.", iPrimitive));
return GenericOrcWriters.floats(fieldIds.peek());
case DOUBLE:
Preconditions.checkArgument(fieldIds.peek() != null,
String.format("[BUG] Cannot find field id for primitive field with type %s. This is likely because id " +
"information is not properly pushed during schema visiting.", iPrimitive));
return GenericOrcWriters.doubles(fieldIds.peek());
case DATE:
return FlinkOrcWriters.dates();
case TIME:
return FlinkOrcWriters.times();
case TIMESTAMP:
Types.TimestampType timestampType = (Types.TimestampType) iPrimitive;
if (timestampType.shouldAdjustToUTC()) {
return FlinkOrcWriters.timestampTzs();
} else {
return FlinkOrcWriters.timestamps();
}
case STRING:
return FlinkOrcWriters.strings();
case UUID:
case FIXED:
case BINARY:
return GenericOrcWriters.byteArrays();
case DECIMAL:
Types.DecimalType decimalType = (Types.DecimalType) iPrimitive;
return FlinkOrcWriters.decimals(decimalType.precision(), decimalType.scale());
default:
throw new IllegalArgumentException(String.format(
"Invalid iceberg type %s corresponding to Flink logical type %s", iPrimitive, flinkPrimitive));
}
}
}
}
 | 1 | 43,025 | Nit: here we don't need the extra `this`; in Iceberg we usually use `this` only to distinguish a member variable from a local variable when assigning a value. | apache-iceberg | java
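In diff form, the cleanup the reviewer suggests for the method added above would presumably be just:

-		return this.writer.writers();
+		return writer.writers();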
@@ -71,7 +71,7 @@ public class AuthorizationLoggingTest {
engineRule.getManagementService().toggleTelemetry(true);
// then
- String message = "ENGINE-03029 Required admin authenticated group or user.";
+ String message = "ENGINE-16002 Exception while closing command context: ENGINE-03110 Required admin authenticated group or user or any of the following permissions:";
List<ILoggingEvent> filteredLog = loggingRule.getFilteredLog(CONTEXT_LOGGER, message);
assertThat(filteredLog.size()).isEqualTo(1); | 1 | /*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.engine.test.api.authorization;
import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.spi.ILoggingEvent;
import org.camunda.bpm.engine.AuthorizationService;
import org.camunda.bpm.engine.authorization.Authorization;
import org.camunda.bpm.engine.test.api.authorization.util.AuthorizationScenario;
import org.camunda.bpm.engine.test.api.authorization.util.AuthorizationTestRule;
import org.camunda.bpm.engine.test.util.ProvidedProcessEngineRule;
import org.camunda.commons.testing.ProcessEngineLoggingRule;
import org.junit.After;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
public class AuthorizationLoggingTest {
protected static final String CONTEXT_LOGGER = "org.camunda.bpm.engine.context";
public ProvidedProcessEngineRule engineRule = new ProvidedProcessEngineRule();
public AuthorizationTestRule authRule = new AuthorizationTestRule(engineRule);
@Rule
public RuleChain chain = RuleChain.outerRule(engineRule).around(authRule);
@Rule
public ProcessEngineLoggingRule loggingRule = new ProcessEngineLoggingRule()
.watch(CONTEXT_LOGGER)
.level(Level.DEBUG);
@After
public void tearDown() {
engineRule.getProcessEngineConfiguration().setAuthorizationEnabled(false);
AuthorizationService authorizationService = engineRule.getAuthorizationService();
for (Authorization authorization : authorizationService.createAuthorizationQuery().list()) {
authorizationService.deleteAuthorization(authorization.getId());
}
}
@Test
public void shouldLogOnDebugLevel() {
// given
AuthorizationScenario scenario = new AuthorizationScenario().withoutAuthorizations();
authRule.init(scenario)
.withUser("userId")
.start();
// when
engineRule.getManagementService().toggleTelemetry(true);
// then
String message = "ENGINE-03029 Required admin authenticated group or user.";
List<ILoggingEvent> filteredLog = loggingRule.getFilteredLog(CONTEXT_LOGGER, message);
assertThat(filteredLog.size()).isEqualTo(1);
assertThat(filteredLog.get(0).getLevel()).isEqualTo(Level.DEBUG);
}
}
 | 1 | 12,622 | Why do we have that extra "ENGINE-16002 Exception while closing command context:" prefix now? Is that intentional on our side, or where does it now come from? | camunda-camunda-bpm-platform | java
@@ -4,10 +4,15 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.Marker;
+import java.util.Collections;
import java.util.Map.Entry;
+import java.util.Set;
import java.util.SortedMap;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
+
+import static com.codahale.metrics.MetricAttribute.*;
/**
* A reporter class for logging metrics values to a SLF4J {@link Logger} periodically, similar to | 1 | package com.codahale.metrics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.Marker;
import java.util.Map.Entry;
import java.util.SortedMap;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
/**
* A reporter class for logging metrics values to a SLF4J {@link Logger} periodically, similar to
* {@link ConsoleReporter} or {@link CsvReporter}, but using the SLF4J framework instead. It also
* supports specifying a {@link Marker} instance that can be used by custom appenders and filters
* for the bound logging toolkit to further process metrics reports.
*/
public class Slf4jReporter extends ScheduledReporter {
/**
* Returns a new {@link Builder} for {@link Slf4jReporter}.
*
* @param registry the registry to report
* @return a {@link Builder} instance for a {@link Slf4jReporter}
*/
public static Builder forRegistry(MetricRegistry registry) {
return new Builder(registry);
}
public enum LoggingLevel { TRACE, DEBUG, INFO, WARN, ERROR }
/**
* A builder for {@link Slf4jReporter} instances. Defaults to logging to {@code metrics}, not
* using a marker, converting rates to events/second, converting durations to milliseconds, and
* not filtering metrics.
*/
public static class Builder {
private final MetricRegistry registry;
private Logger logger;
private LoggingLevel loggingLevel;
private Marker marker;
private String prefix;
private TimeUnit rateUnit;
private TimeUnit durationUnit;
private MetricFilter filter;
private ScheduledExecutorService executor;
private boolean shutdownExecutorOnStop;
private Builder(MetricRegistry registry) {
this.registry = registry;
this.logger = LoggerFactory.getLogger("metrics");
this.marker = null;
this.prefix = "";
this.rateUnit = TimeUnit.SECONDS;
this.durationUnit = TimeUnit.MILLISECONDS;
this.filter = MetricFilter.ALL;
this.loggingLevel = LoggingLevel.INFO;
this.executor = null;
this.shutdownExecutorOnStop = true;
}
/**
* Specifies whether or not, the executor (used for reporting) will be stopped with same time with reporter.
* Default value is true.
* Setting this parameter to false, has the sense in combining with providing external managed executor via {@link #scheduleOn(ScheduledExecutorService)}.
*
* @param shutdownExecutorOnStop if true, then executor will be stopped in same time with this reporter
* @return {@code this}
*/
public Builder shutdownExecutorOnStop(boolean shutdownExecutorOnStop) {
this.shutdownExecutorOnStop = shutdownExecutorOnStop;
return this;
}
/**
* Specifies the executor to use while scheduling reporting of metrics.
* Default value is null.
* Null value leads to executor will be auto created on start.
*
* @param executor the executor to use while scheduling reporting of metrics.
* @return {@code this}
*/
public Builder scheduleOn(ScheduledExecutorService executor) {
this.executor = executor;
return this;
}
/**
* Log metrics to the given logger.
*
* @param logger an SLF4J {@link Logger}
* @return {@code this}
*/
public Builder outputTo(Logger logger) {
this.logger = logger;
return this;
}
/**
* Mark all logged metrics with the given marker.
*
* @param marker an SLF4J {@link Marker}
* @return {@code this}
*/
public Builder markWith(Marker marker) {
this.marker = marker;
return this;
}
/**
* Prefix all metric names with the given string.
*
* @param prefix the prefix for all metric names
* @return {@code this}
*/
public Builder prefixedWith(String prefix) {
this.prefix = prefix;
return this;
}
/**
* Convert rates to the given time unit.
*
* @param rateUnit a unit of time
* @return {@code this}
*/
public Builder convertRatesTo(TimeUnit rateUnit) {
this.rateUnit = rateUnit;
return this;
}
/**
* Convert durations to the given time unit.
*
* @param durationUnit a unit of time
* @return {@code this}
*/
public Builder convertDurationsTo(TimeUnit durationUnit) {
this.durationUnit = durationUnit;
return this;
}
/**
* Only report metrics which match the given filter.
*
* @param filter a {@link MetricFilter}
* @return {@code this}
*/
public Builder filter(MetricFilter filter) {
this.filter = filter;
return this;
}
/**
* Use Logging Level when reporting.
*
* @param loggingLevel a (@link Slf4jReporter.LoggingLevel}
* @return {@code this}
*/
public Builder withLoggingLevel(LoggingLevel loggingLevel) {
this.loggingLevel = loggingLevel;
return this;
}
/**
* Builds a {@link Slf4jReporter} with the given properties.
*
* @return a {@link Slf4jReporter}
*/
public Slf4jReporter build() {
LoggerProxy loggerProxy;
switch (loggingLevel) {
case TRACE:
loggerProxy = new TraceLoggerProxy(logger);
break;
case INFO:
loggerProxy = new InfoLoggerProxy(logger);
break;
case WARN:
loggerProxy = new WarnLoggerProxy(logger);
break;
case ERROR:
loggerProxy = new ErrorLoggerProxy(logger);
break;
default:
case DEBUG:
loggerProxy = new DebugLoggerProxy(logger);
break;
}
return new Slf4jReporter(registry, loggerProxy, marker, prefix, rateUnit, durationUnit, filter, executor, shutdownExecutorOnStop);
}
}
private final LoggerProxy loggerProxy;
private final Marker marker;
private final String prefix;
private Slf4jReporter(MetricRegistry registry,
LoggerProxy loggerProxy,
Marker marker,
String prefix,
TimeUnit rateUnit,
TimeUnit durationUnit,
MetricFilter filter,
ScheduledExecutorService executor,
boolean shutdownExecutorOnStop) {
super(registry, "logger-reporter", filter, rateUnit, durationUnit, executor, shutdownExecutorOnStop);
this.loggerProxy = loggerProxy;
this.marker = marker;
this.prefix = prefix;
}
@Override
@SuppressWarnings("rawtypes")
public void report(SortedMap<String, Gauge> gauges,
SortedMap<String, Counter> counters,
SortedMap<String, Histogram> histograms,
SortedMap<String, Meter> meters,
SortedMap<String, Timer> timers) {
if (loggerProxy.isEnabled(marker)) {
for (Entry<String, Gauge> entry : gauges.entrySet()) {
logGauge(entry.getKey(), entry.getValue());
}
for (Entry<String, Counter> entry : counters.entrySet()) {
logCounter(entry.getKey(), entry.getValue());
}
for (Entry<String, Histogram> entry : histograms.entrySet()) {
logHistogram(entry.getKey(), entry.getValue());
}
for (Entry<String, Meter> entry : meters.entrySet()) {
logMeter(entry.getKey(), entry.getValue());
}
for (Entry<String, Timer> entry : timers.entrySet()) {
logTimer(entry.getKey(), entry.getValue());
}
}
}
private void logTimer(String name, Timer timer) {
final Snapshot snapshot = timer.getSnapshot();
loggerProxy.log(marker,
"type={}, name={}, count={}, min={}, max={}, mean={}, stddev={}, median={}, " +
"p75={}, p95={}, p98={}, p99={}, p999={}, mean_rate={}, m1={}, m5={}, " +
"m15={}, rate_unit={}, duration_unit={}",
"TIMER",
prefix(name),
timer.getCount(),
convertDuration(snapshot.getMin()),
convertDuration(snapshot.getMax()),
convertDuration(snapshot.getMean()),
convertDuration(snapshot.getStdDev()),
convertDuration(snapshot.getMedian()),
convertDuration(snapshot.get75thPercentile()),
convertDuration(snapshot.get95thPercentile()),
convertDuration(snapshot.get98thPercentile()),
convertDuration(snapshot.get99thPercentile()),
convertDuration(snapshot.get999thPercentile()),
convertRate(timer.getMeanRate()),
convertRate(timer.getOneMinuteRate()),
convertRate(timer.getFiveMinuteRate()),
convertRate(timer.getFifteenMinuteRate()),
getRateUnit(),
getDurationUnit());
}
private void logMeter(String name, Meter meter) {
loggerProxy.log(marker,
"type={}, name={}, count={}, mean_rate={}, m1={}, m5={}, m15={}, rate_unit={}",
"METER",
prefix(name),
meter.getCount(),
convertRate(meter.getMeanRate()),
convertRate(meter.getOneMinuteRate()),
convertRate(meter.getFiveMinuteRate()),
convertRate(meter.getFifteenMinuteRate()),
getRateUnit());
}
private void logHistogram(String name, Histogram histogram) {
final Snapshot snapshot = histogram.getSnapshot();
loggerProxy.log(marker,
"type={}, name={}, count={}, min={}, max={}, mean={}, stddev={}, " +
"median={}, p75={}, p95={}, p98={}, p99={}, p999={}",
"HISTOGRAM",
prefix(name),
histogram.getCount(),
snapshot.getMin(),
snapshot.getMax(),
snapshot.getMean(),
snapshot.getStdDev(),
snapshot.getMedian(),
snapshot.get75thPercentile(),
snapshot.get95thPercentile(),
snapshot.get98thPercentile(),
snapshot.get99thPercentile(),
snapshot.get999thPercentile());
}
private void logCounter(String name, Counter counter) {
loggerProxy.log(marker, "type={}, name={}, count={}", "COUNTER", prefix(name), counter.getCount());
}
private void logGauge(String name, Gauge<?> gauge) {
loggerProxy.log(marker, "type={}, name={}, value={}", "GAUGE", prefix(name), gauge.getValue());
}
@Override
protected String getRateUnit() {
return "events/" + super.getRateUnit();
}
private String prefix(String... components) {
return MetricRegistry.name(prefix, components);
}
/* private class to allow logger configuration */
static abstract class LoggerProxy {
protected final Logger logger;
public LoggerProxy(Logger logger) {
this.logger = logger;
}
abstract void log(Marker marker, String format, Object... arguments);
abstract boolean isEnabled(Marker marker);
}
/* private class to allow logger configuration */
private static class DebugLoggerProxy extends LoggerProxy {
public DebugLoggerProxy(Logger logger) {
super(logger);
}
@Override
public void log(Marker marker, String format, Object... arguments) {
logger.debug(marker, format, arguments);
}
@Override
public boolean isEnabled(Marker marker) {
return logger.isDebugEnabled(marker);
}
}
/* private class to allow logger configuration */
private static class TraceLoggerProxy extends LoggerProxy {
public TraceLoggerProxy(Logger logger) {
super(logger);
}
@Override
public void log(Marker marker, String format, Object... arguments) {
logger.trace(marker, format, arguments);
}
@Override
public boolean isEnabled(Marker marker) {
return logger.isTraceEnabled(marker);
}
}
/* private class to allow logger configuration */
private static class InfoLoggerProxy extends LoggerProxy {
public InfoLoggerProxy(Logger logger) {
super(logger);
}
@Override
public void log(Marker marker, String format, Object... arguments) {
logger.info(marker, format, arguments);
}
@Override
public boolean isEnabled(Marker marker) {
return logger.isInfoEnabled(marker);
}
}
/* private class to allow logger configuration */
private static class WarnLoggerProxy extends LoggerProxy {
public WarnLoggerProxy(Logger logger) {
super(logger);
}
@Override
public void log(Marker marker, String format, Object... arguments) {
logger.warn(marker, format, arguments);
}
@Override
public boolean isEnabled(Marker marker) {
return logger.isWarnEnabled(marker);
}
}
/* private class to allow logger configuration */
private static class ErrorLoggerProxy extends LoggerProxy {
public ErrorLoggerProxy(Logger logger) {
super(logger);
}
@Override
public void log(Marker marker, String format, Object... arguments) {
logger.error(marker, format, arguments);
}
@Override
public boolean isEnabled(Marker marker) {
return logger.isErrorEnabled(marker);
}
}
}
 | 1 | 7,088 | Please don't use star imports; in this codebase we tend to use direct imports. | dropwizard-metrics | java
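Applied to the diff above, the static star import would presumably be replaced with one direct import per constant actually used, e.g.:

-import static com.codahale.metrics.MetricAttribute.*;
+import static com.codahale.metrics.MetricAttribute.COUNT;
+import static com.codahale.metrics.MetricAttribute.MAX;
+import static com.codahale.metrics.MetricAttribute.MEAN;

Which constants are needed depends on the rest of the change, which is not shown in this record, so the three picked here are only placeholders.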
@@ -1,8 +1,10 @@
+import {CREDENTIALS} from "../config.func";
+
module.exports = function(server) {
test('who am I?', () => {
return server.whoami().then(function (username) {
- expect(username).toMatch('test');
+ expect(username).toMatch(CREDENTIALS.user);
});
});
| 1 | module.exports = function(server) {
test('who am I?', () => {
return server.whoami().then(function (username) {
expect(username).toMatch('test');
});
});
};
| 1 | 18,070 | Filename can be `config.functional` | verdaccio-verdaccio | js |
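If the file is renamed as suggested, the import added in the patch above would follow it:

-import {CREDENTIALS} from "../config.func";
+import {CREDENTIALS} from "../config.functional";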
@@ -44,11 +44,19 @@ nano::block_processor::block_processor (nano::node & node_a, nano::write_databas
this->condition.notify_all ();
}
};
+ thread = std::thread ([this] () {
+ nano::thread_role::set (nano::thread_role::name::block_processing);
+ this->process_blocks ();
+ });
}
nano::block_processor::~block_processor ()
{
stop ();
+ if (thread.joinable ())
+ {
+ thread.join ();
+ }
}
void nano::block_processor::stop () | 1 | #include <nano/lib/threading.hpp>
#include <nano/lib/timer.hpp>
#include <nano/node/blockprocessor.hpp>
#include <nano/node/election.hpp>
#include <nano/node/node.hpp>
#include <nano/node/websocket.hpp>
#include <nano/secure/blockstore.hpp>
#include <boost/format.hpp>
std::chrono::milliseconds constexpr nano::block_processor::confirmation_request_delay;
nano::block_post_events::block_post_events (std::function<nano::read_transaction ()> && get_transaction_a) :
get_transaction (std::move (get_transaction_a))
{
}
nano::block_post_events::~block_post_events ()
{
debug_assert (get_transaction != nullptr);
auto transaction (get_transaction ());
for (auto const & i : events)
{
i (transaction);
}
}
nano::block_processor::block_processor (nano::node & node_a, nano::write_database_queue & write_database_queue_a) :
next_log (std::chrono::steady_clock::now ()),
node (node_a),
write_database_queue (write_database_queue_a),
state_block_signature_verification (node.checker, node.ledger.network_params.ledger.epochs, node.config, node.logger, node.flags.block_processor_verification_size)
{
state_block_signature_verification.blocks_verified_callback = [this] (std::deque<std::pair<nano::unchecked_info, bool>> & items, std::vector<int> const & verifications, std::vector<nano::block_hash> const & hashes, std::vector<nano::signature> const & blocks_signatures) {
this->process_verified_state_blocks (items, verifications, hashes, blocks_signatures);
};
state_block_signature_verification.transition_inactive_callback = [this] () {
if (this->flushing)
{
{
// Prevent a race with condition.wait in block_processor::flush
nano::lock_guard<nano::mutex> guard (this->mutex);
}
this->condition.notify_all ();
}
};
}
nano::block_processor::~block_processor ()
{
stop ();
}
void nano::block_processor::stop ()
{
{
nano::lock_guard<nano::mutex> lock (mutex);
stopped = true;
}
condition.notify_all ();
state_block_signature_verification.stop ();
}
void nano::block_processor::flush ()
{
node.checker.flush ();
flushing = true;
nano::unique_lock<nano::mutex> lock (mutex);
while (!stopped && (have_blocks () || active || state_block_signature_verification.is_active ()))
{
condition.wait (lock);
}
flushing = false;
}
size_t nano::block_processor::size ()
{
nano::unique_lock<nano::mutex> lock (mutex);
return (blocks.size () + state_block_signature_verification.size () + forced.size ());
}
bool nano::block_processor::full ()
{
return size () >= node.flags.block_processor_full_size;
}
bool nano::block_processor::half_full ()
{
return size () >= node.flags.block_processor_full_size / 2;
}
void nano::block_processor::add (std::shared_ptr<nano::block> const & block_a, uint64_t origination)
{
nano::unchecked_info info (block_a, 0, origination, nano::signature_verification::unknown);
add (info);
}
void nano::block_processor::add (nano::unchecked_info const & info_a, const bool push_front_preference_a)
{
debug_assert (!nano::work_validate_entry (*info_a.block));
bool quarter_full (size () > node.flags.block_processor_full_size / 4);
if (info_a.verified == nano::signature_verification::unknown && (info_a.block->type () == nano::block_type::state || info_a.block->type () == nano::block_type::open || !info_a.account.is_zero ()))
{
state_block_signature_verification.add (info_a, false);
}
else if (push_front_preference_a && !quarter_full)
{
/* Push blocks from unchecked to front of processing deque to keep more operations with unchecked inside of single write transaction.
It's designed to help with realtime blocks traffic if block processor is not performing large task like bootstrap.
If deque is a quarter full then push back to allow other blocks processing. */
{
nano::lock_guard<nano::mutex> guard (mutex);
blocks.emplace_front (info_a, false);
}
condition.notify_all ();
}
else
{
{
nano::lock_guard<nano::mutex> guard (mutex);
blocks.emplace_front (info_a, false);
}
condition.notify_all ();
}
}
void nano::block_processor::add_local (nano::unchecked_info const & info_a, bool const watch_work_a)
{
release_assert (info_a.verified == nano::signature_verification::unknown && (info_a.block->type () == nano::block_type::state || !info_a.account.is_zero ()));
debug_assert (!nano::work_validate_entry (*info_a.block));
state_block_signature_verification.add (info_a, watch_work_a);
}
void nano::block_processor::force (std::shared_ptr<nano::block> const & block_a)
{
{
nano::lock_guard<nano::mutex> lock (mutex);
forced.push_back (block_a);
}
condition.notify_all ();
}
void nano::block_processor::update (std::shared_ptr<nano::block> const & block_a)
{
{
nano::lock_guard<nano::mutex> lock (mutex);
updates.push_back (block_a);
}
condition.notify_all ();
}
void nano::block_processor::wait_write ()
{
nano::lock_guard<nano::mutex> lock (mutex);
awaiting_write = true;
}
void nano::block_processor::process_blocks ()
{
nano::unique_lock<nano::mutex> lock (mutex);
while (!stopped)
{
if (have_blocks_ready ())
{
active = true;
lock.unlock ();
process_batch (lock);
lock.lock ();
active = false;
}
else
{
condition.notify_one ();
condition.wait (lock);
}
}
}
bool nano::block_processor::should_log ()
{
auto result (false);
auto now (std::chrono::steady_clock::now ());
if (next_log < now)
{
next_log = now + (node.config.logging.timing_logging () ? std::chrono::seconds (2) : std::chrono::seconds (15));
result = true;
}
return result;
}
bool nano::block_processor::have_blocks_ready ()
{
debug_assert (!mutex.try_lock ());
return !blocks.empty () || !forced.empty () || !updates.empty ();
}
bool nano::block_processor::have_blocks ()
{
debug_assert (!mutex.try_lock ());
return have_blocks_ready () || state_block_signature_verification.size () != 0;
}
void nano::block_processor::process_verified_state_blocks (std::deque<std::pair<nano::unchecked_info, bool>> & items, std::vector<int> const & verifications, std::vector<nano::block_hash> const & hashes, std::vector<nano::signature> const & blocks_signatures)
{
{
nano::unique_lock<nano::mutex> lk (mutex);
for (auto i (0); i < verifications.size (); ++i)
{
debug_assert (verifications[i] == 1 || verifications[i] == 0);
auto & [item, watch_work] = items.front ();
if (!item.block->link ().is_zero () && node.ledger.is_epoch_link (item.block->link ()))
{
// Epoch blocks
if (verifications[i] == 1)
{
item.verified = nano::signature_verification::valid_epoch;
blocks.emplace_back (std::move (item), watch_work);
}
else
{
// Possible regular state blocks with epoch link (send subtype)
item.verified = nano::signature_verification::unknown;
blocks.emplace_back (std::move (item), watch_work);
}
}
else if (verifications[i] == 1)
{
// Non epoch blocks
item.verified = nano::signature_verification::valid;
blocks.emplace_back (std::move (item), watch_work);
}
else
{
requeue_invalid (hashes[i], item);
}
items.pop_front ();
}
}
condition.notify_all ();
}
void nano::block_processor::process_batch (nano::unique_lock<nano::mutex> & lock_a)
{
auto scoped_write_guard = write_database_queue.wait (nano::writer::process_batch);
block_post_events post_events ([&store = node.store] { return store.tx_begin_read (); });
auto transaction (node.store.tx_begin_write ({ tables::accounts, tables::blocks, tables::frontiers, tables::pending, tables::unchecked }));
nano::timer<std::chrono::milliseconds> timer_l;
lock_a.lock ();
timer_l.start ();
// Processing blocks
unsigned number_of_blocks_processed (0), number_of_forced_processed (0), number_of_updates_processed (0);
auto deadline_reached = [&timer_l, deadline = node.config.block_processor_batch_max_time] { return timer_l.after_deadline (deadline); };
auto processor_batch_reached = [&number_of_blocks_processed, max = node.flags.block_processor_batch_size] { return number_of_blocks_processed >= max; };
auto store_batch_reached = [&number_of_blocks_processed, max = node.store.max_block_write_batch_num ()] { return number_of_blocks_processed >= max; };
while (have_blocks_ready () && (!deadline_reached () || !processor_batch_reached ()) && !awaiting_write && !store_batch_reached ())
{
if ((blocks.size () + state_block_signature_verification.size () + forced.size () + updates.size () > 64) && should_log ())
{
node.logger.always_log (boost::str (boost::format ("%1% blocks (+ %2% state blocks) (+ %3% forced, %4% updates) in processing queue") % blocks.size () % state_block_signature_verification.size () % forced.size () % updates.size ()));
}
bool watch_work{ false };
if (!updates.empty ())
{
auto block (updates.front ());
updates.pop_front ();
lock_a.unlock ();
auto hash (block->hash ());
if (node.store.block_exists (transaction, hash))
{
node.store.block_put (transaction, hash, *block);
}
++number_of_updates_processed;
}
else
{
nano::unchecked_info info;
nano::block_hash hash (0);
bool force (false);
if (forced.empty ())
{
std::tie (info, watch_work) = blocks.front ();
blocks.pop_front ();
hash = info.block->hash ();
}
else
{
info = nano::unchecked_info (forced.front (), 0, nano::seconds_since_epoch (), nano::signature_verification::unknown);
forced.pop_front ();
hash = info.block->hash ();
force = true;
number_of_forced_processed++;
}
lock_a.unlock ();
if (force)
{
auto successor (node.ledger.successor (transaction, info.block->qualified_root ()));
if (successor != nullptr && successor->hash () != hash)
{
// Replace our block with the winner and roll back any dependent blocks
if (node.config.logging.ledger_rollback_logging ())
{
node.logger.always_log (boost::str (boost::format ("Rolling back %1% and replacing with %2%") % successor->hash ().to_string () % hash.to_string ()));
}
std::vector<std::shared_ptr<nano::block>> rollback_list;
if (node.ledger.rollback (transaction, successor->hash (), rollback_list))
{
node.logger.always_log (nano::severity_level::error, boost::str (boost::format ("Failed to roll back %1% because it or a successor was confirmed") % successor->hash ().to_string ()));
}
else if (node.config.logging.ledger_rollback_logging ())
{
node.logger.always_log (boost::str (boost::format ("%1% blocks rolled back") % rollback_list.size ()));
}
// Deleting from votes cache & wallet work watcher, stop active transaction
for (auto & i : rollback_list)
{
node.history.erase (i->root ());
node.wallets.watcher->remove (*i);
// Stop all rolled back active transactions except initial
if (i->hash () != successor->hash ())
{
node.active.erase (*i);
}
}
}
}
number_of_blocks_processed++;
process_one (transaction, post_events, info, watch_work, force);
}
lock_a.lock ();
}
awaiting_write = false;
lock_a.unlock ();
if (node.config.logging.timing_logging () && number_of_blocks_processed != 0 && timer_l.stop () > std::chrono::milliseconds (100))
{
node.logger.always_log (boost::str (boost::format ("Processed %1% blocks (%2% blocks were forced) in %3% %4%") % number_of_blocks_processed % number_of_forced_processed % timer_l.value ().count () % timer_l.unit ()));
}
}
void nano::block_processor::process_live (nano::transaction const & transaction_a, nano::block_hash const & hash_a, std::shared_ptr<nano::block> const & block_a, nano::process_return const & process_return_a, const bool watch_work_a, nano::block_origin const origin_a)
{
// Add to work watcher to prevent dropping the election
if (watch_work_a)
{
node.wallets.watcher->add (block_a);
}
// Start collecting quorum on block
if (watch_work_a || node.ledger.dependents_confirmed (transaction_a, *block_a))
{
node.active.insert (block_a, process_return_a.previous_balance.number ());
}
else
{
node.active.trigger_inactive_votes_cache_election (block_a);
}
// Announce block contents to the network
if (origin_a == nano::block_origin::local)
{
node.network.flood_block_initial (block_a);
}
if (node.websocket_server && node.websocket_server->any_subscriber (nano::websocket::topic::new_unconfirmed_block))
{
node.websocket_server->broadcast (nano::websocket::message_builder ().new_block_arrived (*block_a));
}
}
nano::process_return nano::block_processor::process_one (nano::write_transaction const & transaction_a, block_post_events & events_a, nano::unchecked_info info_a, const bool watch_work_a, const bool forced_a, nano::block_origin const origin_a)
{
nano::process_return result;
auto block (info_a.block);
auto hash (block->hash ());
result = node.ledger.process (transaction_a, *block, info_a.verified);
switch (result.code)
{
case nano::process_result::progress:
{
release_assert (info_a.account.is_zero () || info_a.account == node.store.block_account_calculated (*block));
if (node.config.logging.ledger_logging ())
{
std::string block_string;
block->serialize_json (block_string, node.config.logging.single_line_record ());
node.logger.try_log (boost::str (boost::format ("Processing block %1%: %2%") % hash.to_string () % block_string));
}
if ((info_a.modified > nano::seconds_since_epoch () - 300 && node.block_arrival.recent (hash)) || forced_a)
{
events_a.events.emplace_back ([this, hash, block = info_a.block, result, watch_work_a, origin_a] (nano::transaction const & post_event_transaction_a) { process_live (post_event_transaction_a, hash, block, result, watch_work_a, origin_a); });
}
queue_unchecked (transaction_a, hash);
/* For send blocks check epoch open unchecked (gap pending).
For state blocks check only send subtype and only if block epoch is not last epoch.
If epoch is last, then pending entry shouldn't trigger same epoch open block for destination account. */
if (block->type () == nano::block_type::send || (block->type () == nano::block_type::state && block->sideband ().details.is_send && std::underlying_type_t<nano::epoch> (block->sideband ().details.epoch) < std::underlying_type_t<nano::epoch> (nano::epoch::max)))
{
/* block->destination () for legacy send blocks
block->link () for state blocks (send subtype) */
queue_unchecked (transaction_a, block->destination ().is_zero () ? block->link () : block->destination ());
}
break;
}
case nano::process_result::gap_previous:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Gap previous for: %1%") % hash.to_string ()));
}
info_a.verified = result.verified;
if (info_a.modified == 0)
{
info_a.modified = nano::seconds_since_epoch ();
}
nano::unchecked_key unchecked_key (block->previous (), hash);
node.store.unchecked_put (transaction_a, unchecked_key, info_a);
events_a.events.emplace_back ([this, hash] (nano::transaction const & /* unused */) { this->node.gap_cache.add (hash); });
node.stats.inc (nano::stat::type::ledger, nano::stat::detail::gap_previous);
break;
}
case nano::process_result::gap_source:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Gap source for: %1%") % hash.to_string ()));
}
info_a.verified = result.verified;
if (info_a.modified == 0)
{
info_a.modified = nano::seconds_since_epoch ();
}
nano::unchecked_key unchecked_key (node.ledger.block_source (transaction_a, *(block)), hash);
node.store.unchecked_put (transaction_a, unchecked_key, info_a);
events_a.events.emplace_back ([this, hash] (nano::transaction const & /* unused */) { this->node.gap_cache.add (hash); });
node.stats.inc (nano::stat::type::ledger, nano::stat::detail::gap_source);
break;
}
case nano::process_result::gap_epoch_open_pending:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Gap pending entries for epoch open: %1%") % hash.to_string ()));
}
info_a.verified = result.verified;
if (info_a.modified == 0)
{
info_a.modified = nano::seconds_since_epoch ();
}
nano::unchecked_key unchecked_key (block->account (), hash); // Specific unchecked key starting with epoch open block account public key
node.store.unchecked_put (transaction_a, unchecked_key, info_a);
node.stats.inc (nano::stat::type::ledger, nano::stat::detail::gap_source);
break;
}
case nano::process_result::old:
{
if (node.config.logging.ledger_duplicate_logging ())
{
node.logger.try_log (boost::str (boost::format ("Old for: %1%") % hash.to_string ()));
}
events_a.events.emplace_back ([this, block = info_a.block, origin_a] (nano::transaction const & post_event_transaction_a) { process_old (post_event_transaction_a, block, origin_a); });
node.stats.inc (nano::stat::type::ledger, nano::stat::detail::old);
break;
}
case nano::process_result::bad_signature:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Bad signature for: %1%") % hash.to_string ()));
}
events_a.events.emplace_back ([this, hash, info_a] (nano::transaction const & /* unused */) { requeue_invalid (hash, info_a); });
break;
}
case nano::process_result::negative_spend:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Negative spend for: %1%") % hash.to_string ()));
}
break;
}
case nano::process_result::unreceivable:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Unreceivable for: %1%") % hash.to_string ()));
}
break;
}
case nano::process_result::fork:
{
node.stats.inc (nano::stat::type::ledger, nano::stat::detail::fork);
events_a.events.emplace_back ([this, block] (nano::transaction const &) { this->node.active.publish (block); });
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Fork for: %1% root: %2%") % hash.to_string () % block->root ().to_string ()));
}
break;
}
case nano::process_result::opened_burn_account:
{
node.logger.always_log (boost::str (boost::format ("*** Rejecting open block for burn account ***: %1%") % hash.to_string ()));
break;
}
case nano::process_result::balance_mismatch:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Balance mismatch for: %1%") % hash.to_string ()));
}
break;
}
case nano::process_result::representative_mismatch:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Representative mismatch for: %1%") % hash.to_string ()));
}
break;
}
case nano::process_result::block_position:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Block %1% cannot follow predecessor %2%") % hash.to_string () % block->previous ().to_string ()));
}
break;
}
case nano::process_result::insufficient_work:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Insufficient work for %1% : %2% (difficulty %3%)") % hash.to_string () % nano::to_string_hex (block->block_work ()) % nano::to_string_hex (block->difficulty ())));
}
break;
}
}
return result;
}
nano::process_return nano::block_processor::process_one (nano::write_transaction const & transaction_a, block_post_events & events_a, std::shared_ptr<nano::block> const & block_a, const bool watch_work_a)
{
nano::unchecked_info info (block_a, block_a->account (), 0, nano::signature_verification::unknown);
auto result (process_one (transaction_a, events_a, info, watch_work_a));
return result;
}
void nano::block_processor::process_old (nano::transaction const & transaction_a, std::shared_ptr<nano::block> const & block_a, nano::block_origin const origin_a)
{
node.active.restart (transaction_a, block_a);
// First try to update election difficulty, then attempt to restart an election
if (!node.active.update_difficulty (block_a, true))
{
// Let others know about the difficulty update
if (origin_a == nano::block_origin::local)
{
node.network.flood_block_initial (block_a);
}
}
}
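// Moves unchecked blocks that were waiting on this hash or account back into
// the processing queue now that their dependency has been processed.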
void nano::block_processor::queue_unchecked (nano::write_transaction const & transaction_a, nano::hash_or_account const & hash_or_account_a)
{
auto unchecked_blocks (node.store.unchecked_get (transaction_a, hash_or_account_a.hash));
for (auto & info : unchecked_blocks)
{
if (!node.flags.disable_block_processor_unchecked_deletion)
{
node.store.unchecked_del (transaction_a, nano::unchecked_key (hash_or_account_a, info.block->hash ()));
}
add (info, true);
}
node.gap_cache.erase (hash_or_account_a.hash);
}
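// Hands a block that failed validation back to lazy bootstrap so it can be
// requeued and re-requested.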
void nano::block_processor::requeue_invalid (nano::block_hash const & hash_a, nano::unchecked_info const & info_a)
{
debug_assert (hash_a == info_a.block->hash ());
node.bootstrap_initiator.lazy_requeue (hash_a, info_a.block->previous (), info_a.confirmed);
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (block_processor & block_processor, std::string const & name)
{
size_t blocks_count;
size_t forced_count;
{
nano::lock_guard<nano::mutex> guard (block_processor.mutex);
blocks_count = block_processor.blocks.size ();
forced_count = block_processor.forced.size ();
}
auto composite = std::make_unique<container_info_composite> (name);
composite->add_component (collect_container_info (block_processor.state_block_signature_verification, "state_block_signature_verification"));
composite->add_component (std::make_unique<container_info_leaf> (container_info{ "blocks", blocks_count, sizeof (decltype (block_processor.blocks)::value_type) }));
composite->add_component (std::make_unique<container_info_leaf> (container_info{ "forced", forced_count, sizeof (decltype (block_processor.forced)::value_type) }));
return composite;
}
| 1 | 16,688 | Should the thread join be done in the stop function maybe? I am simply wondering and sharing my thoughts. This is my thinking: * The join used to be done in the stop function of node class and now it has moved in the destructor. * The class state_block_signature_verification joins its thread at the stop function too. | nanocurrency-nano-node | cpp |
@@ -22,3 +22,8 @@ var WalletCmd = &cobra.Command{
fmt.Println("Print: " + strings.Join(args, " "))
},
}
+
+func init() {
+ WalletCmd.AddCommand(walletCreateCmd)
+ WalletCmd.AddCommand(walletListCmd)
+} | 1 | // Copyright (c) 2019 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package wallet
import (
"fmt"
"strings"
"github.com/spf13/cobra"
)
// WalletCmd represents the wallet command
var WalletCmd = &cobra.Command{
Use: "wallet",
Short: "Manage accounts",
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("Print: " + strings.Join(args, " "))
},
}
| 1 | 15,755 | don't use `init` function (from `gochecknoinits`) | iotexproject-iotex-core | go |
@@ -3,7 +3,7 @@
# Purpose:
# sns-ruby-example-enable-resource.rb demonstrates how to enable an Amazon Simple Notification Services (SNS) resource using
-# the AWS SDK for JavaScript (v3).
+# the AWS SDK for Ruby.
# Inputs:
# - MY_TOPIC_ARN | 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Purpose:
# sns-ruby-example-enable-resource.rb demonstrates how to enable an Amazon Simple Notification Services (SNS) resource using
# the AWS SDK for JavaScript (v3).
# Inputs:
# - MY_TOPIC_ARN
# - MY_RESOURCE_ARN
# - REGION
# - POLICY_NAME
# snippet-start:[sns.Ruby.enableResource]
require 'aws-sdk-sns' # v2: require 'aws-sdk'
policy = '{
"Version":"2008-10-17",
"Id":"__default_policy_ID",
"Statement":[{
"Sid":"__default_statement_ID",
"Effect":"Allow",
"Principal":{
"AWS":"*"
},
"Action":["SNS:Publish"],
"Resource":"' + MY_TOPIC_ARN + '",
"Condition":{
"ArnEquals":{
"AWS:SourceArn":"' + MY_RESOURCE_ARN + '"}
}
}]
}'
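# This policy allows any principal to publish to MY_TOPIC_ARN, but only for
# requests originating from MY_RESOURCE_ARN.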
sns = Aws::SNS::Resource.new(region: 'REGION')
# Get topic by ARN
topic = sns.topic(MY_TOPIC_ARN)
# Add policy to topic
topic.set_attributes({
attribute_name: "POLICY_NAME",
attribute_value: policy
})
# snippet-end:[sns.Ruby.enableResource]
| 1 | 20,566 | Simple Notification **Service** (singular) | awsdocs-aws-doc-sdk-examples | rb |
@@ -15,6 +15,7 @@ DEFINE_int64(int64_test, 10, "Test flag for int64 type");
DEFINE_bool(bool_test, false, "Test flag for bool type");
DEFINE_double(double_test, 3.14159, "Test flag for double type");
DEFINE_string(string_test, "Hello World", "Test flag for string type");
+DEFINE_uint32(crash_test, 1024, "The flag could not be read");
namespace nebula {
| 1 | /* Copyright (c) 2018 - present, VE Software Inc. All rights reserved
*
* This source code is licensed under Apache 2.0 License
* (found in the LICENSE.Apache file in the root directory)
*/
#include "base/Base.h"
#include <gtest/gtest.h>
#include <folly/json.h>
#include "webservice/WebService.h"
#include "process/ProcessUtils.h"
DEFINE_int32(int32_test, 10, "Test flag for int32 type");
DEFINE_int64(int64_test, 10, "Test flag for int64 type");
DEFINE_bool(bool_test, false, "Test flag for bool type");
DEFINE_double(double_test, 3.14159, "Test flag for double type");
DEFINE_string(string_test, "Hello World", "Test flag for string type");
namespace nebula {
class FlagsAccessTestEnv : public ::testing::Environment {
public:
void SetUp() override {
FLAGS_ws_http_port = 0;
FLAGS_ws_h2_port = 0;
VLOG(1) << "Starting web service...";
auto status = WebService::start();
ASSERT_TRUE(status.ok()) << status;
}
void TearDown() override {
WebService::stop();
VLOG(1) << "Web service stopped";
}
};
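// Fetches the given path from the local web service with curl and stores the
// response body in respBody; returns false if curl could not be run.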
bool getUrl(const std::string& urlPath, std::string& respBody) {
auto url = folly::stringPrintf("http://%s:%d%s",
FLAGS_ws_ip.c_str(),
FLAGS_ws_http_port,
urlPath.c_str());
VLOG(1) << "Retrieving url: " << url;
auto command = folly::stringPrintf("/usr/bin/curl -G \"%s\" 2> /dev/null",
url.c_str());
auto result = ProcessUtils::runCommand(command.c_str());
if (!result.ok()) {
LOG(ERROR) << "Failed to run curl: " << result.status();
return false;
}
respBody = result.value();
return true;
}
TEST(FlagsAccessTest, GetSetTest) {
std::string resp;
ASSERT_TRUE(getUrl("/get_flags?flags=int32_test", resp));
EXPECT_EQ(folly::stringPrintf("int32_test=%d\n", FLAGS_int32_test), resp);
ASSERT_TRUE(getUrl("/get_flags?flags=int64_test,bool_test,string_test", resp));
EXPECT_EQ(folly::stringPrintf("int64_test=%ld\nbool_test=%s\nstring_test=\"%s\"\n",
FLAGS_int64_test,
(FLAGS_bool_test ? "1" : "0"),
FLAGS_string_test.c_str()),
resp);
ASSERT_TRUE(getUrl("/set_flag?flag=int64_test&value=20", resp));
ASSERT_EQ("true", resp);
ASSERT_TRUE(getUrl("/get_flags?flags=int64_test", resp));
EXPECT_EQ(std::string("int64_test=20\n"), resp);
}
TEST(FlagsAccessTest, JsonTest) {
std::string resp;
ASSERT_TRUE(getUrl("/get_flags?flags=double_test&returnjson", resp));
auto json = folly::parseJson(resp);
ASSERT_TRUE(json.isArray());
ASSERT_EQ(1UL, json.size());
ASSERT_TRUE(json[0].isObject());
ASSERT_EQ(2UL, json[0].size());
auto it = json[0].find("name");
ASSERT_NE(json[0].items().end(), it);
ASSERT_TRUE(it->second.isString());
EXPECT_EQ("double_test", it->second.getString());
it = json[0].find("value");
ASSERT_NE(json[0].items().end(), it);
ASSERT_TRUE(it->second.isDouble());
EXPECT_DOUBLE_EQ(FLAGS_double_test, it->second.getDouble());
}
TEST(FlagsAccessTest, VerboseTest) {
std::string resp;
ASSERT_TRUE(getUrl("/get_flags?flags=int32_test&returnjson&verbose", resp));
auto json = folly::parseJson(resp);
ASSERT_TRUE(json.isArray());
ASSERT_EQ(1UL, json.size());
ASSERT_TRUE(json[0].isObject());
ASSERT_EQ(7UL, json[0].size());
auto it = json[0].find("name");
ASSERT_NE(json[0].items().end(), it);
ASSERT_TRUE(it->second.isString());
EXPECT_EQ("int32_test", it->second.getString());
it = json[0].find("value");
ASSERT_NE(json[0].items().end(), it);
ASSERT_TRUE(it->second.isInt());
EXPECT_EQ(FLAGS_int32_test, it->second.getInt());
it = json[0].find("type");
ASSERT_NE(json[0].items().end(), it);
ASSERT_TRUE(it->second.isString());
EXPECT_EQ("int32", it->second.getString());
it = json[0].find("file");
ASSERT_NE(json[0].items().end(), it);
ASSERT_TRUE(it->second.isString());
EXPECT_EQ(__FILE__, it->second.getString());
it = json[0].find("is_default");
ASSERT_NE(json[0].items().end(), it);
ASSERT_TRUE(it->second.isBool());
EXPECT_TRUE(it->second.getBool());
}
TEST(FlagsAccessTest, ErrorTest) {
std::string resp;
ASSERT_TRUE(getUrl("/get_flags123?flags=int32_test", resp));
EXPECT_TRUE(resp.empty());
}
} // namespace nebula
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
folly::init(&argc, &argv, true);
google::SetStderrLogging(google::INFO);
::testing::AddGlobalTestEnvironment(new nebula::FlagsAccessTestEnv());
return RUN_ALL_TESTS();
}
| 1 | 16,592 | You are fixing the crash problem, and you have fixed it, so it won't crash anymore. So `crash_test` is not a proper name. | vesoft-inc-nebula | cpp |
@@ -68,8 +68,12 @@ public class SampleNamer extends NameFormatterDelegator {
return localVarName(Name.lowerCamel("request"));
}
- /** Returns the variable name of the request body. */
public String getRequestBodyVarName() {
+ return getRequestBodyVarName("");
+ }
+
+ /** Returns the variable name of the request body. */
+ public String getRequestBodyVarName(String requestBodyTypeName) {
return localVarName(Name.lowerCamel("requestBody"));
}
| 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.discovery.transformer;
import com.google.api.codegen.util.Name;
import com.google.api.codegen.util.NameFormatter;
import com.google.api.codegen.util.NameFormatterDelegator;
import com.google.common.base.Strings;
/** Provides language-specific names for variables and classes. */
public class SampleNamer extends NameFormatterDelegator {
public SampleNamer(NameFormatter nameFormatter) {
super(nameFormatter);
}
/** Returns the application name of the sample. */
public String getSampleApplicationName(String apiCanonicalName) {
return "Google-" + apiCanonicalName.replace(" ", "") + "Sample/0.1";
}
/** Returns the class name of the sample. */
public String getSampleClassName(String apiTypeName) {
return publicClassName(Name.upperCamel(apiTypeName, "Example"));
}
/** Returns the variable name of the service. */
public String getServiceVarName(String apiTypeName) {
return localVarName(Name.upperCamel(apiTypeName, "Service"));
}
/** Returns the variable name for a field. */
public String getFieldVarName(String fieldName) {
return localVarName(Name.lowerCamel(fieldName));
}
/** Returns the resource getter method name for a resource field. */
public String getResourceGetterName(String fieldName) {
return publicMethodName(Name.lowerCamel("get", fieldName));
}
/**
* Returns the variable name for a resource field.
*
* <p>If resourceTypeName is an empty string, "item" is returned.
*/
public String getResourceVarName(String resourceTypeName) {
if (Strings.isNullOrEmpty(resourceTypeName)) {
return localVarName(Name.lowerCamel("item"));
}
return localVarName(Name.upperCamel(resourceTypeName));
}
/** Returns the variable name of the request. */
public String getRequestVarName() {
return localVarName(Name.lowerCamel("request"));
}
/** Returns the variable name of the request body. */
public String getRequestBodyVarName() {
return localVarName(Name.lowerCamel("requestBody"));
}
/** Returns the variable name of the response. */
public String getResponseVarName() {
return localVarName(Name.lowerCamel("response"));
}
/** Returns the name of the createService function. */
public String createServiceFuncName(String apiTypeName) {
return publicMethodName(Name.upperCamel("Create", apiTypeName, "Service"));
}
}
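// Usage sketch (illustrative, not part of the original file): given a Java
// NameFormatter, new SampleNamer(formatter).getSampleClassName("Drive")
// produces a class name like "DriveExample", and getServiceVarName("Drive")
// a local variable name like "driveService".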
| 1 | 19,741 | If this default doesn't depend on the argument, shouldn't it be delegated as the default for the no-arg version instead? | googleapis-gapic-generator | java |
@@ -84,7 +84,7 @@ def read_api_key_safe():
def _get_config_file(path):
- get_or_create_file(path)
+ get_or_create_file(path, permissions=0o600)
return path
| 1 | import json
import os
import time
from six.moves import queue
from localstack import config
from localstack.constants import API_ENDPOINT
from localstack.utils.common import JsonObject, get_or_create_file
from localstack.utils.common import safe_requests as requests
from localstack.utils.common import save_file, short_uid, timestamp
from localstack.utils.run import FuncThread
PROCESS_ID = short_uid()
MACHINE_ID = None
# event type constants
EVENT_START_INFRA = "inf.up"
EVENT_STOP_INFRA = "inf.dn"
EVENT_KINESIS_CREATE_STREAM = "kns.cs"
EVENT_KINESIS_DELETE_STREAM = "kns.ds"
EVENT_LAMBDA_CREATE_FUNC = "lmb.cf"
EVENT_LAMBDA_DELETE_FUNC = "lmb.df"
EVENT_LAMBDA_INVOKE_FUNC = "lmb.if"
EVENT_SQS_CREATE_QUEUE = "sqs.cq"
EVENT_SQS_DELETE_QUEUE = "sqs.dq"
EVENT_SNS_CREATE_TOPIC = "sns.ct"
EVENT_SNS_DELETE_TOPIC = "sns.dt"
EVENT_S3_CREATE_BUCKET = "s3.cb"
EVENT_S3_DELETE_BUCKET = "s3.db"
EVENT_STEPFUNCTIONS_CREATE_SM = "stf.cm"
EVENT_STEPFUNCTIONS_DELETE_SM = "stf.dm"
EVENT_APIGW_CREATE_API = "agw.ca"
EVENT_APIGW_DELETE_API = "agw.da"
EVENT_DYNAMODB_CREATE_TABLE = "ddb.ct"
EVENT_DYNAMODB_DELETE_TABLE = "ddb.dt"
EVENT_DYNAMODB_CREATE_STREAM = "ddb.cs"
EVENT_CLOUDFORMATION_CREATE_STACK = "clf.cs"
EVENT_ES_CREATE_DOMAIN = "es.cd"
EVENT_ES_DELETE_DOMAIN = "es.dd"
EVENT_FIREHOSE_CREATE_STREAM = "fho.cs"
EVENT_FIREHOSE_DELETE_STREAM = "fho.ds"
# sender thread and queue
SENDER_THREAD = None
EVENT_QUEUE = queue.Queue()
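# Events are serialized with short keys (t, m_id, p_id, p, k, e_t) to keep
# the payload compact; the accessor methods below map them to readable names.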
class AnalyticsEvent(JsonObject):
def __init__(self, **kwargs):
self.t = kwargs.get("timestamp") or kwargs.get("t") or timestamp()
self.m_id = kwargs.get("machine_id") or kwargs.get("m_id") or get_machine_id()
self.p_id = kwargs.get("process_id") or kwargs.get("p_id") or get_process_id()
self.p = kwargs.get("payload") if kwargs.get("payload") is not None else kwargs.get("p")
self.k = kwargs.get("api_key") or kwargs.get("k") or read_api_key_safe()
self.e_t = kwargs.get("event_type") or kwargs.get("e_t")
def timestamp(self):
return self.t
def machine_id(self):
return self.m_id
def process_id(self):
return self.p_id
def event_type(self):
return self.e_t
def payload(self):
return self.p
def api_key(self):
return self.k
def read_api_key_safe():
try:
from localstack_ext.bootstrap.licensing import read_api_key
return read_api_key()
except Exception:
return None
def _get_config_file(path):
get_or_create_file(path)
return path
def get_config_file_homedir():
return _get_config_file(config.CONFIG_FILE_PATH)
def get_config_file_tempdir():
return _get_config_file(os.path.join(config.TMP_FOLDER, ".localstack"))
def get_machine_id():
global MACHINE_ID
if MACHINE_ID:
return MACHINE_ID
# determine MACHINE_ID from config files
configs_map = {}
# TODO check if this distinction is needed - config.CONFIG_FILE_PATH already handles tmp vs home folder
config_file_tmp = get_config_file_tempdir()
config_file_home = get_config_file_homedir()
for config_file in (config_file_home, config_file_tmp):
if config_file:
local_configs = configs_map[config_file] = config.load_config_file(
config_file=config_file
)
if "machine_id" in local_configs:
MACHINE_ID = local_configs["machine_id"]
break
# if we can neither find NOR create the config files, fall back to process id
if not configs_map:
return PROCESS_ID
# assign default id if empty
if not MACHINE_ID:
MACHINE_ID = short_uid()
# update MACHINE_ID in all config files
for config_file, configs in configs_map.items():
configs["machine_id"] = MACHINE_ID
save_file(config_file, json.dumps(configs))
return MACHINE_ID
def get_process_id():
return PROCESS_ID
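# Background worker loop: blocks on EVENT_QUEUE and POSTs each event to the
# analytics endpoint; failures are swallowed so that usage reporting never
# interferes with normal operation.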
def poll_and_send_messages(params):
while True:
try:
event = EVENT_QUEUE.get(block=True, timeout=None)
event = event.to_dict()
endpoint = "%s/events" % API_ENDPOINT.rstrip("/")
requests.post(endpoint, json=event)
except Exception:
# silently fail, make collection of usage data as non-intrusive as possible
time.sleep(1)
def is_travis():
return os.environ.get("TRAVIS", "").lower() in ["true", "1"]
def get_hash(name):
if not name:
return "0"
max_hash = 10000000000
hashed = hash(name) % max_hash
hashed = hex(hashed).replace("0x", "")
return hashed
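# Illustrative call (queue_name is hypothetical): fire_event(
#     EVENT_SQS_CREATE_QUEUE, payload={"q": get_hash(queue_name)})
# records an anonymized creation event, provided an API key is configured.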
def fire_event(event_type, payload=None):
if config.DISABLE_EVENTS:
return
global SENDER_THREAD
if not SENDER_THREAD:
SENDER_THREAD = FuncThread(poll_and_send_messages, {})
SENDER_THREAD.start()
api_key = read_api_key_safe()
if not api_key:
# only store events if API key has been specified
return
from localstack.utils.analytics import log
from localstack.utils.testutil import ( # leave here to avoid circular dependency
is_local_test_mode,
)
if payload is None:
payload = {}
if isinstance(payload, dict):
if is_travis():
payload["travis"] = True
if is_local_test_mode():
payload["int"] = True
# event = AnalyticsEvent(event_type=event_type, payload=payload, api_key=api_key)
# EVENT_QUEUE.put_nowait(event) FIXME: remove old logging code entirely before next release
log.event("legacy", {"event": event_type, "payload": payload})
| 1 | 13,593 | I did not use the new save_config_file method in here, because I am not sure whether this whole logic is still necessary? There is an TODO about it there as well. | localstack-localstack | py |
@@ -22,7 +22,7 @@ define(['dom', 'appRouter', 'connectionManager'], function (dom, appRouter, conn
return void appRouter.showItem(items[0]);
}
- var url = 'itemdetails.html?id=' + itemId + '&serverId=' + serverId;
+ var url = 'details?id=' + itemId + '&serverId=' + serverId;
Dashboard.navigate(url);
});
e.stopPropagation(); | 1 | define(['dom', 'appRouter', 'connectionManager'], function (dom, appRouter, connectionManager) {
'use strict';
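    // Navigates for a grouped card click: a single latest item is opened
    // directly via appRouter, otherwise the item details page is shown.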
function onGroupedCardClick(e, card) {
var itemId = card.getAttribute('data-id');
var serverId = card.getAttribute('data-serverid');
var apiClient = connectionManager.getApiClient(serverId);
var userId = apiClient.getCurrentUserId();
var playedIndicator = card.querySelector('.playedIndicator');
var playedIndicatorHtml = playedIndicator ? playedIndicator.innerHTML : null;
var options = {
            Limit: parseInt(playedIndicatorHtml || '10', 10),
Fields: 'PrimaryImageAspectRatio,DateCreated',
ParentId: itemId,
GroupItems: false
};
var actionableParent = dom.parentWithTag(e.target, ['A', 'BUTTON', 'INPUT']);
if (!actionableParent || actionableParent.classList.contains('cardContent')) {
apiClient.getJSON(apiClient.getUrl('Users/' + userId + '/Items/Latest', options)).then(function (items) {
if (1 === items.length) {
return void appRouter.showItem(items[0]);
}
var url = 'itemdetails.html?id=' + itemId + '&serverId=' + serverId;
Dashboard.navigate(url);
});
e.stopPropagation();
e.preventDefault();
return false;
}
}
function onItemsContainerClick(e) {
var groupedCard = dom.parentWithClass(e.target, 'groupedCard');
if (groupedCard) {
onGroupedCardClick(e, groupedCard);
}
}
return {
onItemsContainerClick: onItemsContainerClick
};
});
| 1 | 15,407 | Why was this changed? | jellyfin-jellyfin-web | js |
@@ -98,6 +98,8 @@ class IoUBalancedNegSampler(RandomSampler):
floor_set = set()
iou_sampling_set = set(
np.where(max_overlaps > self.floor_thr)[0])
+ # for sampling interval calculation
+ self.floor_thr == 0
floor_neg_inds = list(floor_set & neg_set)
iou_sampling_neg_inds = list(iou_sampling_set & neg_set) | 1 | import numpy as np
import torch
from .random_sampler import RandomSampler
class IoUBalancedNegSampler(RandomSampler):
"""IoU Balanced Sampling
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
Sampling proposals according to their IoU. `floor_fraction` of needed RoIs
are sampled from proposals whose IoU are lower than `floor_thr` randomly.
The others are sampled from proposals whose IoU are higher than
`floor_thr`. These proposals are sampled from some bins evenly, which are
split by `num_bins` via IoU evenly.
Args:
num (int): number of proposals.
pos_fraction (float): fraction of positive proposals.
floor_thr (float): threshold (minimum) IoU for IoU balanced sampling,
set to -1 if all using IoU balanced sampling.
floor_fraction (float): sampling fraction of proposals under floor_thr.
num_bins (int): number of bins in IoU balanced sampling.
"""
def __init__(self,
num,
pos_fraction,
floor_thr=-1,
floor_fraction=0,
num_bins=3,
**kwargs):
super(IoUBalancedNegSampler, self).__init__(num, pos_fraction,
**kwargs)
assert floor_thr >= 0 or floor_thr == -1
assert 0 <= floor_fraction <= 1
assert num_bins >= 1
self.floor_thr = floor_thr
self.floor_fraction = floor_fraction
self.num_bins = num_bins
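    # Draws negatives evenly from self.num_bins equal-width IoU intervals
    # between floor_thr and the maximum IoU; any shortfall is topped up with
    # random extras from the remaining candidates.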
def sample_via_interval(self, max_overlaps, full_set, num_expected):
max_iou = max_overlaps.max()
iou_interval = (max_iou - self.floor_thr) / self.num_bins
per_num_expected = int(num_expected / self.num_bins)
sampled_inds = []
for i in range(self.num_bins):
start_iou = self.floor_thr + i * iou_interval
end_iou = self.floor_thr + (i + 1) * iou_interval
tmp_set = set(
np.where(
np.logical_and(max_overlaps >= start_iou,
max_overlaps < end_iou))[0])
tmp_inds = list(tmp_set & full_set)
if len(tmp_inds) > per_num_expected:
tmp_sampled_set = self.random_choice(tmp_inds,
per_num_expected)
else:
tmp_sampled_set = np.array(tmp_inds, dtype=np.int)
sampled_inds.append(tmp_sampled_set)
sampled_inds = np.concatenate(sampled_inds)
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(list(full_set - set(sampled_inds)))
if len(extra_inds) > num_extra:
extra_inds = self.random_choice(extra_inds, num_extra)
sampled_inds = np.concatenate([sampled_inds, extra_inds])
return sampled_inds
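    # Splits negative candidates into a "floor" set (IoU below floor_thr) and
    # an IoU-sampling set, fills the quota according to floor_fraction, and
    # falls back to random extras if either set runs short.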
def _sample_neg(self, assign_result, num_expected, **kwargs):
neg_inds = torch.nonzero(assign_result.gt_inds == 0)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
max_overlaps = assign_result.max_overlaps.cpu().numpy()
# balance sampling for negative samples
neg_set = set(neg_inds.cpu().numpy())
if self.floor_thr > 0:
floor_set = set(
np.where(
np.logical_and(max_overlaps >= 0,
max_overlaps < self.floor_thr))[0])
iou_sampling_set = set(
np.where(max_overlaps >= self.floor_thr)[0])
elif self.floor_thr == 0:
floor_set = set(np.where(max_overlaps == 0)[0])
iou_sampling_set = set(
np.where(max_overlaps > self.floor_thr)[0])
else:
floor_set = set()
iou_sampling_set = set(
np.where(max_overlaps > self.floor_thr)[0])
floor_neg_inds = list(floor_set & neg_set)
iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
num_expected_iou_sampling = int(num_expected *
(1 - self.floor_fraction))
if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
if self.num_bins >= 2:
iou_sampled_inds = self.sample_via_interval(
max_overlaps, set(iou_sampling_neg_inds),
num_expected_iou_sampling)
else:
iou_sampled_inds = self.random_choice(
iou_sampling_neg_inds, num_expected_iou_sampling)
else:
iou_sampled_inds = np.array(
iou_sampling_neg_inds, dtype=np.int)
num_expected_floor = num_expected - len(iou_sampled_inds)
if len(floor_neg_inds) > num_expected_floor:
sampled_floor_inds = self.random_choice(
floor_neg_inds, num_expected_floor)
else:
sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int)
sampled_inds = np.concatenate(
(sampled_floor_inds, iou_sampled_inds))
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(list(neg_set - set(sampled_inds)))
if len(extra_inds) > num_extra:
extra_inds = self.random_choice(extra_inds, num_extra)
sampled_inds = np.concatenate((sampled_inds, extra_inds))
sampled_inds = torch.from_numpy(sampled_inds).long().to(
assign_result.gt_inds.device)
return sampled_inds
| 1 | 18,323 | I think you meant, self.floor_thr = 0 | open-mmlab-mmdetection | py |
@@ -14,13 +14,12 @@ import (
"github.com/99designs/gqlgen/graphql"
"github.com/99designs/gqlgen/graphql/introspection"
+ "github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
+ "github.com/chaos-mesh/chaos-mesh/pkg/ctrlserver/graph/model"
gqlparser "github.com/vektah/gqlparser/v2"
"github.com/vektah/gqlparser/v2/ast"
- v1 "k8s.io/api/core/v1"
+ "k8s.io/api/core/v1"
v11 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- "github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
- "github.com/chaos-mesh/chaos-mesh/pkg/ctrlserver/graph/model"
)
// region ************************** generated!.gotpl ************************** | 1 | // Code generated by github.com/99designs/gqlgen, DO NOT EDIT.
package generated
import (
"bytes"
"context"
"errors"
"io"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/99designs/gqlgen/graphql"
"github.com/99designs/gqlgen/graphql/introspection"
gqlparser "github.com/vektah/gqlparser/v2"
"github.com/vektah/gqlparser/v2/ast"
v1 "k8s.io/api/core/v1"
v11 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
"github.com/chaos-mesh/chaos-mesh/pkg/ctrlserver/graph/model"
)
// region ************************** generated!.gotpl **************************
// NewExecutableSchema creates an ExecutableSchema from the ResolverRoot interface.
func NewExecutableSchema(cfg Config) graphql.ExecutableSchema {
return &executableSchema{
resolvers: cfg.Resolvers,
directives: cfg.Directives,
complexity: cfg.Complexity,
}
}
type Config struct {
Resolvers ResolverRoot
Directives DirectiveRoot
Complexity ComplexityRoot
}
type ResolverRoot interface {
AttrOverrideSpec() AttrOverrideSpecResolver
BandwidthSpec() BandwidthSpecResolver
ChaosCondition() ChaosConditionResolver
ContainerStateRunning() ContainerStateRunningResolver
ContainerStateTerminated() ContainerStateTerminatedResolver
CorruptSpec() CorruptSpecResolver
ExperimentStatus() ExperimentStatusResolver
HTTPChaos() HTTPChaosResolver
HTTPChaosSpec() HTTPChaosSpecResolver
HTTPChaosStatus() HTTPChaosStatusResolver
IOChaos() IOChaosResolver
IOChaosAction() IOChaosActionResolver
IOChaosSpec() IOChaosSpecResolver
IOChaosStatus() IOChaosStatusResolver
IoFault() IoFaultResolver
Logger() LoggerResolver
MistakeSpec() MistakeSpecResolver
Namespace() NamespaceResolver
NetworkChaos() NetworkChaosResolver
OwnerReference() OwnerReferenceResolver
Pod() PodResolver
PodCondition() PodConditionResolver
PodHTTPChaos() PodHTTPChaosResolver
PodHttpChaosReplaceActions() PodHttpChaosReplaceActionsResolver
PodHttpChaosRule() PodHttpChaosRuleResolver
PodHttpChaosSelector() PodHttpChaosSelectorResolver
PodIOChaos() PodIOChaosResolver
PodNetworkChaos() PodNetworkChaosResolver
PodSelectorSpec() PodSelectorSpecResolver
PodStatus() PodStatusResolver
Process() ProcessResolver
Query() QueryResolver
RawIptables() RawIptablesResolver
RawTrafficControl() RawTrafficControlResolver
Record() RecordResolver
StressChaos() StressChaosResolver
}
type DirectiveRoot struct {
}
type ComplexityRoot struct {
AttrOverrideSpec struct {
Atime func(childComplexity int) int
Blocks func(childComplexity int) int
Ctime func(childComplexity int) int
Gid func(childComplexity int) int
Ino func(childComplexity int) int
Kind func(childComplexity int) int
Mtime func(childComplexity int) int
Nlink func(childComplexity int) int
Perm func(childComplexity int) int
Rdev func(childComplexity int) int
Size func(childComplexity int) int
UID func(childComplexity int) int
}
BandwidthSpec struct {
Buffer func(childComplexity int) int
Limit func(childComplexity int) int
Minburst func(childComplexity int) int
Peakrate func(childComplexity int) int
Rate func(childComplexity int) int
}
ChaosCondition struct {
Reason func(childComplexity int) int
Status func(childComplexity int) int
Type func(childComplexity int) int
}
ContainerState struct {
Running func(childComplexity int) int
Terminated func(childComplexity int) int
Waiting func(childComplexity int) int
}
ContainerStateRunning struct {
StartedAt func(childComplexity int) int
}
ContainerStateTerminated struct {
ContainerID func(childComplexity int) int
ExitCode func(childComplexity int) int
FinishedAt func(childComplexity int) int
Message func(childComplexity int) int
Reason func(childComplexity int) int
Signal func(childComplexity int) int
StartedAt func(childComplexity int) int
}
ContainerStateWaiting struct {
Message func(childComplexity int) int
Reason func(childComplexity int) int
}
ContainerStatus struct {
ContainerID func(childComplexity int) int
Image func(childComplexity int) int
ImageID func(childComplexity int) int
LastTerminationState func(childComplexity int) int
Name func(childComplexity int) int
Ready func(childComplexity int) int
RestartCount func(childComplexity int) int
Started func(childComplexity int) int
State func(childComplexity int) int
}
CorruptSpec struct {
Correlation func(childComplexity int) int
Corrup func(childComplexity int) int
}
DelaySpec struct {
Correlation func(childComplexity int) int
Jitter func(childComplexity int) int
Latency func(childComplexity int) int
Reorder func(childComplexity int) int
}
DuplicateSpec struct {
Correlation func(childComplexity int) int
Duplicate func(childComplexity int) int
}
ExperimentStatus struct {
DesiredPhase func(childComplexity int) int
Records func(childComplexity int) int
}
Fd struct {
Fd func(childComplexity int) int
Target func(childComplexity int) int
}
HTTPChaos struct {
APIVersion func(childComplexity int) int
Annotations func(childComplexity int) int
ClusterName func(childComplexity int) int
CreationTimestamp func(childComplexity int) int
DeletionGracePeriodSeconds func(childComplexity int) int
DeletionTimestamp func(childComplexity int) int
Finalizers func(childComplexity int) int
GenerateName func(childComplexity int) int
Generation func(childComplexity int) int
Kind func(childComplexity int) int
Labels func(childComplexity int) int
Name func(childComplexity int) int
Namespace func(childComplexity int) int
OwnerReferences func(childComplexity int) int
Podhttp func(childComplexity int) int
ResourceVersion func(childComplexity int) int
SelfLink func(childComplexity int) int
Spec func(childComplexity int) int
Status func(childComplexity int) int
UID func(childComplexity int) int
}
HTTPChaosSpec struct {
Abort func(childComplexity int) int
Code func(childComplexity int) int
Delay func(childComplexity int) int
Duration func(childComplexity int) int
Method func(childComplexity int) int
Mode func(childComplexity int) int
Patch func(childComplexity int) int
Path func(childComplexity int) int
Port func(childComplexity int) int
Replace func(childComplexity int) int
RequestHeaders func(childComplexity int) int
ResponseHeaders func(childComplexity int) int
Selector func(childComplexity int) int
Target func(childComplexity int) int
Value func(childComplexity int) int
}
HTTPChaosStatus struct {
Conditions func(childComplexity int) int
Experiment func(childComplexity int) int
Instances func(childComplexity int) int
}
IOChaos struct {
APIVersion func(childComplexity int) int
Annotations func(childComplexity int) int
ClusterName func(childComplexity int) int
CreationTimestamp func(childComplexity int) int
DeletionGracePeriodSeconds func(childComplexity int) int
DeletionTimestamp func(childComplexity int) int
Finalizers func(childComplexity int) int
GenerateName func(childComplexity int) int
Generation func(childComplexity int) int
Kind func(childComplexity int) int
Labels func(childComplexity int) int
Name func(childComplexity int) int
Namespace func(childComplexity int) int
OwnerReferences func(childComplexity int) int
Podios func(childComplexity int) int
ResourceVersion func(childComplexity int) int
SelfLink func(childComplexity int) int
Spec func(childComplexity int) int
Status func(childComplexity int) int
UID func(childComplexity int) int
}
IOChaosAction struct {
Atime func(childComplexity int) int
Blocks func(childComplexity int) int
Ctime func(childComplexity int) int
Faults func(childComplexity int) int
Filling func(childComplexity int) int
Gid func(childComplexity int) int
Ino func(childComplexity int) int
Kind func(childComplexity int) int
Latency func(childComplexity int) int
MaxLength func(childComplexity int) int
MaxOccurrences func(childComplexity int) int
Methods func(childComplexity int) int
Mtime func(childComplexity int) int
Nlink func(childComplexity int) int
Path func(childComplexity int) int
Percent func(childComplexity int) int
Perm func(childComplexity int) int
Rdev func(childComplexity int) int
Size func(childComplexity int) int
Source func(childComplexity int) int
Type func(childComplexity int) int
UID func(childComplexity int) int
}
IOChaosSpec struct {
Action func(childComplexity int) int
Attr func(childComplexity int) int
ContainerNames func(childComplexity int) int
Delay func(childComplexity int) int
Duration func(childComplexity int) int
Errno func(childComplexity int) int
Methods func(childComplexity int) int
Mistake func(childComplexity int) int
Mode func(childComplexity int) int
Path func(childComplexity int) int
Percent func(childComplexity int) int
Selector func(childComplexity int) int
Value func(childComplexity int) int
VolumePath func(childComplexity int) int
}
IOChaosStatus struct {
Conditions func(childComplexity int) int
Experiment func(childComplexity int) int
Instances func(childComplexity int) int
}
IoFault struct {
Errno func(childComplexity int) int
Weight func(childComplexity int) int
}
Logger struct {
Component func(childComplexity int, ns string, component model.Component) int
Pod func(childComplexity int, ns string, name string) int
}
LossSpec struct {
Correlation func(childComplexity int) int
Loss func(childComplexity int) int
}
MistakeSpec struct {
Filling func(childComplexity int) int
MaxLength func(childComplexity int) int
MaxOccurrences func(childComplexity int) int
}
Namespace struct {
Component func(childComplexity int, component model.Component) int
Httpchaos func(childComplexity int, name *string) int
Iochaos func(childComplexity int, name *string) int
Networkchaos func(childComplexity int, name *string) int
Ns func(childComplexity int) int
Pod func(childComplexity int, name *string) int
Podhttpchaos func(childComplexity int, name *string) int
Podiochaos func(childComplexity int, name *string) int
Podnetworkchaos func(childComplexity int, name *string) int
Stresschaos func(childComplexity int, name *string) int
}
NetworkChaos struct {
APIVersion func(childComplexity int) int
Annotations func(childComplexity int) int
ClusterName func(childComplexity int) int
CreationTimestamp func(childComplexity int) int
DeletionGracePeriodSeconds func(childComplexity int) int
DeletionTimestamp func(childComplexity int) int
Finalizers func(childComplexity int) int
GenerateName func(childComplexity int) int
Generation func(childComplexity int) int
Kind func(childComplexity int) int
Labels func(childComplexity int) int
Name func(childComplexity int) int
Namespace func(childComplexity int) int
OwnerReferences func(childComplexity int) int
Podnetwork func(childComplexity int) int
ResourceVersion func(childComplexity int) int
SelfLink func(childComplexity int) int
UID func(childComplexity int) int
}
OwnerReference struct {
APIVersion func(childComplexity int) int
BlockOwnerDeletion func(childComplexity int) int
Controller func(childComplexity int) int
Kind func(childComplexity int) int
Name func(childComplexity int) int
UID func(childComplexity int) int
}
Pod struct {
APIVersion func(childComplexity int) int
Annotations func(childComplexity int) int
ClusterName func(childComplexity int) int
CreationTimestamp func(childComplexity int) int
Daemon func(childComplexity int) int
DeletionGracePeriodSeconds func(childComplexity int) int
DeletionTimestamp func(childComplexity int) int
Finalizers func(childComplexity int) int
GenerateName func(childComplexity int) int
Generation func(childComplexity int) int
Ipset func(childComplexity int) int
Iptables func(childComplexity int) int
Kind func(childComplexity int) int
Labels func(childComplexity int) int
Logs func(childComplexity int) int
Mounts func(childComplexity int) int
Name func(childComplexity int) int
Namespace func(childComplexity int) int
OwnerReferences func(childComplexity int) int
Processes func(childComplexity int) int
ResourceVersion func(childComplexity int) int
SelfLink func(childComplexity int) int
Spec func(childComplexity int) int
Status func(childComplexity int) int
TcQdisc func(childComplexity int) int
UID func(childComplexity int) int
}
PodCondition struct {
LastProbeTime func(childComplexity int) int
LastTransitionTime func(childComplexity int) int
Message func(childComplexity int) int
Reason func(childComplexity int) int
Status func(childComplexity int) int
Type func(childComplexity int) int
}
PodHTTPChaos struct {
APIVersion func(childComplexity int) int
Annotations func(childComplexity int) int
ClusterName func(childComplexity int) int
CreationTimestamp func(childComplexity int) int
DeletionGracePeriodSeconds func(childComplexity int) int
DeletionTimestamp func(childComplexity int) int
Finalizers func(childComplexity int) int
GenerateName func(childComplexity int) int
Generation func(childComplexity int) int
Kind func(childComplexity int) int
Labels func(childComplexity int) int
Name func(childComplexity int) int
Namespace func(childComplexity int) int
OwnerReferences func(childComplexity int) int
Pod func(childComplexity int) int
ResourceVersion func(childComplexity int) int
SelfLink func(childComplexity int) int
Spec func(childComplexity int) int
Status func(childComplexity int) int
UID func(childComplexity int) int
}
PodHTTPChaosActions struct {
Abort func(childComplexity int) int
Delay func(childComplexity int) int
Patch func(childComplexity int) int
Replace func(childComplexity int) int
}
PodHTTPChaosPatchActions struct {
Body func(childComplexity int) int
Headers func(childComplexity int) int
Queries func(childComplexity int) int
}
PodHTTPChaosPatchBodyAction struct {
Type func(childComplexity int) int
Value func(childComplexity int) int
}
PodHTTPChaosReplaceActions struct {
Body func(childComplexity int) int
Code func(childComplexity int) int
Headers func(childComplexity int) int
Method func(childComplexity int) int
Path func(childComplexity int) int
Queries func(childComplexity int) int
}
PodHTTPChaosRule struct {
Actions func(childComplexity int) int
Port func(childComplexity int) int
Selector func(childComplexity int) int
Source func(childComplexity int) int
Target func(childComplexity int) int
}
PodHTTPChaosSelector struct {
Code func(childComplexity int) int
Method func(childComplexity int) int
Path func(childComplexity int) int
Port func(childComplexity int) int
RequestHeaders func(childComplexity int) int
ResponseHeaders func(childComplexity int) int
}
PodHTTPChaosSpec struct {
Rules func(childComplexity int) int
}
PodHTTPChaosStatus struct {
FailedMessage func(childComplexity int) int
ObservedGeneration func(childComplexity int) int
Pid func(childComplexity int) int
StartTime func(childComplexity int) int
}
PodIOChaos struct {
APIVersion func(childComplexity int) int
Annotations func(childComplexity int) int
ClusterName func(childComplexity int) int
CreationTimestamp func(childComplexity int) int
DeletionGracePeriodSeconds func(childComplexity int) int
DeletionTimestamp func(childComplexity int) int
Finalizers func(childComplexity int) int
GenerateName func(childComplexity int) int
Generation func(childComplexity int) int
Ios func(childComplexity int) int
Kind func(childComplexity int) int
Labels func(childComplexity int) int
Name func(childComplexity int) int
Namespace func(childComplexity int) int
OwnerReferences func(childComplexity int) int
Pod func(childComplexity int) int
ResourceVersion func(childComplexity int) int
SelfLink func(childComplexity int) int
Spec func(childComplexity int) int
Status func(childComplexity int) int
UID func(childComplexity int) int
}
PodIOChaosSpec struct {
Actions func(childComplexity int) int
Container func(childComplexity int) int
VolumeMountPath func(childComplexity int) int
}
PodIOChaosStatus struct {
FailedMessage func(childComplexity int) int
ObservedGeneration func(childComplexity int) int
Pid func(childComplexity int) int
StartTime func(childComplexity int) int
}
PodIP struct {
IP func(childComplexity int) int
}
PodNetworkChaos struct {
APIVersion func(childComplexity int) int
Annotations func(childComplexity int) int
ClusterName func(childComplexity int) int
CreationTimestamp func(childComplexity int) int
DeletionGracePeriodSeconds func(childComplexity int) int
DeletionTimestamp func(childComplexity int) int
Finalizers func(childComplexity int) int
GenerateName func(childComplexity int) int
Generation func(childComplexity int) int
Kind func(childComplexity int) int
Labels func(childComplexity int) int
Name func(childComplexity int) int
Namespace func(childComplexity int) int
OwnerReferences func(childComplexity int) int
Pod func(childComplexity int) int
ResourceVersion func(childComplexity int) int
SelfLink func(childComplexity int) int
Spec func(childComplexity int) int
Status func(childComplexity int) int
UID func(childComplexity int) int
}
PodNetworkChaosSpec struct {
IPSets func(childComplexity int) int
Iptables func(childComplexity int) int
TrafficControls func(childComplexity int) int
}
PodNetworkChaosStatus struct {
FailedMessage func(childComplexity int) int
ObservedGeneration func(childComplexity int) int
}
PodSelectorSpec struct {
AnnotationSelectors func(childComplexity int) int
FieldSelectors func(childComplexity int) int
LabelSelectors func(childComplexity int) int
Namespaces func(childComplexity int) int
NodeSelectors func(childComplexity int) int
Nodes func(childComplexity int) int
PodPhaseSelectors func(childComplexity int) int
Pods func(childComplexity int) int
}
PodSpec struct {
NodeName func(childComplexity int) int
}
PodStatus struct {
Conditions func(childComplexity int) int
ContainerStatuses func(childComplexity int) int
EphemeralContainerStatuses func(childComplexity int) int
HostIP func(childComplexity int) int
InitContainerStatuses func(childComplexity int) int
Message func(childComplexity int) int
NominatedNodeName func(childComplexity int) int
Phase func(childComplexity int) int
PodIP func(childComplexity int) int
PodIPs func(childComplexity int) int
QosClass func(childComplexity int) int
Reason func(childComplexity int) int
StartTime func(childComplexity int) int
}
Process struct {
Command func(childComplexity int) int
Fds func(childComplexity int) int
Pid func(childComplexity int) int
Pod func(childComplexity int) int
}
Query struct {
Namespace func(childComplexity int, ns *string) int
}
RawIPSet struct {
Cidrs func(childComplexity int) int
Name func(childComplexity int) int
Source func(childComplexity int) int
}
RawIptables struct {
Direction func(childComplexity int) int
IPSets func(childComplexity int) int
Name func(childComplexity int) int
Source func(childComplexity int) int
}
RawTrafficControl struct {
Bandwidth func(childComplexity int) int
Corrupt func(childComplexity int) int
Delay func(childComplexity int) int
Duplicate func(childComplexity int) int
IPSet func(childComplexity int) int
Loss func(childComplexity int) int
Source func(childComplexity int) int
Type func(childComplexity int) int
}
Record struct {
Id func(childComplexity int) int
Phase func(childComplexity int) int
SelectorKey func(childComplexity int) int
}
ReorderSpec struct {
Correlation func(childComplexity int) int
Gap func(childComplexity int) int
Reorder func(childComplexity int) int
}
StressChaos struct {
APIVersion func(childComplexity int) int
Annotations func(childComplexity int) int
ClusterName func(childComplexity int) int
CreationTimestamp func(childComplexity int) int
DeletionGracePeriodSeconds func(childComplexity int) int
DeletionTimestamp func(childComplexity int) int
Finalizers func(childComplexity int) int
GenerateName func(childComplexity int) int
Generation func(childComplexity int) int
Kind func(childComplexity int) int
Labels func(childComplexity int) int
Name func(childComplexity int) int
Namespace func(childComplexity int) int
OwnerReferences func(childComplexity int) int
ResourceVersion func(childComplexity int) int
SelfLink func(childComplexity int) int
UID func(childComplexity int) int
}
Timespec struct {
Nsec func(childComplexity int) int
Sec func(childComplexity int) int
}
}
type AttrOverrideSpecResolver interface {
Ino(ctx context.Context, obj *v1alpha1.AttrOverrideSpec) (*int, error)
Size(ctx context.Context, obj *v1alpha1.AttrOverrideSpec) (*int, error)
Blocks(ctx context.Context, obj *v1alpha1.AttrOverrideSpec) (*int, error)
Kind(ctx context.Context, obj *v1alpha1.AttrOverrideSpec) (*string, error)
Perm(ctx context.Context, obj *v1alpha1.AttrOverrideSpec) (*int, error)
Nlink(ctx context.Context, obj *v1alpha1.AttrOverrideSpec) (*int, error)
UID(ctx context.Context, obj *v1alpha1.AttrOverrideSpec) (*int, error)
Gid(ctx context.Context, obj *v1alpha1.AttrOverrideSpec) (*int, error)
Rdev(ctx context.Context, obj *v1alpha1.AttrOverrideSpec) (*int, error)
}
type BandwidthSpecResolver interface {
Limit(ctx context.Context, obj *v1alpha1.BandwidthSpec) (int, error)
Buffer(ctx context.Context, obj *v1alpha1.BandwidthSpec) (int, error)
Peakrate(ctx context.Context, obj *v1alpha1.BandwidthSpec) (*int, error)
Minburst(ctx context.Context, obj *v1alpha1.BandwidthSpec) (*int, error)
}
type ChaosConditionResolver interface {
Type(ctx context.Context, obj *v1alpha1.ChaosCondition) (string, error)
Status(ctx context.Context, obj *v1alpha1.ChaosCondition) (string, error)
}
type ContainerStateRunningResolver interface {
StartedAt(ctx context.Context, obj *v1.ContainerStateRunning) (*time.Time, error)
}
type ContainerStateTerminatedResolver interface {
StartedAt(ctx context.Context, obj *v1.ContainerStateTerminated) (*time.Time, error)
FinishedAt(ctx context.Context, obj *v1.ContainerStateTerminated) (*time.Time, error)
}
type CorruptSpecResolver interface {
Corrup(ctx context.Context, obj *v1alpha1.CorruptSpec) (string, error)
}
type ExperimentStatusResolver interface {
DesiredPhase(ctx context.Context, obj *v1alpha1.ExperimentStatus) (string, error)
}
type HTTPChaosResolver interface {
UID(ctx context.Context, obj *v1alpha1.HTTPChaos) (string, error)
CreationTimestamp(ctx context.Context, obj *v1alpha1.HTTPChaos) (*time.Time, error)
DeletionTimestamp(ctx context.Context, obj *v1alpha1.HTTPChaos) (*time.Time, error)
Labels(ctx context.Context, obj *v1alpha1.HTTPChaos) (map[string]interface{}, error)
Annotations(ctx context.Context, obj *v1alpha1.HTTPChaos) (map[string]interface{}, error)
Podhttp(ctx context.Context, obj *v1alpha1.HTTPChaos) ([]*v1alpha1.PodHttpChaos, error)
}
type HTTPChaosSpecResolver interface {
Mode(ctx context.Context, obj *v1alpha1.HTTPChaosSpec) (string, error)
Target(ctx context.Context, obj *v1alpha1.HTTPChaosSpec) (string, error)
RequestHeaders(ctx context.Context, obj *v1alpha1.HTTPChaosSpec) (map[string]interface{}, error)
ResponseHeaders(ctx context.Context, obj *v1alpha1.HTTPChaosSpec) (map[string]interface{}, error)
}
type HTTPChaosStatusResolver interface {
Instances(ctx context.Context, obj *v1alpha1.HTTPChaosStatus) (map[string]interface{}, error)
}
type IOChaosResolver interface {
UID(ctx context.Context, obj *v1alpha1.IOChaos) (string, error)
CreationTimestamp(ctx context.Context, obj *v1alpha1.IOChaos) (*time.Time, error)
DeletionTimestamp(ctx context.Context, obj *v1alpha1.IOChaos) (*time.Time, error)
Labels(ctx context.Context, obj *v1alpha1.IOChaos) (map[string]interface{}, error)
Annotations(ctx context.Context, obj *v1alpha1.IOChaos) (map[string]interface{}, error)
Podios(ctx context.Context, obj *v1alpha1.IOChaos) ([]*v1alpha1.PodIOChaos, error)
}
type IOChaosActionResolver interface {
Type(ctx context.Context, obj *v1alpha1.IOChaosAction) (string, error)
Methods(ctx context.Context, obj *v1alpha1.IOChaosAction) ([]string, error)
Ino(ctx context.Context, obj *v1alpha1.IOChaosAction) (*int, error)
Size(ctx context.Context, obj *v1alpha1.IOChaosAction) (*int, error)
Blocks(ctx context.Context, obj *v1alpha1.IOChaosAction) (*int, error)
Kind(ctx context.Context, obj *v1alpha1.IOChaosAction) (*string, error)
Perm(ctx context.Context, obj *v1alpha1.IOChaosAction) (*int, error)
Nlink(ctx context.Context, obj *v1alpha1.IOChaosAction) (*int, error)
UID(ctx context.Context, obj *v1alpha1.IOChaosAction) (*int, error)
Gid(ctx context.Context, obj *v1alpha1.IOChaosAction) (*int, error)
Rdev(ctx context.Context, obj *v1alpha1.IOChaosAction) (*int, error)
Filling(ctx context.Context, obj *v1alpha1.IOChaosAction) (*string, error)
}
type IOChaosSpecResolver interface {
Mode(ctx context.Context, obj *v1alpha1.IOChaosSpec) (string, error)
Action(ctx context.Context, obj *v1alpha1.IOChaosSpec) (string, error)
Errno(ctx context.Context, obj *v1alpha1.IOChaosSpec) (*int, error)
Methods(ctx context.Context, obj *v1alpha1.IOChaosSpec) ([]string, error)
}
type IOChaosStatusResolver interface {
Instances(ctx context.Context, obj *v1alpha1.IOChaosStatus) (map[string]interface{}, error)
}
type IoFaultResolver interface {
Errno(ctx context.Context, obj *v1alpha1.IoFault) (int, error)
}
type LoggerResolver interface {
Component(ctx context.Context, ns string, component model.Component) (<-chan string, error)
Pod(ctx context.Context, ns string, name string) (<-chan string, error)
}
type MistakeSpecResolver interface {
Filling(ctx context.Context, obj *v1alpha1.MistakeSpec) (*string, error)
}
type NamespaceResolver interface {
Component(ctx context.Context, obj *model.Namespace, component model.Component) ([]*v1.Pod, error)
Pod(ctx context.Context, obj *model.Namespace, name *string) ([]*v1.Pod, error)
Stresschaos(ctx context.Context, obj *model.Namespace, name *string) ([]*v1alpha1.StressChaos, error)
Iochaos(ctx context.Context, obj *model.Namespace, name *string) ([]*v1alpha1.IOChaos, error)
Podiochaos(ctx context.Context, obj *model.Namespace, name *string) ([]*v1alpha1.PodIOChaos, error)
Httpchaos(ctx context.Context, obj *model.Namespace, name *string) ([]*v1alpha1.HTTPChaos, error)
Podhttpchaos(ctx context.Context, obj *model.Namespace, name *string) ([]*v1alpha1.PodHttpChaos, error)
Networkchaos(ctx context.Context, obj *model.Namespace, name *string) ([]*v1alpha1.NetworkChaos, error)
Podnetworkchaos(ctx context.Context, obj *model.Namespace, name *string) ([]*v1alpha1.PodNetworkChaos, error)
}
type NetworkChaosResolver interface {
UID(ctx context.Context, obj *v1alpha1.NetworkChaos) (string, error)
CreationTimestamp(ctx context.Context, obj *v1alpha1.NetworkChaos) (*time.Time, error)
DeletionTimestamp(ctx context.Context, obj *v1alpha1.NetworkChaos) (*time.Time, error)
Labels(ctx context.Context, obj *v1alpha1.NetworkChaos) (map[string]interface{}, error)
Annotations(ctx context.Context, obj *v1alpha1.NetworkChaos) (map[string]interface{}, error)
Podnetwork(ctx context.Context, obj *v1alpha1.NetworkChaos) ([]*v1alpha1.PodNetworkChaos, error)
}
type OwnerReferenceResolver interface {
UID(ctx context.Context, obj *v11.OwnerReference) (string, error)
}
type PodResolver interface {
UID(ctx context.Context, obj *v1.Pod) (string, error)
CreationTimestamp(ctx context.Context, obj *v1.Pod) (*time.Time, error)
DeletionTimestamp(ctx context.Context, obj *v1.Pod) (*time.Time, error)
Labels(ctx context.Context, obj *v1.Pod) (map[string]interface{}, error)
Annotations(ctx context.Context, obj *v1.Pod) (map[string]interface{}, error)
Logs(ctx context.Context, obj *v1.Pod) (string, error)
Daemon(ctx context.Context, obj *v1.Pod) (*v1.Pod, error)
Processes(ctx context.Context, obj *v1.Pod) ([]*model.Process, error)
Mounts(ctx context.Context, obj *v1.Pod) ([]string, error)
Ipset(ctx context.Context, obj *v1.Pod) (string, error)
TcQdisc(ctx context.Context, obj *v1.Pod) (string, error)
Iptables(ctx context.Context, obj *v1.Pod) (string, error)
}
type PodConditionResolver interface {
Type(ctx context.Context, obj *v1.PodCondition) (string, error)
Status(ctx context.Context, obj *v1.PodCondition) (string, error)
LastProbeTime(ctx context.Context, obj *v1.PodCondition) (*time.Time, error)
LastTransitionTime(ctx context.Context, obj *v1.PodCondition) (*time.Time, error)
}
type PodHTTPChaosResolver interface {
UID(ctx context.Context, obj *v1alpha1.PodHttpChaos) (string, error)
CreationTimestamp(ctx context.Context, obj *v1alpha1.PodHttpChaos) (*time.Time, error)
DeletionTimestamp(ctx context.Context, obj *v1alpha1.PodHttpChaos) (*time.Time, error)
Labels(ctx context.Context, obj *v1alpha1.PodHttpChaos) (map[string]interface{}, error)
Annotations(ctx context.Context, obj *v1alpha1.PodHttpChaos) (map[string]interface{}, error)
Pod(ctx context.Context, obj *v1alpha1.PodHttpChaos) (*v1.Pod, error)
}
type PodHttpChaosReplaceActionsResolver interface {
Body(ctx context.Context, obj *v1alpha1.PodHttpChaosReplaceActions) (*string, error)
Queries(ctx context.Context, obj *v1alpha1.PodHttpChaosReplaceActions) (map[string]interface{}, error)
Headers(ctx context.Context, obj *v1alpha1.PodHttpChaosReplaceActions) (map[string]interface{}, error)
}
type PodHttpChaosRuleResolver interface {
Target(ctx context.Context, obj *v1alpha1.PodHttpChaosRule) (string, error)
}
type PodHttpChaosSelectorResolver interface {
RequestHeaders(ctx context.Context, obj *v1alpha1.PodHttpChaosSelector) (map[string]interface{}, error)
ResponseHeaders(ctx context.Context, obj *v1alpha1.PodHttpChaosSelector) (map[string]interface{}, error)
}
type PodIOChaosResolver interface {
UID(ctx context.Context, obj *v1alpha1.PodIOChaos) (string, error)
CreationTimestamp(ctx context.Context, obj *v1alpha1.PodIOChaos) (*time.Time, error)
DeletionTimestamp(ctx context.Context, obj *v1alpha1.PodIOChaos) (*time.Time, error)
Labels(ctx context.Context, obj *v1alpha1.PodIOChaos) (map[string]interface{}, error)
Annotations(ctx context.Context, obj *v1alpha1.PodIOChaos) (map[string]interface{}, error)
Pod(ctx context.Context, obj *v1alpha1.PodIOChaos) (*v1.Pod, error)
Ios(ctx context.Context, obj *v1alpha1.PodIOChaos) ([]*v1alpha1.IOChaos, error)
}
type PodNetworkChaosResolver interface {
UID(ctx context.Context, obj *v1alpha1.PodNetworkChaos) (string, error)
CreationTimestamp(ctx context.Context, obj *v1alpha1.PodNetworkChaos) (*time.Time, error)
DeletionTimestamp(ctx context.Context, obj *v1alpha1.PodNetworkChaos) (*time.Time, error)
Labels(ctx context.Context, obj *v1alpha1.PodNetworkChaos) (map[string]interface{}, error)
Annotations(ctx context.Context, obj *v1alpha1.PodNetworkChaos) (map[string]interface{}, error)
Pod(ctx context.Context, obj *v1alpha1.PodNetworkChaos) (*v1.Pod, error)
}
type PodSelectorSpecResolver interface {
Pods(ctx context.Context, obj *v1alpha1.PodSelectorSpec) (map[string]interface{}, error)
NodeSelectors(ctx context.Context, obj *v1alpha1.PodSelectorSpec) (map[string]interface{}, error)
FieldSelectors(ctx context.Context, obj *v1alpha1.PodSelectorSpec) (map[string]interface{}, error)
LabelSelectors(ctx context.Context, obj *v1alpha1.PodSelectorSpec) (map[string]interface{}, error)
AnnotationSelectors(ctx context.Context, obj *v1alpha1.PodSelectorSpec) (map[string]interface{}, error)
}
type PodStatusResolver interface {
Phase(ctx context.Context, obj *v1.PodStatus) (string, error)
StartTime(ctx context.Context, obj *v1.PodStatus) (*time.Time, error)
QosClass(ctx context.Context, obj *v1.PodStatus) (string, error)
}
type ProcessResolver interface {
Fds(ctx context.Context, obj *model.Process) ([]*model.Fd, error)
}
type QueryResolver interface {
Namespace(ctx context.Context, ns *string) ([]*model.Namespace, error)
}
type RawIptablesResolver interface {
Direction(ctx context.Context, obj *v1alpha1.RawIptables) (string, error)
}
type RawTrafficControlResolver interface {
Type(ctx context.Context, obj *v1alpha1.RawTrafficControl) (string, error)
}
type RecordResolver interface {
Phase(ctx context.Context, obj *v1alpha1.Record) (string, error)
}
type StressChaosResolver interface {
UID(ctx context.Context, obj *v1alpha1.StressChaos) (string, error)
CreationTimestamp(ctx context.Context, obj *v1alpha1.StressChaos) (*time.Time, error)
DeletionTimestamp(ctx context.Context, obj *v1alpha1.StressChaos) (*time.Time, error)
Labels(ctx context.Context, obj *v1alpha1.StressChaos) (map[string]interface{}, error)
Annotations(ctx context.Context, obj *v1alpha1.StressChaos) (map[string]interface{}, error)
}
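// executableSchema combines the user-supplied resolvers, directive
// implementations, and per-field complexity functions into the object the
// GraphQL server executes queries against.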
type executableSchema struct {
resolvers ResolverRoot
directives DirectiveRoot
complexity ComplexityRoot
}
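// Schema returns the parsed GraphQL schema document that this executable
// schema serves.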
func (e *executableSchema) Schema() *ast.Schema {
return parsedSchema
}
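// Complexity reports the query-complexity cost of a single field. It looks up
// the optional complexity function registered under "TypeName.field"; if none
// is registered it returns (0, false) and the caller falls back to the
// default field cost.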
func (e *executableSchema) Complexity(typeName, field string, childComplexity int, rawArgs map[string]interface{}) (int, bool) {
ec := executionContext{nil, e}
_ = ec
switch typeName + "." + field {
case "AttrOverrideSpec.atime":
if e.complexity.AttrOverrideSpec.Atime == nil {
break
}
return e.complexity.AttrOverrideSpec.Atime(childComplexity), true
case "AttrOverrideSpec.blocks":
if e.complexity.AttrOverrideSpec.Blocks == nil {
break
}
return e.complexity.AttrOverrideSpec.Blocks(childComplexity), true
case "AttrOverrideSpec.ctime":
if e.complexity.AttrOverrideSpec.Ctime == nil {
break
}
return e.complexity.AttrOverrideSpec.Ctime(childComplexity), true
case "AttrOverrideSpec.gid":
if e.complexity.AttrOverrideSpec.Gid == nil {
break
}
return e.complexity.AttrOverrideSpec.Gid(childComplexity), true
case "AttrOverrideSpec.ino":
if e.complexity.AttrOverrideSpec.Ino == nil {
break
}
return e.complexity.AttrOverrideSpec.Ino(childComplexity), true
case "AttrOverrideSpec.kind":
if e.complexity.AttrOverrideSpec.Kind == nil {
break
}
return e.complexity.AttrOverrideSpec.Kind(childComplexity), true
case "AttrOverrideSpec.mtime":
if e.complexity.AttrOverrideSpec.Mtime == nil {
break
}
return e.complexity.AttrOverrideSpec.Mtime(childComplexity), true
case "AttrOverrideSpec.nlink":
if e.complexity.AttrOverrideSpec.Nlink == nil {
break
}
return e.complexity.AttrOverrideSpec.Nlink(childComplexity), true
case "AttrOverrideSpec.perm":
if e.complexity.AttrOverrideSpec.Perm == nil {
break
}
return e.complexity.AttrOverrideSpec.Perm(childComplexity), true
case "AttrOverrideSpec.rdev":
if e.complexity.AttrOverrideSpec.Rdev == nil {
break
}
return e.complexity.AttrOverrideSpec.Rdev(childComplexity), true
case "AttrOverrideSpec.size":
if e.complexity.AttrOverrideSpec.Size == nil {
break
}
return e.complexity.AttrOverrideSpec.Size(childComplexity), true
case "AttrOverrideSpec.uid":
if e.complexity.AttrOverrideSpec.UID == nil {
break
}
return e.complexity.AttrOverrideSpec.UID(childComplexity), true
case "BandwidthSpec.buffer":
if e.complexity.BandwidthSpec.Buffer == nil {
break
}
return e.complexity.BandwidthSpec.Buffer(childComplexity), true
case "BandwidthSpec.limit":
if e.complexity.BandwidthSpec.Limit == nil {
break
}
return e.complexity.BandwidthSpec.Limit(childComplexity), true
case "BandwidthSpec.minburst":
if e.complexity.BandwidthSpec.Minburst == nil {
break
}
return e.complexity.BandwidthSpec.Minburst(childComplexity), true
case "BandwidthSpec.peakrate":
if e.complexity.BandwidthSpec.Peakrate == nil {
break
}
return e.complexity.BandwidthSpec.Peakrate(childComplexity), true
case "BandwidthSpec.rate":
if e.complexity.BandwidthSpec.Rate == nil {
break
}
return e.complexity.BandwidthSpec.Rate(childComplexity), true
case "ChaosCondition.reason":
if e.complexity.ChaosCondition.Reason == nil {
break
}
return e.complexity.ChaosCondition.Reason(childComplexity), true
case "ChaosCondition.status":
if e.complexity.ChaosCondition.Status == nil {
break
}
return e.complexity.ChaosCondition.Status(childComplexity), true
case "ChaosCondition.type":
if e.complexity.ChaosCondition.Type == nil {
break
}
return e.complexity.ChaosCondition.Type(childComplexity), true
case "ContainerState.running":
if e.complexity.ContainerState.Running == nil {
break
}
return e.complexity.ContainerState.Running(childComplexity), true
case "ContainerState.terminated":
if e.complexity.ContainerState.Terminated == nil {
break
}
return e.complexity.ContainerState.Terminated(childComplexity), true
case "ContainerState.waiting":
if e.complexity.ContainerState.Waiting == nil {
break
}
return e.complexity.ContainerState.Waiting(childComplexity), true
case "ContainerStateRunning.startedAt":
if e.complexity.ContainerStateRunning.StartedAt == nil {
break
}
return e.complexity.ContainerStateRunning.StartedAt(childComplexity), true
case "ContainerStateTerminated.containerID":
if e.complexity.ContainerStateTerminated.ContainerID == nil {
break
}
return e.complexity.ContainerStateTerminated.ContainerID(childComplexity), true
case "ContainerStateTerminated.exitCode":
if e.complexity.ContainerStateTerminated.ExitCode == nil {
break
}
return e.complexity.ContainerStateTerminated.ExitCode(childComplexity), true
case "ContainerStateTerminated.finishedAt":
if e.complexity.ContainerStateTerminated.FinishedAt == nil {
break
}
return e.complexity.ContainerStateTerminated.FinishedAt(childComplexity), true
case "ContainerStateTerminated.message":
if e.complexity.ContainerStateTerminated.Message == nil {
break
}
return e.complexity.ContainerStateTerminated.Message(childComplexity), true
case "ContainerStateTerminated.reason":
if e.complexity.ContainerStateTerminated.Reason == nil {
break
}
return e.complexity.ContainerStateTerminated.Reason(childComplexity), true
case "ContainerStateTerminated.signal":
if e.complexity.ContainerStateTerminated.Signal == nil {
break
}
return e.complexity.ContainerStateTerminated.Signal(childComplexity), true
case "ContainerStateTerminated.startedAt":
if e.complexity.ContainerStateTerminated.StartedAt == nil {
break
}
return e.complexity.ContainerStateTerminated.StartedAt(childComplexity), true
case "ContainerStateWaiting.message":
if e.complexity.ContainerStateWaiting.Message == nil {
break
}
return e.complexity.ContainerStateWaiting.Message(childComplexity), true
case "ContainerStateWaiting.reason":
if e.complexity.ContainerStateWaiting.Reason == nil {
break
}
return e.complexity.ContainerStateWaiting.Reason(childComplexity), true
case "ContainerStatus.containerID":
if e.complexity.ContainerStatus.ContainerID == nil {
break
}
return e.complexity.ContainerStatus.ContainerID(childComplexity), true
case "ContainerStatus.image":
if e.complexity.ContainerStatus.Image == nil {
break
}
return e.complexity.ContainerStatus.Image(childComplexity), true
case "ContainerStatus.imageID":
if e.complexity.ContainerStatus.ImageID == nil {
break
}
return e.complexity.ContainerStatus.ImageID(childComplexity), true
case "ContainerStatus.lastTerminationState":
if e.complexity.ContainerStatus.LastTerminationState == nil {
break
}
return e.complexity.ContainerStatus.LastTerminationState(childComplexity), true
case "ContainerStatus.name":
if e.complexity.ContainerStatus.Name == nil {
break
}
return e.complexity.ContainerStatus.Name(childComplexity), true
case "ContainerStatus.ready":
if e.complexity.ContainerStatus.Ready == nil {
break
}
return e.complexity.ContainerStatus.Ready(childComplexity), true
case "ContainerStatus.restartCount":
if e.complexity.ContainerStatus.RestartCount == nil {
break
}
return e.complexity.ContainerStatus.RestartCount(childComplexity), true
case "ContainerStatus.started":
if e.complexity.ContainerStatus.Started == nil {
break
}
return e.complexity.ContainerStatus.Started(childComplexity), true
case "ContainerStatus.State":
if e.complexity.ContainerStatus.State == nil {
break
}
return e.complexity.ContainerStatus.State(childComplexity), true
case "CorruptSpec.correlation":
if e.complexity.CorruptSpec.Correlation == nil {
break
}
return e.complexity.CorruptSpec.Correlation(childComplexity), true
case "CorruptSpec.corrup":
if e.complexity.CorruptSpec.Corrup == nil {
break
}
return e.complexity.CorruptSpec.Corrup(childComplexity), true
case "DelaySpec.correlation":
if e.complexity.DelaySpec.Correlation == nil {
break
}
return e.complexity.DelaySpec.Correlation(childComplexity), true
case "DelaySpec.jitter":
if e.complexity.DelaySpec.Jitter == nil {
break
}
return e.complexity.DelaySpec.Jitter(childComplexity), true
case "DelaySpec.latency":
if e.complexity.DelaySpec.Latency == nil {
break
}
return e.complexity.DelaySpec.Latency(childComplexity), true
case "DelaySpec.reorder":
if e.complexity.DelaySpec.Reorder == nil {
break
}
return e.complexity.DelaySpec.Reorder(childComplexity), true
case "DuplicateSpec.correlation":
if e.complexity.DuplicateSpec.Correlation == nil {
break
}
return e.complexity.DuplicateSpec.Correlation(childComplexity), true
case "DuplicateSpec.duplicate":
if e.complexity.DuplicateSpec.Duplicate == nil {
break
}
return e.complexity.DuplicateSpec.Duplicate(childComplexity), true
case "ExperimentStatus.desiredPhase":
if e.complexity.ExperimentStatus.DesiredPhase == nil {
break
}
return e.complexity.ExperimentStatus.DesiredPhase(childComplexity), true
case "ExperimentStatus.Records":
if e.complexity.ExperimentStatus.Records == nil {
break
}
return e.complexity.ExperimentStatus.Records(childComplexity), true
case "Fd.fd":
if e.complexity.Fd.Fd == nil {
break
}
return e.complexity.Fd.Fd(childComplexity), true
case "Fd.target":
if e.complexity.Fd.Target == nil {
break
}
return e.complexity.Fd.Target(childComplexity), true
case "HTTPChaos.apiVersion":
if e.complexity.HTTPChaos.APIVersion == nil {
break
}
return e.complexity.HTTPChaos.APIVersion(childComplexity), true
case "HTTPChaos.annotations":
if e.complexity.HTTPChaos.Annotations == nil {
break
}
return e.complexity.HTTPChaos.Annotations(childComplexity), true
case "HTTPChaos.clusterName":
if e.complexity.HTTPChaos.ClusterName == nil {
break
}
return e.complexity.HTTPChaos.ClusterName(childComplexity), true
case "HTTPChaos.creationTimestamp":
if e.complexity.HTTPChaos.CreationTimestamp == nil {
break
}
return e.complexity.HTTPChaos.CreationTimestamp(childComplexity), true
case "HTTPChaos.deletionGracePeriodSeconds":
if e.complexity.HTTPChaos.DeletionGracePeriodSeconds == nil {
break
}
return e.complexity.HTTPChaos.DeletionGracePeriodSeconds(childComplexity), true
case "HTTPChaos.deletionTimestamp":
if e.complexity.HTTPChaos.DeletionTimestamp == nil {
break
}
return e.complexity.HTTPChaos.DeletionTimestamp(childComplexity), true
case "HTTPChaos.finalizers":
if e.complexity.HTTPChaos.Finalizers == nil {
break
}
return e.complexity.HTTPChaos.Finalizers(childComplexity), true
case "HTTPChaos.generateName":
if e.complexity.HTTPChaos.GenerateName == nil {
break
}
return e.complexity.HTTPChaos.GenerateName(childComplexity), true
case "HTTPChaos.generation":
if e.complexity.HTTPChaos.Generation == nil {
break
}
return e.complexity.HTTPChaos.Generation(childComplexity), true
case "HTTPChaos.kind":
if e.complexity.HTTPChaos.Kind == nil {
break
}
return e.complexity.HTTPChaos.Kind(childComplexity), true
case "HTTPChaos.labels":
if e.complexity.HTTPChaos.Labels == nil {
break
}
return e.complexity.HTTPChaos.Labels(childComplexity), true
case "HTTPChaos.name":
if e.complexity.HTTPChaos.Name == nil {
break
}
return e.complexity.HTTPChaos.Name(childComplexity), true
case "HTTPChaos.namespace":
if e.complexity.HTTPChaos.Namespace == nil {
break
}
return e.complexity.HTTPChaos.Namespace(childComplexity), true
case "HTTPChaos.ownerReferences":
if e.complexity.HTTPChaos.OwnerReferences == nil {
break
}
return e.complexity.HTTPChaos.OwnerReferences(childComplexity), true
case "HTTPChaos.podhttp":
if e.complexity.HTTPChaos.Podhttp == nil {
break
}
return e.complexity.HTTPChaos.Podhttp(childComplexity), true
case "HTTPChaos.resourceVersion":
if e.complexity.HTTPChaos.ResourceVersion == nil {
break
}
return e.complexity.HTTPChaos.ResourceVersion(childComplexity), true
case "HTTPChaos.selfLink":
if e.complexity.HTTPChaos.SelfLink == nil {
break
}
return e.complexity.HTTPChaos.SelfLink(childComplexity), true
case "HTTPChaos.spec":
if e.complexity.HTTPChaos.Spec == nil {
break
}
return e.complexity.HTTPChaos.Spec(childComplexity), true
case "HTTPChaos.status":
if e.complexity.HTTPChaos.Status == nil {
break
}
return e.complexity.HTTPChaos.Status(childComplexity), true
case "HTTPChaos.uid":
if e.complexity.HTTPChaos.UID == nil {
break
}
return e.complexity.HTTPChaos.UID(childComplexity), true
case "HTTPChaosSpec.abort":
if e.complexity.HTTPChaosSpec.Abort == nil {
break
}
return e.complexity.HTTPChaosSpec.Abort(childComplexity), true
case "HTTPChaosSpec.code":
if e.complexity.HTTPChaosSpec.Code == nil {
break
}
return e.complexity.HTTPChaosSpec.Code(childComplexity), true
case "HTTPChaosSpec.delay":
if e.complexity.HTTPChaosSpec.Delay == nil {
break
}
return e.complexity.HTTPChaosSpec.Delay(childComplexity), true
case "HTTPChaosSpec.duration":
if e.complexity.HTTPChaosSpec.Duration == nil {
break
}
return e.complexity.HTTPChaosSpec.Duration(childComplexity), true
case "HTTPChaosSpec.method":
if e.complexity.HTTPChaosSpec.Method == nil {
break
}
return e.complexity.HTTPChaosSpec.Method(childComplexity), true
case "HTTPChaosSpec.mode":
if e.complexity.HTTPChaosSpec.Mode == nil {
break
}
return e.complexity.HTTPChaosSpec.Mode(childComplexity), true
case "HTTPChaosSpec.patch":
if e.complexity.HTTPChaosSpec.Patch == nil {
break
}
return e.complexity.HTTPChaosSpec.Patch(childComplexity), true
case "HTTPChaosSpec.path":
if e.complexity.HTTPChaosSpec.Path == nil {
break
}
return e.complexity.HTTPChaosSpec.Path(childComplexity), true
case "HTTPChaosSpec.port":
if e.complexity.HTTPChaosSpec.Port == nil {
break
}
return e.complexity.HTTPChaosSpec.Port(childComplexity), true
case "HTTPChaosSpec.replace":
if e.complexity.HTTPChaosSpec.Replace == nil {
break
}
return e.complexity.HTTPChaosSpec.Replace(childComplexity), true
case "HTTPChaosSpec.requestHeaders":
if e.complexity.HTTPChaosSpec.RequestHeaders == nil {
break
}
return e.complexity.HTTPChaosSpec.RequestHeaders(childComplexity), true
case "HTTPChaosSpec.responseHeaders":
if e.complexity.HTTPChaosSpec.ResponseHeaders == nil {
break
}
return e.complexity.HTTPChaosSpec.ResponseHeaders(childComplexity), true
case "HTTPChaosSpec.selector":
if e.complexity.HTTPChaosSpec.Selector == nil {
break
}
return e.complexity.HTTPChaosSpec.Selector(childComplexity), true
case "HTTPChaosSpec.target":
if e.complexity.HTTPChaosSpec.Target == nil {
break
}
return e.complexity.HTTPChaosSpec.Target(childComplexity), true
case "HTTPChaosSpec.value":
if e.complexity.HTTPChaosSpec.Value == nil {
break
}
return e.complexity.HTTPChaosSpec.Value(childComplexity), true
case "HTTPChaosStatus.conditions":
if e.complexity.HTTPChaosStatus.Conditions == nil {
break
}
return e.complexity.HTTPChaosStatus.Conditions(childComplexity), true
case "HTTPChaosStatus.experiment":
if e.complexity.HTTPChaosStatus.Experiment == nil {
break
}
return e.complexity.HTTPChaosStatus.Experiment(childComplexity), true
case "HTTPChaosStatus.instances":
if e.complexity.HTTPChaosStatus.Instances == nil {
break
}
return e.complexity.HTTPChaosStatus.Instances(childComplexity), true
case "IOChaos.apiVersion":
if e.complexity.IOChaos.APIVersion == nil {
break
}
return e.complexity.IOChaos.APIVersion(childComplexity), true
case "IOChaos.annotations":
if e.complexity.IOChaos.Annotations == nil {
break
}
return e.complexity.IOChaos.Annotations(childComplexity), true
case "IOChaos.clusterName":
if e.complexity.IOChaos.ClusterName == nil {
break
}
return e.complexity.IOChaos.ClusterName(childComplexity), true
case "IOChaos.creationTimestamp":
if e.complexity.IOChaos.CreationTimestamp == nil {
break
}
return e.complexity.IOChaos.CreationTimestamp(childComplexity), true
case "IOChaos.deletionGracePeriodSeconds":
if e.complexity.IOChaos.DeletionGracePeriodSeconds == nil {
break
}
return e.complexity.IOChaos.DeletionGracePeriodSeconds(childComplexity), true
case "IOChaos.deletionTimestamp":
if e.complexity.IOChaos.DeletionTimestamp == nil {
break
}
return e.complexity.IOChaos.DeletionTimestamp(childComplexity), true
case "IOChaos.finalizers":
if e.complexity.IOChaos.Finalizers == nil {
break
}
return e.complexity.IOChaos.Finalizers(childComplexity), true
case "IOChaos.generateName":
if e.complexity.IOChaos.GenerateName == nil {
break
}
return e.complexity.IOChaos.GenerateName(childComplexity), true
case "IOChaos.generation":
if e.complexity.IOChaos.Generation == nil {
break
}
return e.complexity.IOChaos.Generation(childComplexity), true
case "IOChaos.kind":
if e.complexity.IOChaos.Kind == nil {
break
}
return e.complexity.IOChaos.Kind(childComplexity), true
case "IOChaos.labels":
if e.complexity.IOChaos.Labels == nil {
break
}
return e.complexity.IOChaos.Labels(childComplexity), true
case "IOChaos.name":
if e.complexity.IOChaos.Name == nil {
break
}
return e.complexity.IOChaos.Name(childComplexity), true
case "IOChaos.namespace":
if e.complexity.IOChaos.Namespace == nil {
break
}
return e.complexity.IOChaos.Namespace(childComplexity), true
case "IOChaos.ownerReferences":
if e.complexity.IOChaos.OwnerReferences == nil {
break
}
return e.complexity.IOChaos.OwnerReferences(childComplexity), true
case "IOChaos.podios":
if e.complexity.IOChaos.Podios == nil {
break
}
return e.complexity.IOChaos.Podios(childComplexity), true
case "IOChaos.resourceVersion":
if e.complexity.IOChaos.ResourceVersion == nil {
break
}
return e.complexity.IOChaos.ResourceVersion(childComplexity), true
case "IOChaos.selfLink":
if e.complexity.IOChaos.SelfLink == nil {
break
}
return e.complexity.IOChaos.SelfLink(childComplexity), true
case "IOChaos.spec":
if e.complexity.IOChaos.Spec == nil {
break
}
return e.complexity.IOChaos.Spec(childComplexity), true
case "IOChaos.status":
if e.complexity.IOChaos.Status == nil {
break
}
return e.complexity.IOChaos.Status(childComplexity), true
case "IOChaos.uid":
if e.complexity.IOChaos.UID == nil {
break
}
return e.complexity.IOChaos.UID(childComplexity), true
case "IOChaosAction.atime":
if e.complexity.IOChaosAction.Atime == nil {
break
}
return e.complexity.IOChaosAction.Atime(childComplexity), true
case "IOChaosAction.blocks":
if e.complexity.IOChaosAction.Blocks == nil {
break
}
return e.complexity.IOChaosAction.Blocks(childComplexity), true
case "IOChaosAction.ctime":
if e.complexity.IOChaosAction.Ctime == nil {
break
}
return e.complexity.IOChaosAction.Ctime(childComplexity), true
case "IOChaosAction.faults":
if e.complexity.IOChaosAction.Faults == nil {
break
}
return e.complexity.IOChaosAction.Faults(childComplexity), true
case "IOChaosAction.filling":
if e.complexity.IOChaosAction.Filling == nil {
break
}
return e.complexity.IOChaosAction.Filling(childComplexity), true
case "IOChaosAction.gid":
if e.complexity.IOChaosAction.Gid == nil {
break
}
return e.complexity.IOChaosAction.Gid(childComplexity), true
case "IOChaosAction.ino":
if e.complexity.IOChaosAction.Ino == nil {
break
}
return e.complexity.IOChaosAction.Ino(childComplexity), true
case "IOChaosAction.kind":
if e.complexity.IOChaosAction.Kind == nil {
break
}
return e.complexity.IOChaosAction.Kind(childComplexity), true
case "IOChaosAction.latency":
if e.complexity.IOChaosAction.Latency == nil {
break
}
return e.complexity.IOChaosAction.Latency(childComplexity), true
case "IOChaosAction.maxLength":
if e.complexity.IOChaosAction.MaxLength == nil {
break
}
return e.complexity.IOChaosAction.MaxLength(childComplexity), true
case "IOChaosAction.maxOccurrences":
if e.complexity.IOChaosAction.MaxOccurrences == nil {
break
}
return e.complexity.IOChaosAction.MaxOccurrences(childComplexity), true
case "IOChaosAction.methods":
if e.complexity.IOChaosAction.Methods == nil {
break
}
return e.complexity.IOChaosAction.Methods(childComplexity), true
case "IOChaosAction.mtime":
if e.complexity.IOChaosAction.Mtime == nil {
break
}
return e.complexity.IOChaosAction.Mtime(childComplexity), true
case "IOChaosAction.nlink":
if e.complexity.IOChaosAction.Nlink == nil {
break
}
return e.complexity.IOChaosAction.Nlink(childComplexity), true
case "IOChaosAction.path":
if e.complexity.IOChaosAction.Path == nil {
break
}
return e.complexity.IOChaosAction.Path(childComplexity), true
case "IOChaosAction.percent":
if e.complexity.IOChaosAction.Percent == nil {
break
}
return e.complexity.IOChaosAction.Percent(childComplexity), true
case "IOChaosAction.perm":
if e.complexity.IOChaosAction.Perm == nil {
break
}
return e.complexity.IOChaosAction.Perm(childComplexity), true
case "IOChaosAction.rdev":
if e.complexity.IOChaosAction.Rdev == nil {
break
}
return e.complexity.IOChaosAction.Rdev(childComplexity), true
case "IOChaosAction.size":
if e.complexity.IOChaosAction.Size == nil {
break
}
return e.complexity.IOChaosAction.Size(childComplexity), true
case "IOChaosAction.source":
if e.complexity.IOChaosAction.Source == nil {
break
}
return e.complexity.IOChaosAction.Source(childComplexity), true
case "IOChaosAction.type":
if e.complexity.IOChaosAction.Type == nil {
break
}
return e.complexity.IOChaosAction.Type(childComplexity), true
case "IOChaosAction.uid":
if e.complexity.IOChaosAction.UID == nil {
break
}
return e.complexity.IOChaosAction.UID(childComplexity), true
case "IOChaosSpec.action":
if e.complexity.IOChaosSpec.Action == nil {
break
}
return e.complexity.IOChaosSpec.Action(childComplexity), true
case "IOChaosSpec.attr":
if e.complexity.IOChaosSpec.Attr == nil {
break
}
return e.complexity.IOChaosSpec.Attr(childComplexity), true
case "IOChaosSpec.containerNames":
if e.complexity.IOChaosSpec.ContainerNames == nil {
break
}
return e.complexity.IOChaosSpec.ContainerNames(childComplexity), true
case "IOChaosSpec.delay":
if e.complexity.IOChaosSpec.Delay == nil {
break
}
return e.complexity.IOChaosSpec.Delay(childComplexity), true
case "IOChaosSpec.duration":
if e.complexity.IOChaosSpec.Duration == nil {
break
}
return e.complexity.IOChaosSpec.Duration(childComplexity), true
case "IOChaosSpec.errno":
if e.complexity.IOChaosSpec.Errno == nil {
break
}
return e.complexity.IOChaosSpec.Errno(childComplexity), true
case "IOChaosSpec.methods":
if e.complexity.IOChaosSpec.Methods == nil {
break
}
return e.complexity.IOChaosSpec.Methods(childComplexity), true
case "IOChaosSpec.mistake":
if e.complexity.IOChaosSpec.Mistake == nil {
break
}
return e.complexity.IOChaosSpec.Mistake(childComplexity), true
case "IOChaosSpec.mode":
if e.complexity.IOChaosSpec.Mode == nil {
break
}
return e.complexity.IOChaosSpec.Mode(childComplexity), true
case "IOChaosSpec.path":
if e.complexity.IOChaosSpec.Path == nil {
break
}
return e.complexity.IOChaosSpec.Path(childComplexity), true
case "IOChaosSpec.percent":
if e.complexity.IOChaosSpec.Percent == nil {
break
}
return e.complexity.IOChaosSpec.Percent(childComplexity), true
case "IOChaosSpec.selector":
if e.complexity.IOChaosSpec.Selector == nil {
break
}
return e.complexity.IOChaosSpec.Selector(childComplexity), true
case "IOChaosSpec.value":
if e.complexity.IOChaosSpec.Value == nil {
break
}
return e.complexity.IOChaosSpec.Value(childComplexity), true
case "IOChaosSpec.volumePath":
if e.complexity.IOChaosSpec.VolumePath == nil {
break
}
return e.complexity.IOChaosSpec.VolumePath(childComplexity), true
case "IOChaosStatus.conditions":
if e.complexity.IOChaosStatus.Conditions == nil {
break
}
return e.complexity.IOChaosStatus.Conditions(childComplexity), true
case "IOChaosStatus.experiment":
if e.complexity.IOChaosStatus.Experiment == nil {
break
}
return e.complexity.IOChaosStatus.Experiment(childComplexity), true
case "IOChaosStatus.instances":
if e.complexity.IOChaosStatus.Instances == nil {
break
}
return e.complexity.IOChaosStatus.Instances(childComplexity), true
case "IoFault.errno":
if e.complexity.IoFault.Errno == nil {
break
}
return e.complexity.IoFault.Errno(childComplexity), true
case "IoFault.weight":
if e.complexity.IoFault.Weight == nil {
break
}
return e.complexity.IoFault.Weight(childComplexity), true
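// Fields that take GraphQL arguments, such as Logger.component below, must
// decode rawArgs first so the typed argument values can be passed to the
// registered complexity function.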
case "Logger.component":
if e.complexity.Logger.Component == nil {
break
}
args, err := ec.field_Logger_component_args(context.TODO(), rawArgs)
if err != nil {
return 0, false
}
return e.complexity.Logger.Component(childComplexity, args["ns"].(string), args["component"].(model.Component)), true
case "Logger.pod":
if e.complexity.Logger.Pod == nil {
break
}
args, err := ec.field_Logger_pod_args(context.TODO(), rawArgs)
if err != nil {
return 0, false
}
return e.complexity.Logger.Pod(childComplexity, args["ns"].(string), args["name"].(string)), true
case "LossSpec.correlation":
if e.complexity.LossSpec.Correlation == nil {
break
}
return e.complexity.LossSpec.Correlation(childComplexity), true
case "LossSpec.loss":
if e.complexity.LossSpec.Loss == nil {
break
}
return e.complexity.LossSpec.Loss(childComplexity), true
case "MistakeSpec.filling":
if e.complexity.MistakeSpec.Filling == nil {
break
}
return e.complexity.MistakeSpec.Filling(childComplexity), true
case "MistakeSpec.maxLength":
if e.complexity.MistakeSpec.MaxLength == nil {
break
}
return e.complexity.MistakeSpec.MaxLength(childComplexity), true
case "MistakeSpec.maxOccurrences":
if e.complexity.MistakeSpec.MaxOccurrences == nil {
break
}
return e.complexity.MistakeSpec.MaxOccurrences(childComplexity), true
case "Namespace.component":
if e.complexity.Namespace.Component == nil {
break
}
args, err := ec.field_Namespace_component_args(context.TODO(), rawArgs)
if err != nil {
return 0, false
}
return e.complexity.Namespace.Component(childComplexity, args["component"].(model.Component)), true
case "Namespace.httpchaos":
if e.complexity.Namespace.Httpchaos == nil {
break
}
args, err := ec.field_Namespace_httpchaos_args(context.TODO(), rawArgs)
if err != nil {
return 0, false
}
return e.complexity.Namespace.Httpchaos(childComplexity, args["name"].(*string)), true
case "Namespace.iochaos":
if e.complexity.Namespace.Iochaos == nil {
break
}
args, err := ec.field_Namespace_iochaos_args(context.TODO(), rawArgs)
if err != nil {
return 0, false
}
return e.complexity.Namespace.Iochaos(childComplexity, args["name"].(*string)), true
case "Namespace.networkchaos":
if e.complexity.Namespace.Networkchaos == nil {
break
}
args, err := ec.field_Namespace_networkchaos_args(context.TODO(), rawArgs)
if err != nil {
return 0, false
}
return e.complexity.Namespace.Networkchaos(childComplexity, args["name"].(*string)), true
case "Namespace.ns":
if e.complexity.Namespace.Ns == nil {
break
}
return e.complexity.Namespace.Ns(childComplexity), true
case "Namespace.pod":
if e.complexity.Namespace.Pod == nil {
break
}
args, err := ec.field_Namespace_pod_args(context.TODO(), rawArgs)
if err != nil {
return 0, false
}
return e.complexity.Namespace.Pod(childComplexity, args["name"].(*string)), true
case "Namespace.podhttpchaos":
if e.complexity.Namespace.Podhttpchaos == nil {
break
}
args, err := ec.field_Namespace_podhttpchaos_args(context.TODO(), rawArgs)
if err != nil {
return 0, false
}
return e.complexity.Namespace.Podhttpchaos(childComplexity, args["name"].(*string)), true
case "Namespace.podiochaos":
if e.complexity.Namespace.Podiochaos == nil {
break
}
args, err := ec.field_Namespace_podiochaos_args(context.TODO(), rawArgs)
if err != nil {
return 0, false
}
return e.complexity.Namespace.Podiochaos(childComplexity, args["name"].(*string)), true
case "Namespace.podnetworkchaos":
if e.complexity.Namespace.Podnetworkchaos == nil {
break
}
args, err := ec.field_Namespace_podnetworkchaos_args(context.TODO(), rawArgs)
if err != nil {
return 0, false
}
return e.complexity.Namespace.Podnetworkchaos(childComplexity, args["name"].(*string)), true
case "Namespace.stresschaos":
if e.complexity.Namespace.Stresschaos == nil {
break
}
args, err := ec.field_Namespace_stresschaos_args(context.TODO(), rawArgs)
if err != nil {
return 0, false
}
return e.complexity.Namespace.Stresschaos(childComplexity, args["name"].(*string)), true
case "NetworkChaos.apiVersion":
if e.complexity.NetworkChaos.APIVersion == nil {
break
}
return e.complexity.NetworkChaos.APIVersion(childComplexity), true
case "NetworkChaos.annotations":
if e.complexity.NetworkChaos.Annotations == nil {
break
}
return e.complexity.NetworkChaos.Annotations(childComplexity), true
case "NetworkChaos.clusterName":
if e.complexity.NetworkChaos.ClusterName == nil {
break
}
return e.complexity.NetworkChaos.ClusterName(childComplexity), true
case "NetworkChaos.creationTimestamp":
if e.complexity.NetworkChaos.CreationTimestamp == nil {
break
}
return e.complexity.NetworkChaos.CreationTimestamp(childComplexity), true
case "NetworkChaos.deletionGracePeriodSeconds":
if e.complexity.NetworkChaos.DeletionGracePeriodSeconds == nil {
break
}
return e.complexity.NetworkChaos.DeletionGracePeriodSeconds(childComplexity), true
case "NetworkChaos.deletionTimestamp":
if e.complexity.NetworkChaos.DeletionTimestamp == nil {
break
}
return e.complexity.NetworkChaos.DeletionTimestamp(childComplexity), true
case "NetworkChaos.finalizers":
if e.complexity.NetworkChaos.Finalizers == nil {
break
}
return e.complexity.NetworkChaos.Finalizers(childComplexity), true
case "NetworkChaos.generateName":
if e.complexity.NetworkChaos.GenerateName == nil {
break
}
return e.complexity.NetworkChaos.GenerateName(childComplexity), true
case "NetworkChaos.generation":
if e.complexity.NetworkChaos.Generation == nil {
break
}
return e.complexity.NetworkChaos.Generation(childComplexity), true
case "NetworkChaos.kind":
if e.complexity.NetworkChaos.Kind == nil {
break
}
return e.complexity.NetworkChaos.Kind(childComplexity), true
case "NetworkChaos.labels":
if e.complexity.NetworkChaos.Labels == nil {
break
}
return e.complexity.NetworkChaos.Labels(childComplexity), true
case "NetworkChaos.name":
if e.complexity.NetworkChaos.Name == nil {
break
}
return e.complexity.NetworkChaos.Name(childComplexity), true
case "NetworkChaos.namespace":
if e.complexity.NetworkChaos.Namespace == nil {
break
}
return e.complexity.NetworkChaos.Namespace(childComplexity), true
case "NetworkChaos.ownerReferences":
if e.complexity.NetworkChaos.OwnerReferences == nil {
break
}
return e.complexity.NetworkChaos.OwnerReferences(childComplexity), true
case "NetworkChaos.podnetwork":
if e.complexity.NetworkChaos.Podnetwork == nil {
break
}
return e.complexity.NetworkChaos.Podnetwork(childComplexity), true
case "NetworkChaos.resourceVersion":
if e.complexity.NetworkChaos.ResourceVersion == nil {
break
}
return e.complexity.NetworkChaos.ResourceVersion(childComplexity), true
case "NetworkChaos.selfLink":
if e.complexity.NetworkChaos.SelfLink == nil {
break
}
return e.complexity.NetworkChaos.SelfLink(childComplexity), true
case "NetworkChaos.uid":
if e.complexity.NetworkChaos.UID == nil {
break
}
return e.complexity.NetworkChaos.UID(childComplexity), true
case "OwnerReference.apiVersion":
if e.complexity.OwnerReference.APIVersion == nil {
break
}
return e.complexity.OwnerReference.APIVersion(childComplexity), true
case "OwnerReference.blockOwnerDeletion":
if e.complexity.OwnerReference.BlockOwnerDeletion == nil {
break
}
return e.complexity.OwnerReference.BlockOwnerDeletion(childComplexity), true
case "OwnerReference.controller":
if e.complexity.OwnerReference.Controller == nil {
break
}
return e.complexity.OwnerReference.Controller(childComplexity), true
case "OwnerReference.kind":
if e.complexity.OwnerReference.Kind == nil {
break
}
return e.complexity.OwnerReference.Kind(childComplexity), true
case "OwnerReference.name":
if e.complexity.OwnerReference.Name == nil {
break
}
return e.complexity.OwnerReference.Name(childComplexity), true
case "OwnerReference.uid":
if e.complexity.OwnerReference.UID == nil {
break
}
return e.complexity.OwnerReference.UID(childComplexity), true
case "Pod.apiVersion":
if e.complexity.Pod.APIVersion == nil {
break
}
return e.complexity.Pod.APIVersion(childComplexity), true
case "Pod.annotations":
if e.complexity.Pod.Annotations == nil {
break
}
return e.complexity.Pod.Annotations(childComplexity), true
case "Pod.clusterName":
if e.complexity.Pod.ClusterName == nil {
break
}
return e.complexity.Pod.ClusterName(childComplexity), true
case "Pod.creationTimestamp":
if e.complexity.Pod.CreationTimestamp == nil {
break
}
return e.complexity.Pod.CreationTimestamp(childComplexity), true
case "Pod.daemon":
if e.complexity.Pod.Daemon == nil {
break
}
return e.complexity.Pod.Daemon(childComplexity), true
case "Pod.deletionGracePeriodSeconds":
if e.complexity.Pod.DeletionGracePeriodSeconds == nil {
break
}
return e.complexity.Pod.DeletionGracePeriodSeconds(childComplexity), true
case "Pod.deletionTimestamp":
if e.complexity.Pod.DeletionTimestamp == nil {
break
}
return e.complexity.Pod.DeletionTimestamp(childComplexity), true
case "Pod.finalizers":
if e.complexity.Pod.Finalizers == nil {
break
}
return e.complexity.Pod.Finalizers(childComplexity), true
case "Pod.generateName":
if e.complexity.Pod.GenerateName == nil {
break
}
return e.complexity.Pod.GenerateName(childComplexity), true
case "Pod.generation":
if e.complexity.Pod.Generation == nil {
break
}
return e.complexity.Pod.Generation(childComplexity), true
case "Pod.ipset":
if e.complexity.Pod.Ipset == nil {
break
}
return e.complexity.Pod.Ipset(childComplexity), true
case "Pod.iptables":
if e.complexity.Pod.Iptables == nil {
break
}
return e.complexity.Pod.Iptables(childComplexity), true
case "Pod.kind":
if e.complexity.Pod.Kind == nil {
break
}
return e.complexity.Pod.Kind(childComplexity), true
case "Pod.labels":
if e.complexity.Pod.Labels == nil {
break
}
return e.complexity.Pod.Labels(childComplexity), true
case "Pod.logs":
if e.complexity.Pod.Logs == nil {
break
}
return e.complexity.Pod.Logs(childComplexity), true
case "Pod.mounts":
if e.complexity.Pod.Mounts == nil {
break
}
return e.complexity.Pod.Mounts(childComplexity), true
case "Pod.name":
if e.complexity.Pod.Name == nil {
break
}
return e.complexity.Pod.Name(childComplexity), true
case "Pod.namespace":
if e.complexity.Pod.Namespace == nil {
break
}
return e.complexity.Pod.Namespace(childComplexity), true
case "Pod.ownerReferences":
if e.complexity.Pod.OwnerReferences == nil {
break
}
return e.complexity.Pod.OwnerReferences(childComplexity), true
case "Pod.processes":
if e.complexity.Pod.Processes == nil {
break
}
return e.complexity.Pod.Processes(childComplexity), true
case "Pod.resourceVersion":
if e.complexity.Pod.ResourceVersion == nil {
break
}
return e.complexity.Pod.ResourceVersion(childComplexity), true
case "Pod.selfLink":
if e.complexity.Pod.SelfLink == nil {
break
}
return e.complexity.Pod.SelfLink(childComplexity), true
case "Pod.spec":
if e.complexity.Pod.Spec == nil {
break
}
return e.complexity.Pod.Spec(childComplexity), true
case "Pod.status":
if e.complexity.Pod.Status == nil {
break
}
return e.complexity.Pod.Status(childComplexity), true
case "Pod.tcQdisc":
if e.complexity.Pod.TcQdisc == nil {
break
}
return e.complexity.Pod.TcQdisc(childComplexity), true
case "Pod.uid":
if e.complexity.Pod.UID == nil {
break
}
return e.complexity.Pod.UID(childComplexity), true
case "PodCondition.lastProbeTime":
if e.complexity.PodCondition.LastProbeTime == nil {
break
}
return e.complexity.PodCondition.LastProbeTime(childComplexity), true
case "PodCondition.lastTransitionTime":
if e.complexity.PodCondition.LastTransitionTime == nil {
break
}
return e.complexity.PodCondition.LastTransitionTime(childComplexity), true
case "PodCondition.message":
if e.complexity.PodCondition.Message == nil {
break
}
return e.complexity.PodCondition.Message(childComplexity), true
case "PodCondition.reason":
if e.complexity.PodCondition.Reason == nil {
break
}
return e.complexity.PodCondition.Reason(childComplexity), true
case "PodCondition.status":
if e.complexity.PodCondition.Status == nil {
break
}
return e.complexity.PodCondition.Status(childComplexity), true
case "PodCondition.type":
if e.complexity.PodCondition.Type == nil {
break
}
return e.complexity.PodCondition.Type(childComplexity), true
case "PodHTTPChaos.apiVersion":
if e.complexity.PodHTTPChaos.APIVersion == nil {
break
}
return e.complexity.PodHTTPChaos.APIVersion(childComplexity), true
case "PodHTTPChaos.annotations":
if e.complexity.PodHTTPChaos.Annotations == nil {
break
}
return e.complexity.PodHTTPChaos.Annotations(childComplexity), true
case "PodHTTPChaos.clusterName":
if e.complexity.PodHTTPChaos.ClusterName == nil {
break
}
return e.complexity.PodHTTPChaos.ClusterName(childComplexity), true
case "PodHTTPChaos.creationTimestamp":
if e.complexity.PodHTTPChaos.CreationTimestamp == nil {
break
}
return e.complexity.PodHTTPChaos.CreationTimestamp(childComplexity), true
case "PodHTTPChaos.deletionGracePeriodSeconds":
if e.complexity.PodHTTPChaos.DeletionGracePeriodSeconds == nil {
break
}
return e.complexity.PodHTTPChaos.DeletionGracePeriodSeconds(childComplexity), true
case "PodHTTPChaos.deletionTimestamp":
if e.complexity.PodHTTPChaos.DeletionTimestamp == nil {
break
}
return e.complexity.PodHTTPChaos.DeletionTimestamp(childComplexity), true
case "PodHTTPChaos.finalizers":
if e.complexity.PodHTTPChaos.Finalizers == nil {
break
}
return e.complexity.PodHTTPChaos.Finalizers(childComplexity), true
case "PodHTTPChaos.generateName":
if e.complexity.PodHTTPChaos.GenerateName == nil {
break
}
return e.complexity.PodHTTPChaos.GenerateName(childComplexity), true
case "PodHTTPChaos.generation":
if e.complexity.PodHTTPChaos.Generation == nil {
break
}
return e.complexity.PodHTTPChaos.Generation(childComplexity), true
case "PodHTTPChaos.kind":
if e.complexity.PodHTTPChaos.Kind == nil {
break
}
return e.complexity.PodHTTPChaos.Kind(childComplexity), true
case "PodHTTPChaos.labels":
if e.complexity.PodHTTPChaos.Labels == nil {
break
}
return e.complexity.PodHTTPChaos.Labels(childComplexity), true
case "PodHTTPChaos.name":
if e.complexity.PodHTTPChaos.Name == nil {
break
}
return e.complexity.PodHTTPChaos.Name(childComplexity), true
case "PodHTTPChaos.namespace":
if e.complexity.PodHTTPChaos.Namespace == nil {
break
}
return e.complexity.PodHTTPChaos.Namespace(childComplexity), true
case "PodHTTPChaos.ownerReferences":
if e.complexity.PodHTTPChaos.OwnerReferences == nil {
break
}
return e.complexity.PodHTTPChaos.OwnerReferences(childComplexity), true
case "PodHTTPChaos.pod":
if e.complexity.PodHTTPChaos.Pod == nil {
break
}
return e.complexity.PodHTTPChaos.Pod(childComplexity), true
case "PodHTTPChaos.resourceVersion":
if e.complexity.PodHTTPChaos.ResourceVersion == nil {
break
}
return e.complexity.PodHTTPChaos.ResourceVersion(childComplexity), true
case "PodHTTPChaos.selfLink":
if e.complexity.PodHTTPChaos.SelfLink == nil {
break
}
return e.complexity.PodHTTPChaos.SelfLink(childComplexity), true
case "PodHTTPChaos.spec":
if e.complexity.PodHTTPChaos.Spec == nil {
break
}
return e.complexity.PodHTTPChaos.Spec(childComplexity), true
case "PodHTTPChaos.status":
if e.complexity.PodHTTPChaos.Status == nil {
break
}
return e.complexity.PodHTTPChaos.Status(childComplexity), true
case "PodHTTPChaos.uid":
if e.complexity.PodHTTPChaos.UID == nil {
break
}
return e.complexity.PodHTTPChaos.UID(childComplexity), true
case "PodHttpChaosActions.abort":
if e.complexity.PodHTTPChaosActions.Abort == nil {
break
}
return e.complexity.PodHTTPChaosActions.Abort(childComplexity), true
case "PodHttpChaosActions.delay":
if e.complexity.PodHTTPChaosActions.Delay == nil {
break
}
return e.complexity.PodHTTPChaosActions.Delay(childComplexity), true
case "PodHttpChaosActions.patch":
if e.complexity.PodHTTPChaosActions.Patch == nil {
break
}
return e.complexity.PodHTTPChaosActions.Patch(childComplexity), true
case "PodHttpChaosActions.replace":
if e.complexity.PodHTTPChaosActions.Replace == nil {
break
}
return e.complexity.PodHTTPChaosActions.Replace(childComplexity), true
case "PodHttpChaosPatchActions.body":
if e.complexity.PodHTTPChaosPatchActions.Body == nil {
break
}
return e.complexity.PodHTTPChaosPatchActions.Body(childComplexity), true
case "PodHttpChaosPatchActions.headers":
if e.complexity.PodHTTPChaosPatchActions.Headers == nil {
break
}
return e.complexity.PodHTTPChaosPatchActions.Headers(childComplexity), true
case "PodHttpChaosPatchActions.queries":
if e.complexity.PodHTTPChaosPatchActions.Queries == nil {
break
}
return e.complexity.PodHTTPChaosPatchActions.Queries(childComplexity), true
case "PodHttpChaosPatchBodyAction.type":
if e.complexity.PodHTTPChaosPatchBodyAction.Type == nil {
break
}
return e.complexity.PodHTTPChaosPatchBodyAction.Type(childComplexity), true
case "PodHttpChaosPatchBodyAction.value":
if e.complexity.PodHTTPChaosPatchBodyAction.Value == nil {
break
}
return e.complexity.PodHTTPChaosPatchBodyAction.Value(childComplexity), true
case "PodHttpChaosReplaceActions.body":
if e.complexity.PodHTTPChaosReplaceActions.Body == nil {
break
}
return e.complexity.PodHTTPChaosReplaceActions.Body(childComplexity), true
case "PodHttpChaosReplaceActions.code":
if e.complexity.PodHTTPChaosReplaceActions.Code == nil {
break
}
return e.complexity.PodHTTPChaosReplaceActions.Code(childComplexity), true
case "PodHttpChaosReplaceActions.headers":
if e.complexity.PodHTTPChaosReplaceActions.Headers == nil {
break
}
return e.complexity.PodHTTPChaosReplaceActions.Headers(childComplexity), true
case "PodHttpChaosReplaceActions.method":
if e.complexity.PodHTTPChaosReplaceActions.Method == nil {
break
}
return e.complexity.PodHTTPChaosReplaceActions.Method(childComplexity), true
case "PodHttpChaosReplaceActions.path":
if e.complexity.PodHTTPChaosReplaceActions.Path == nil {
break
}
return e.complexity.PodHTTPChaosReplaceActions.Path(childComplexity), true
case "PodHttpChaosReplaceActions.queries":
if e.complexity.PodHTTPChaosReplaceActions.Queries == nil {
break
}
return e.complexity.PodHTTPChaosReplaceActions.Queries(childComplexity), true
case "PodHttpChaosRule.actions":
if e.complexity.PodHTTPChaosRule.Actions == nil {
break
}
return e.complexity.PodHTTPChaosRule.Actions(childComplexity), true
case "PodHttpChaosRule.port":
if e.complexity.PodHTTPChaosRule.Port == nil {
break
}
return e.complexity.PodHTTPChaosRule.Port(childComplexity), true
case "PodHttpChaosRule.selector":
if e.complexity.PodHTTPChaosRule.Selector == nil {
break
}
return e.complexity.PodHTTPChaosRule.Selector(childComplexity), true
case "PodHttpChaosRule.source":
if e.complexity.PodHTTPChaosRule.Source == nil {
break
}
return e.complexity.PodHTTPChaosRule.Source(childComplexity), true
case "PodHttpChaosRule.target":
if e.complexity.PodHTTPChaosRule.Target == nil {
break
}
return e.complexity.PodHTTPChaosRule.Target(childComplexity), true
case "PodHttpChaosSelector.code":
if e.complexity.PodHTTPChaosSelector.Code == nil {
break
}
return e.complexity.PodHTTPChaosSelector.Code(childComplexity), true
case "PodHttpChaosSelector.method":
if e.complexity.PodHTTPChaosSelector.Method == nil {
break
}
return e.complexity.PodHTTPChaosSelector.Method(childComplexity), true
case "PodHttpChaosSelector.path":
if e.complexity.PodHTTPChaosSelector.Path == nil {
break
}
return e.complexity.PodHTTPChaosSelector.Path(childComplexity), true
case "PodHttpChaosSelector.port":
if e.complexity.PodHTTPChaosSelector.Port == nil {
break
}
return e.complexity.PodHTTPChaosSelector.Port(childComplexity), true
case "PodHttpChaosSelector.requestHeaders":
if e.complexity.PodHTTPChaosSelector.RequestHeaders == nil {
break
}
return e.complexity.PodHTTPChaosSelector.RequestHeaders(childComplexity), true
case "PodHttpChaosSelector.responseHeaders":
if e.complexity.PodHTTPChaosSelector.ResponseHeaders == nil {
break
}
return e.complexity.PodHTTPChaosSelector.ResponseHeaders(childComplexity), true
case "PodHttpChaosSpec.rules":
if e.complexity.PodHTTPChaosSpec.Rules == nil {
break
}
return e.complexity.PodHTTPChaosSpec.Rules(childComplexity), true
case "PodHttpChaosStatus.failedMessage":
if e.complexity.PodHTTPChaosStatus.FailedMessage == nil {
break
}
return e.complexity.PodHTTPChaosStatus.FailedMessage(childComplexity), true
case "PodHttpChaosStatus.observedGeneration":
if e.complexity.PodHTTPChaosStatus.ObservedGeneration == nil {
break
}
return e.complexity.PodHTTPChaosStatus.ObservedGeneration(childComplexity), true
case "PodHttpChaosStatus.pid":
if e.complexity.PodHTTPChaosStatus.Pid == nil {
break
}
return e.complexity.PodHTTPChaosStatus.Pid(childComplexity), true
case "PodHttpChaosStatus.startTime":
if e.complexity.PodHTTPChaosStatus.StartTime == nil {
break
}
return e.complexity.PodHTTPChaosStatus.StartTime(childComplexity), true
case "PodIOChaos.apiVersion":
if e.complexity.PodIOChaos.APIVersion == nil {
break
}
return e.complexity.PodIOChaos.APIVersion(childComplexity), true
case "PodIOChaos.annotations":
if e.complexity.PodIOChaos.Annotations == nil {
break
}
return e.complexity.PodIOChaos.Annotations(childComplexity), true
case "PodIOChaos.clusterName":
if e.complexity.PodIOChaos.ClusterName == nil {
break
}
return e.complexity.PodIOChaos.ClusterName(childComplexity), true
case "PodIOChaos.creationTimestamp":
if e.complexity.PodIOChaos.CreationTimestamp == nil {
break
}
return e.complexity.PodIOChaos.CreationTimestamp(childComplexity), true
case "PodIOChaos.deletionGracePeriodSeconds":
if e.complexity.PodIOChaos.DeletionGracePeriodSeconds == nil {
break
}
return e.complexity.PodIOChaos.DeletionGracePeriodSeconds(childComplexity), true
case "PodIOChaos.deletionTimestamp":
if e.complexity.PodIOChaos.DeletionTimestamp == nil {
break
}
return e.complexity.PodIOChaos.DeletionTimestamp(childComplexity), true
case "PodIOChaos.finalizers":
if e.complexity.PodIOChaos.Finalizers == nil {
break
}
return e.complexity.PodIOChaos.Finalizers(childComplexity), true
case "PodIOChaos.generateName":
if e.complexity.PodIOChaos.GenerateName == nil {
break
}
return e.complexity.PodIOChaos.GenerateName(childComplexity), true
case "PodIOChaos.generation":
if e.complexity.PodIOChaos.Generation == nil {
break
}
return e.complexity.PodIOChaos.Generation(childComplexity), true
case "PodIOChaos.ios":
if e.complexity.PodIOChaos.Ios == nil {
break
}
return e.complexity.PodIOChaos.Ios(childComplexity), true
case "PodIOChaos.kind":
if e.complexity.PodIOChaos.Kind == nil {
break
}
return e.complexity.PodIOChaos.Kind(childComplexity), true
case "PodIOChaos.labels":
if e.complexity.PodIOChaos.Labels == nil {
break
}
return e.complexity.PodIOChaos.Labels(childComplexity), true
case "PodIOChaos.name":
if e.complexity.PodIOChaos.Name == nil {
break
}
return e.complexity.PodIOChaos.Name(childComplexity), true
case "PodIOChaos.namespace":
if e.complexity.PodIOChaos.Namespace == nil {
break
}
return e.complexity.PodIOChaos.Namespace(childComplexity), true
case "PodIOChaos.ownerReferences":
if e.complexity.PodIOChaos.OwnerReferences == nil {
break
}
return e.complexity.PodIOChaos.OwnerReferences(childComplexity), true
case "PodIOChaos.pod":
if e.complexity.PodIOChaos.Pod == nil {
break
}
return e.complexity.PodIOChaos.Pod(childComplexity), true
case "PodIOChaos.resourceVersion":
if e.complexity.PodIOChaos.ResourceVersion == nil {
break
}
return e.complexity.PodIOChaos.ResourceVersion(childComplexity), true
case "PodIOChaos.selfLink":
if e.complexity.PodIOChaos.SelfLink == nil {
break
}
return e.complexity.PodIOChaos.SelfLink(childComplexity), true
case "PodIOChaos.spec":
if e.complexity.PodIOChaos.Spec == nil {
break
}
return e.complexity.PodIOChaos.Spec(childComplexity), true
case "PodIOChaos.status":
if e.complexity.PodIOChaos.Status == nil {
break
}
return e.complexity.PodIOChaos.Status(childComplexity), true
case "PodIOChaos.uid":
if e.complexity.PodIOChaos.UID == nil {
break
}
return e.complexity.PodIOChaos.UID(childComplexity), true
case "PodIOChaosSpec.actions":
if e.complexity.PodIOChaosSpec.Actions == nil {
break
}
return e.complexity.PodIOChaosSpec.Actions(childComplexity), true
case "PodIOChaosSpec.container":
if e.complexity.PodIOChaosSpec.Container == nil {
break
}
return e.complexity.PodIOChaosSpec.Container(childComplexity), true
case "PodIOChaosSpec.volumeMountPath":
if e.complexity.PodIOChaosSpec.VolumeMountPath == nil {
break
}
return e.complexity.PodIOChaosSpec.VolumeMountPath(childComplexity), true
case "PodIOChaosStatus.failedMessage":
if e.complexity.PodIOChaosStatus.FailedMessage == nil {
break
}
return e.complexity.PodIOChaosStatus.FailedMessage(childComplexity), true
case "PodIOChaosStatus.observedGeneration":
if e.complexity.PodIOChaosStatus.ObservedGeneration == nil {
break
}
return e.complexity.PodIOChaosStatus.ObservedGeneration(childComplexity), true
case "PodIOChaosStatus.pid":
if e.complexity.PodIOChaosStatus.Pid == nil {
break
}
return e.complexity.PodIOChaosStatus.Pid(childComplexity), true
case "PodIOChaosStatus.startTime":
if e.complexity.PodIOChaosStatus.StartTime == nil {
break
}
return e.complexity.PodIOChaosStatus.StartTime(childComplexity), true
case "PodIP.ip":
if e.complexity.PodIP.IP == nil {
break
}
return e.complexity.PodIP.IP(childComplexity), true
case "PodNetworkChaos.apiVersion":
if e.complexity.PodNetworkChaos.APIVersion == nil {
break
}
return e.complexity.PodNetworkChaos.APIVersion(childComplexity), true
case "PodNetworkChaos.annotations":
if e.complexity.PodNetworkChaos.Annotations == nil {
break
}
return e.complexity.PodNetworkChaos.Annotations(childComplexity), true
case "PodNetworkChaos.clusterName":
if e.complexity.PodNetworkChaos.ClusterName == nil {
break
}
return e.complexity.PodNetworkChaos.ClusterName(childComplexity), true
case "PodNetworkChaos.creationTimestamp":
if e.complexity.PodNetworkChaos.CreationTimestamp == nil {
break
}
return e.complexity.PodNetworkChaos.CreationTimestamp(childComplexity), true
case "PodNetworkChaos.deletionGracePeriodSeconds":
if e.complexity.PodNetworkChaos.DeletionGracePeriodSeconds == nil {
break
}
return e.complexity.PodNetworkChaos.DeletionGracePeriodSeconds(childComplexity), true
case "PodNetworkChaos.deletionTimestamp":
if e.complexity.PodNetworkChaos.DeletionTimestamp == nil {
break
}
return e.complexity.PodNetworkChaos.DeletionTimestamp(childComplexity), true
case "PodNetworkChaos.finalizers":
if e.complexity.PodNetworkChaos.Finalizers == nil {
break
}
return e.complexity.PodNetworkChaos.Finalizers(childComplexity), true
case "PodNetworkChaos.generateName":
if e.complexity.PodNetworkChaos.GenerateName == nil {
break
}
return e.complexity.PodNetworkChaos.GenerateName(childComplexity), true
case "PodNetworkChaos.generation":
if e.complexity.PodNetworkChaos.Generation == nil {
break
}
return e.complexity.PodNetworkChaos.Generation(childComplexity), true
case "PodNetworkChaos.kind":
if e.complexity.PodNetworkChaos.Kind == nil {
break
}
return e.complexity.PodNetworkChaos.Kind(childComplexity), true
case "PodNetworkChaos.labels":
if e.complexity.PodNetworkChaos.Labels == nil {
break
}
return e.complexity.PodNetworkChaos.Labels(childComplexity), true
case "PodNetworkChaos.name":
if e.complexity.PodNetworkChaos.Name == nil {
break
}
return e.complexity.PodNetworkChaos.Name(childComplexity), true
case "PodNetworkChaos.namespace":
if e.complexity.PodNetworkChaos.Namespace == nil {
break
}
return e.complexity.PodNetworkChaos.Namespace(childComplexity), true
case "PodNetworkChaos.ownerReferences":
if e.complexity.PodNetworkChaos.OwnerReferences == nil {
break
}
return e.complexity.PodNetworkChaos.OwnerReferences(childComplexity), true
case "PodNetworkChaos.pod":
if e.complexity.PodNetworkChaos.Pod == nil {
break
}
return e.complexity.PodNetworkChaos.Pod(childComplexity), true
case "PodNetworkChaos.resourceVersion":
if e.complexity.PodNetworkChaos.ResourceVersion == nil {
break
}
return e.complexity.PodNetworkChaos.ResourceVersion(childComplexity), true
case "PodNetworkChaos.selfLink":
if e.complexity.PodNetworkChaos.SelfLink == nil {
break
}
return e.complexity.PodNetworkChaos.SelfLink(childComplexity), true
case "PodNetworkChaos.spec":
if e.complexity.PodNetworkChaos.Spec == nil {
break
}
return e.complexity.PodNetworkChaos.Spec(childComplexity), true
case "PodNetworkChaos.status":
if e.complexity.PodNetworkChaos.Status == nil {
break
}
return e.complexity.PodNetworkChaos.Status(childComplexity), true
case "PodNetworkChaos.uid":
if e.complexity.PodNetworkChaos.UID == nil {
break
}
return e.complexity.PodNetworkChaos.UID(childComplexity), true
case "PodNetworkChaosSpec.ipSets":
if e.complexity.PodNetworkChaosSpec.IPSets == nil {
break
}
return e.complexity.PodNetworkChaosSpec.IPSets(childComplexity), true
case "PodNetworkChaosSpec.iptables":
if e.complexity.PodNetworkChaosSpec.Iptables == nil {
break
}
return e.complexity.PodNetworkChaosSpec.Iptables(childComplexity), true
case "PodNetworkChaosSpec.trafficControls":
if e.complexity.PodNetworkChaosSpec.TrafficControls == nil {
break
}
return e.complexity.PodNetworkChaosSpec.TrafficControls(childComplexity), true
case "PodNetworkChaosStatus.failedMessage":
if e.complexity.PodNetworkChaosStatus.FailedMessage == nil {
break
}
return e.complexity.PodNetworkChaosStatus.FailedMessage(childComplexity), true
case "PodNetworkChaosStatus.observedGeneration":
if e.complexity.PodNetworkChaosStatus.ObservedGeneration == nil {
break
}
return e.complexity.PodNetworkChaosStatus.ObservedGeneration(childComplexity), true
case "PodSelectorSpec.annotationSelectors":
if e.complexity.PodSelectorSpec.AnnotationSelectors == nil {
break
}
return e.complexity.PodSelectorSpec.AnnotationSelectors(childComplexity), true
case "PodSelectorSpec.fieldSelectors":
if e.complexity.PodSelectorSpec.FieldSelectors == nil {
break
}
return e.complexity.PodSelectorSpec.FieldSelectors(childComplexity), true
case "PodSelectorSpec.labelSelectors":
if e.complexity.PodSelectorSpec.LabelSelectors == nil {
break
}
return e.complexity.PodSelectorSpec.LabelSelectors(childComplexity), true
case "PodSelectorSpec.namespaces":
if e.complexity.PodSelectorSpec.Namespaces == nil {
break
}
return e.complexity.PodSelectorSpec.Namespaces(childComplexity), true
case "PodSelectorSpec.nodeSelectors":
if e.complexity.PodSelectorSpec.NodeSelectors == nil {
break
}
return e.complexity.PodSelectorSpec.NodeSelectors(childComplexity), true
case "PodSelectorSpec.nodes":
if e.complexity.PodSelectorSpec.Nodes == nil {
break
}
return e.complexity.PodSelectorSpec.Nodes(childComplexity), true
case "PodSelectorSpec.podPhaseSelectors":
if e.complexity.PodSelectorSpec.PodPhaseSelectors == nil {
break
}
return e.complexity.PodSelectorSpec.PodPhaseSelectors(childComplexity), true
case "PodSelectorSpec.pods":
if e.complexity.PodSelectorSpec.Pods == nil {
break
}
return e.complexity.PodSelectorSpec.Pods(childComplexity), true
case "PodSpec.nodeName":
if e.complexity.PodSpec.NodeName == nil {
break
}
return e.complexity.PodSpec.NodeName(childComplexity), true
case "PodStatus.conditions":
if e.complexity.PodStatus.Conditions == nil {
break
}
return e.complexity.PodStatus.Conditions(childComplexity), true
case "PodStatus.containerStatuses":
if e.complexity.PodStatus.ContainerStatuses == nil {
break
}
return e.complexity.PodStatus.ContainerStatuses(childComplexity), true
case "PodStatus.ephemeralContainerStatuses":
if e.complexity.PodStatus.EphemeralContainerStatuses == nil {
break
}
return e.complexity.PodStatus.EphemeralContainerStatuses(childComplexity), true
case "PodStatus.hostIP":
if e.complexity.PodStatus.HostIP == nil {
break
}
return e.complexity.PodStatus.HostIP(childComplexity), true
case "PodStatus.initContainerStatuses":
if e.complexity.PodStatus.InitContainerStatuses == nil {
break
}
return e.complexity.PodStatus.InitContainerStatuses(childComplexity), true
case "PodStatus.message":
if e.complexity.PodStatus.Message == nil {
break
}
return e.complexity.PodStatus.Message(childComplexity), true
case "PodStatus.nominatedNodeName":
if e.complexity.PodStatus.NominatedNodeName == nil {
break
}
return e.complexity.PodStatus.NominatedNodeName(childComplexity), true
case "PodStatus.phase":
if e.complexity.PodStatus.Phase == nil {
break
}
return e.complexity.PodStatus.Phase(childComplexity), true
case "PodStatus.podIP":
if e.complexity.PodStatus.PodIP == nil {
break
}
return e.complexity.PodStatus.PodIP(childComplexity), true
case "PodStatus.podIPs":
if e.complexity.PodStatus.PodIPs == nil {
break
}
return e.complexity.PodStatus.PodIPs(childComplexity), true
case "PodStatus.qosClass":
if e.complexity.PodStatus.QosClass == nil {
break
}
return e.complexity.PodStatus.QosClass(childComplexity), true
case "PodStatus.reason":
if e.complexity.PodStatus.Reason == nil {
break
}
return e.complexity.PodStatus.Reason(childComplexity), true
case "PodStatus.startTime":
if e.complexity.PodStatus.StartTime == nil {
break
}
return e.complexity.PodStatus.StartTime(childComplexity), true
case "Process.command":
if e.complexity.Process.Command == nil {
break
}
return e.complexity.Process.Command(childComplexity), true
case "Process.fds":
if e.complexity.Process.Fds == nil {
break
}
return e.complexity.Process.Fds(childComplexity), true
case "Process.pid":
if e.complexity.Process.Pid == nil {
break
}
return e.complexity.Process.Pid(childComplexity), true
case "Process.pod":
if e.complexity.Process.Pod == nil {
break
}
return e.complexity.Process.Pod(childComplexity), true
case "Query.namespace":
if e.complexity.Query.Namespace == nil {
break
}
args, err := ec.field_Query_namespace_args(context.TODO(), rawArgs)
if err != nil {
return 0, false
}
return e.complexity.Query.Namespace(childComplexity, args["ns"].(*string)), true
case "RawIPSet.cidrs":
if e.complexity.RawIPSet.Cidrs == nil {
break
}
return e.complexity.RawIPSet.Cidrs(childComplexity), true
case "RawIPSet.name":
if e.complexity.RawIPSet.Name == nil {
break
}
return e.complexity.RawIPSet.Name(childComplexity), true
case "RawIPSet.source":
if e.complexity.RawIPSet.Source == nil {
break
}
return e.complexity.RawIPSet.Source(childComplexity), true
case "RawIptables.direction":
if e.complexity.RawIptables.Direction == nil {
break
}
return e.complexity.RawIptables.Direction(childComplexity), true
case "RawIptables.ipSets":
if e.complexity.RawIptables.IPSets == nil {
break
}
return e.complexity.RawIptables.IPSets(childComplexity), true
case "RawIptables.name":
if e.complexity.RawIptables.Name == nil {
break
}
return e.complexity.RawIptables.Name(childComplexity), true
case "RawIptables.source":
if e.complexity.RawIptables.Source == nil {
break
}
return e.complexity.RawIptables.Source(childComplexity), true
case "RawTrafficControl.Bandwidth":
if e.complexity.RawTrafficControl.Bandwidth == nil {
break
}
return e.complexity.RawTrafficControl.Bandwidth(childComplexity), true
case "RawTrafficControl.corrupt":
if e.complexity.RawTrafficControl.Corrupt == nil {
break
}
return e.complexity.RawTrafficControl.Corrupt(childComplexity), true
case "RawTrafficControl.delay":
if e.complexity.RawTrafficControl.Delay == nil {
break
}
return e.complexity.RawTrafficControl.Delay(childComplexity), true
case "RawTrafficControl.duplicate":
if e.complexity.RawTrafficControl.Duplicate == nil {
break
}
return e.complexity.RawTrafficControl.Duplicate(childComplexity), true
case "RawTrafficControl.ipSet":
if e.complexity.RawTrafficControl.IPSet == nil {
break
}
return e.complexity.RawTrafficControl.IPSet(childComplexity), true
case "RawTrafficControl.loss":
if e.complexity.RawTrafficControl.Loss == nil {
break
}
return e.complexity.RawTrafficControl.Loss(childComplexity), true
case "RawTrafficControl.source":
if e.complexity.RawTrafficControl.Source == nil {
break
}
return e.complexity.RawTrafficControl.Source(childComplexity), true
case "RawTrafficControl.type":
if e.complexity.RawTrafficControl.Type == nil {
break
}
return e.complexity.RawTrafficControl.Type(childComplexity), true
case "Record.id":
if e.complexity.Record.Id == nil {
break
}
return e.complexity.Record.Id(childComplexity), true
case "Record.phase":
if e.complexity.Record.Phase == nil {
break
}
return e.complexity.Record.Phase(childComplexity), true
case "Record.selectorKey":
if e.complexity.Record.SelectorKey == nil {
break
}
return e.complexity.Record.SelectorKey(childComplexity), true
case "ReorderSpec.correlation":
if e.complexity.ReorderSpec.Correlation == nil {
break
}
return e.complexity.ReorderSpec.Correlation(childComplexity), true
case "ReorderSpec.gap":
if e.complexity.ReorderSpec.Gap == nil {
break
}
return e.complexity.ReorderSpec.Gap(childComplexity), true
case "ReorderSpec.reorder":
if e.complexity.ReorderSpec.Reorder == nil {
break
}
return e.complexity.ReorderSpec.Reorder(childComplexity), true
case "StressChaos.apiVersion":
if e.complexity.StressChaos.APIVersion == nil {
break
}
return e.complexity.StressChaos.APIVersion(childComplexity), true
case "StressChaos.annotations":
if e.complexity.StressChaos.Annotations == nil {
break
}
return e.complexity.StressChaos.Annotations(childComplexity), true
case "StressChaos.clusterName":
if e.complexity.StressChaos.ClusterName == nil {
break
}
return e.complexity.StressChaos.ClusterName(childComplexity), true
case "StressChaos.creationTimestamp":
if e.complexity.StressChaos.CreationTimestamp == nil {
break
}
return e.complexity.StressChaos.CreationTimestamp(childComplexity), true
case "StressChaos.deletionGracePeriodSeconds":
if e.complexity.StressChaos.DeletionGracePeriodSeconds == nil {
break
}
return e.complexity.StressChaos.DeletionGracePeriodSeconds(childComplexity), true
case "StressChaos.deletionTimestamp":
if e.complexity.StressChaos.DeletionTimestamp == nil {
break
}
return e.complexity.StressChaos.DeletionTimestamp(childComplexity), true
case "StressChaos.finalizers":
if e.complexity.StressChaos.Finalizers == nil {
break
}
return e.complexity.StressChaos.Finalizers(childComplexity), true
case "StressChaos.generateName":
if e.complexity.StressChaos.GenerateName == nil {
break
}
return e.complexity.StressChaos.GenerateName(childComplexity), true
case "StressChaos.generation":
if e.complexity.StressChaos.Generation == nil {
break
}
return e.complexity.StressChaos.Generation(childComplexity), true
case "StressChaos.kind":
if e.complexity.StressChaos.Kind == nil {
break
}
return e.complexity.StressChaos.Kind(childComplexity), true
case "StressChaos.labels":
if e.complexity.StressChaos.Labels == nil {
break
}
return e.complexity.StressChaos.Labels(childComplexity), true
case "StressChaos.name":
if e.complexity.StressChaos.Name == nil {
break
}
return e.complexity.StressChaos.Name(childComplexity), true
case "StressChaos.namespace":
if e.complexity.StressChaos.Namespace == nil {
break
}
return e.complexity.StressChaos.Namespace(childComplexity), true
case "StressChaos.ownerReferences":
if e.complexity.StressChaos.OwnerReferences == nil {
break
}
return e.complexity.StressChaos.OwnerReferences(childComplexity), true
case "StressChaos.resourceVersion":
if e.complexity.StressChaos.ResourceVersion == nil {
break
}
return e.complexity.StressChaos.ResourceVersion(childComplexity), true
case "StressChaos.selfLink":
if e.complexity.StressChaos.SelfLink == nil {
break
}
return e.complexity.StressChaos.SelfLink(childComplexity), true
case "StressChaos.uid":
if e.complexity.StressChaos.UID == nil {
break
}
return e.complexity.StressChaos.UID(childComplexity), true
case "Timespec.nsec":
if e.complexity.Timespec.Nsec == nil {
break
}
return e.complexity.Timespec.Nsec(childComplexity), true
case "Timespec.sec":
if e.complexity.Timespec.Sec == nil {
break
}
return e.complexity.Timespec.Sec(childComplexity), true
}
return 0, false
}
func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler {
rc := graphql.GetOperationContext(ctx)
ec := executionContext{rc, e}
first := true
switch rc.Operation.Operation {
case ast.Query:
return func(ctx context.Context) *graphql.Response {
if !first {
return nil
}
first = false
data := ec._Query(ctx, rc.Operation.SelectionSet)
var buf bytes.Buffer
data.MarshalGQL(&buf)
return &graphql.Response{
Data: buf.Bytes(),
}
}
case ast.Subscription:
next := ec._Logger(ctx, rc.Operation.SelectionSet)
var buf bytes.Buffer
return func(ctx context.Context) *graphql.Response {
buf.Reset()
data := next()
if data == nil {
return nil
}
data.MarshalGQL(&buf)
return &graphql.Response{
Data: buf.Bytes(),
}
}
default:
return graphql.OneShot(graphql.ErrorResponse(ctx, "unsupported GraphQL operation"))
}
}
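// Illustrative sketch (not generated code; the route, listen address, and the
// Resolver root type are assumptions): the executable schema built above is
// typically served with gqlgen's handler package, whose default server also
// wires the websocket transport needed by the Logger subscription:
//
//	srv := handler.NewDefaultServer(NewExecutableSchema(Config{Resolvers: &Resolver{}}))
//	http.Handle("/query", srv)
//	log.Fatal(http.ListenAndServe(":8080", nil))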
type executionContext struct {
*graphql.OperationContext
*executableSchema
}
func (ec *executionContext) introspectSchema() (*introspection.Schema, error) {
if ec.DisableIntrospection {
return nil, errors.New("introspection disabled")
}
return introspection.WrapSchema(parsedSchema), nil
}
func (ec *executionContext) introspectType(name string) (*introspection.Type, error) {
if ec.DisableIntrospection {
return nil, errors.New("introspection disabled")
}
return introspection.WrapTypeFromDef(parsedSchema, parsedSchema.Types[name]), nil
}
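// Illustrative note (a sketch, not generated code): DisableIntrospection is
// set per operation by the server configuration; with gqlgen's handler it is
// typically toggled by registering the introspection extension on the server
// from the sketch above:
//
//	srv.Use(extension.Introspection{})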
var sources = []*ast.Source{
{Name: "graph/schema.graphqls", Input: `# Copyright 2021 Chaos Mesh Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
directive @goModel(model: String, models: [String!]) on OBJECT
| INPUT_OBJECT
| SCALAR
| ENUM
| INTERFACE
| UNION
directive @goField(forceResolver: Boolean, name: String) on INPUT_FIELD_DEFINITION
| FIELD_DEFINITION
scalar Time
scalar Map
schema {
query: Query
subscription: Logger
}
type Query {
namespace(ns: String): [Namespace!]
}
type Logger {
component(ns: String! = "chaos-testing", component: Component!): String! @goField(forceResolver: true)
pod(ns: String! = "default", name: String!): String! @goField(forceResolver: true)
}
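# Illustrative example (comment only, not part of the schema): the Logger
# subscription root above can stream component logs, e.g.
#
#   subscription {
#     component(ns: "chaos-testing", component: MANAGER)
#   }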
type Namespace {
ns: String!
component(component: Component!): [Pod!] @goField(forceResolver: true)
pod(name: String): [Pod!] @goField(forceResolver: true)
stresschaos(name: String): [StressChaos!] @goField(forceResolver: true)
iochaos(name: String): [IOChaos!] @goField(forceResolver: true)
podiochaos(name: String): [PodIOChaos!] @goField(forceResolver: true)
httpchaos(name: String): [HTTPChaos!] @goField(forceResolver: true)
podhttpchaos(name: String): [PodHTTPChaos!] @goField(forceResolver: true)
networkchaos(name: String): [NetworkChaos!] @goField(forceResolver: true)
podnetworkchaos(name: String): [PodNetworkChaos!] @goField(forceResolver: true)
}
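# Illustrative example (comment only, not part of the schema): Query and
# Namespace together allow drilling down from a namespace to its pods and
# chaos objects, e.g.
#
#   query {
#     namespace(ns: "chaos-testing") {
#       ns
#       pod { name }
#       networkchaos { name }
#     }
#   }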
type OwnerReference @goModel(model: "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference") {
kind: String!
apiVersion: String!
name: String!
uid: String!
controller: Boolean
blockOwnerDeletion: Boolean
}
enum Component {
MANAGER
DAEMON
DASHBOARD
DNSSERVER
}
type Process {
pod: Pod!
pid: String!
command: String!
fds: [Fd!] @goField(forceResolver: true)
}
type Fd {
fd: String!
target: String!
}
type Pod @goModel(model: "k8s.io/api/core/v1.Pod") {
kind: String!
apiVersion: String!
name: String!
generateName: String!
namespace: String!
selfLink: String!
uid: String!
resourceVersion: String!
generation: Int!
creationTimestamp: Time!
deletionTimestamp: Time
deletionGracePeriodSeconds: Int
labels: Map
annotations: Map
ownerReferences: [OwnerReference!]
finalizers: [String!]
clusterName: String!
spec: PodSpec!
status: PodStatus!
logs: String! @goField(forceResolver: true)
daemon: Pod @goField(forceResolver: true)
processes: [Process!] @goField(forceResolver: true)
mounts: [String!] @goField(forceResolver: true)
ipset: String! @goField(forceResolver: true)
tcQdisc: String! @goField(forceResolver: true)
iptables: String! @goField(forceResolver: true)
}
# PodStatus represents information about the status of a pod. Status may trail the actual
# state of a system, especially if the node that hosts the pod cannot contact the control
# plane.
type PodStatus @goModel(model: "k8s.io/api/core/v1.PodStatus") {
# The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
# The conditions array, the reason and message fields, and the individual container status
# arrays contain more detail about the pod's status.
# There are five possible phase values:
#
# Pending: The pod has been accepted by the Kubernetes system, but one or more of the
# container images has not been created. This includes time before being scheduled as
# well as time spent downloading images over the network, which could take a while.
# Running: The pod has been bound to a node, and all of the containers have been created.
# At least one container is still running, or is in the process of starting or restarting.
# Succeeded: All containers in the pod have terminated in success, and will not be restarted.
# Failed: All containers in the pod have terminated, and at least one container has
# terminated in failure. The container either exited with non-zero status or was terminated
# by the system.
# Unknown: For some reason the state of the pod could not be obtained, typically due to an
# error in communicating with the host of the pod.
#
# More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase
phase: String!
# Current service state of pod.
# More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
conditions: [PodCondition!]
# A human readable message indicating details about why the pod is in this condition.
message: String!
# A brief CamelCase message indicating details about why the pod is in this state.
# e.g. 'Evicted'
reason: String!
# nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be
# scheduled right away as preemption victims receive their graceful termination periods.
# This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide
# to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to
# give the resources on this node to a higher priority pod that is created after preemption.
# As a result, this field may be different than PodSpec.nodeName when the pod is
# scheduled.
nominatedNodeName: String!
# IP address of the host to which the pod is assigned. Empty if not yet scheduled.
hostIP: String!
# IP address allocated to the pod. Routable at least within the cluster.
# Empty if not yet allocated.
podIP: String!
# podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must
# match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list
# is empty if no IPs have been allocated yet.
podIPs: [PodIP!]
# RFC 3339 date and time at which the object was acknowledged by the Kubelet.
# This is before the Kubelet pulled the container image(s) for the pod.
startTime: Time
# The list has one entry per init container in the manifest. The most recent successful
# init container will have ready = true, the most recently started container will have
# startTime set.
# More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
initContainerStatuses: [ContainerStatus!]
# The list has one entry per container in the manifest. Each entry is currently the output
# of ` + "`" + `docker inspect` + "`" + `.
# More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
containerStatuses: [ContainerStatus!]
# The Quality of Service (QOS) classification assigned to the pod based on resource requirements
# See PodQOSClass type for available QOS classes
# More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md
qosClass: String!
# Status for any ephemeral containers that have run in this pod.
# This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature.
ephemeralContainerStatuses: [ContainerStatus!]
}
# IP address information for entries in the (plural) PodIPs field.
# Each entry includes:
# IP: An IP address allocated to the pod. Routable at least within the cluster.
type PodIP @goModel(model: "k8s.io/api/core/v1.PodIP") {
# ip is an IP address (IPv4 or IPv6) assigned to the pod
ip: String!
}
# PodCondition contains details for the current condition of this pod.
type PodCondition @goModel(model: "k8s.io/api/core/v1.PodCondition") {
# Type is the type of the condition.
# More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
type: String!
# Status is the status of the condition.
# Can be True, False, Unknown.
# More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
status: String!
# Last time we probed the condition.
lastProbeTime: Time
# Last time the condition transitioned from one status to another.
lastTransitionTime: Time
# Unique, one-word, CamelCase reason for the condition's last transition.
reason: String
# Human-readable message indicating details about last transition.
message: String
}
# ContainerStatus contains details for the current status of this container.
type ContainerStatus @goModel(model: "k8s.io/api/core/v1.ContainerStatus") {
# This must be a DNS_LABEL. Each container in a pod must have a unique name.
# Cannot be updated.
name: String!
# Details about the container's current condition.
State: ContainerState
# Details about the container's last termination condition.
lastTerminationState: ContainerState
# Specifies whether the container has passed its readiness probe.
ready: Boolean!
# The number of times the container has been restarted, currently based on
# the number of dead containers that have not yet been removed.
# Note that this is calculated from dead containers. But those containers are subject to
# garbage collection. This value will get capped at 5 by GC.
restartCount: Int!
# The image the container is running.
# More info: https://kubernetes.io/docs/concepts/containers/images
# TODO(dchen1107): Which image the container is running with?
image: String!
# ImageID of the container's image.
imageID: String!
# Container's ID in the format 'docker://<container_id>'.
containerID: String!
# Specifies whether the container has passed its startup probe.
# Initialized as false, becomes true after startupProbe is considered successful.
# Resets to false when the container is restarted, or if kubelet loses state temporarily.
# Is always true when no startupProbe is defined.
started: Boolean
}
# ContainerState holds a possible state of container.
# Only one of its members may be specified.
# If none of them is specified, the default one is ContainerStateWaiting.
type ContainerState @goModel(model: "k8s.io/api/core/v1.ContainerState") {
# Details about a waiting container
waiting: ContainerStateWaiting
# Details about a running container
running: ContainerStateRunning
# Details about a terminated container
terminated: ContainerStateTerminated
}
# ContainerStateWaiting is a waiting state of a container.
type ContainerStateWaiting @goModel(model: "k8s.io/api/core/v1.ContainerStateWaiting") {
# (brief) reason the container is not yet running.
reason: String
# Message regarding why the container is not yet running.
message: String
}
# ContainerStateRunning is a running state of a container.
type ContainerStateRunning @goModel(model: "k8s.io/api/core/v1.ContainerStateRunning") {
# Time at which the container was last (re-)started
startedAt: Time
}
# ContainerStateTerminated is a terminated state of a container.
type ContainerStateTerminated @goModel(model: "k8s.io/api/core/v1.ContainerStateTerminated") {
# Exit status from the last termination of the container
exitCode: Int!
# Signal from the last termination of the container
signal: Int
# (brief) reason from the last termination of the container
reason: String
# Message regarding the last termination of the container
message: String
# Time at which previous execution of the container started
startedAt: Time
# Time at which the container last terminated
finishedAt: Time
# Container's ID in the format 'docker://<container_id>'
containerID: String
}
# TODO: add more fields
type PodSpec @goModel(model: "k8s.io/api/core/v1.PodSpec") {
# nodeName is a request to schedule this pod onto a specific node. If it is non-empty,
# the scheduler simply schedules this pod onto that node, assuming that it fits resource
# requirements.
nodeName: String!
}
type PodIOChaos @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.PodIOChaos") {
kind: String!
apiVersion: String!
name: String!
generateName: String!
namespace: String!
selfLink: String!
uid: String!
resourceVersion: String!
generation: Int!
creationTimestamp: Time!
deletionTimestamp: Time
deletionGracePeriodSeconds: Int
labels: Map
annotations: Map
ownerReferences: [OwnerReference!]
finalizers: [String!]
clusterName: String!
spec: PodIOChaosSpec!
status: PodIOChaosStatus!
pod: Pod! @goField(forceResolver: true)
ios: [IOChaos!] @goField(forceResolver: true)
}
# PodIOChaosSpec defines the desired state of PodIOChaos
type PodIOChaosSpec @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.PodIOChaosSpec") {
# volumeMountPath represents the target mount path
# It currently must be the root of a mount path.
volumeMountPath: String!
container: String
# actions are a list of IOChaos actions
actions: [IOChaosAction!]
}
type PodIOChaosStatus @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.PodIOChaosStatus") {
# pid represents a running toda process id
pid: Int
# startTime represents the start time of a toda process
startTime: Int
failedMessage: String
observedGeneration: Int
}
# IOChaosAction defines a possible action of IOChaos
type IOChaosAction @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.IOChaosAction") {
type: String!
# path represents a glob of injecting path
path: String!
# methods represents the methods that the action will be injected into
methods: [String!]
# percent represents the percent probability of injecting this action
percent: Int
# faults represents the fault to inject
faults: [IoFault!]
# Latency represents the latency to inject
latency: String
# attrOverrides represents the attributes to override
ino: Int
size: Int
blocks: Int
atime: Timespec
mtime: Timespec
ctime: Timespec
kind: String # the file kind
perm: Int
nlink: Int
uid: Int
gid: Int
rdev: Int
# MistakeSpec represents the mistake to inject
# filling determines what is filled in the mistake data.
filling: String
# there will be [1, MaxOccurrences] segments of wrong data.
maxOccurrences: Int
# max length of each wrong data segment in bytes
maxLength: Int
# source represents the source of current rules
source: String!
}
type IoFault @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.IoFault") {
errno: Int!
weight: Int!
}
# Timespec represents a time
type Timespec @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.Timespec") {
sec: Int!
nsec: Int!
}
type IOChaos @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.IOChaos") {
kind: String!
apiVersion: String!
name: String!
generateName: String!
namespace: String!
selfLink: String!
uid: String!
resourceVersion: String!
generation: Int!
creationTimestamp: Time!
deletionTimestamp: Time
deletionGracePeriodSeconds: Int
labels: Map
annotations: Map
ownerReferences: [OwnerReference!]
finalizers: [String!]
clusterName: String!
spec: IOChaosSpec!
status: IOChaosStatus!
podios: [PodIOChaos!] @goField(forceResolver: true)
}
# IOChaosSpec defines the desired state of IOChaos
type IOChaosSpec @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.IOChaosSpec") {
# containerNames indicates the list of names of the affected containers.
# If not set, the chaos will be injected into all containers.
containerNames: [String!]
# selector is used to select pods that are used to inject chaos action.
selector: PodSelectorSpec!
# mode defines the mode to run chaos action.
# supported mode: one / all / fixed / fixed-percent / random-max-percent
mode: String!
# value is required when the mode is set to ` + "`" + `FixedPodMode` + "`" + ` / ` + "`" + `FixedPercentPodMode` + "`" + ` / ` + "`" + `RandomMaxPercentPodMode` + "`" + `.
# If ` + "`" + `FixedPodMode` + "`" + `, provide an integer number of pods to do the chaos action on.
# If ` + "`" + `FixedPercentPodMode` + "`" + `, provide a number from 0-100 to specify the percent of pods the server can do the chaos action on.
# If ` + "`" + `RandomMaxPercentPodMode` + "`" + `, provide a number from 0-100 to specify the max percent of pods to do the chaos action on.
value: String
# action defines the specific pod chaos action.
# Supported action: latency / fault / attrOverride / mistake
action: String!
# delay defines the value of I/O chaos action delay.
# A delay string is a possibly signed sequence of
# decimal numbers, each with optional fraction and a unit suffix,
# such as "300ms".
# Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
delay: String
# errno defines the error code that is returned by the I/O action.
# refer to: https://www-numi.fnal.gov/offline_software/srt_public_context/WebDocs/Errors/unix_system_errors.html
errno: Int
# attr defines the overridden attributes
attr: AttrOverrideSpec
# mistake defines what types of incorrectness are injected to IO operations
mistake: MistakeSpec
# path defines the path of files for injecting I/O chaos action.
path: String
# methods defines the I/O methods for injecting I/O chaos action.
# default: all I/O methods.
methods: [String!]
# percent defines the percentage of injection errors and provides a number from 0-100.
# default: 100.
percent: Int
# volumePath represents the mount path of injected volume
volumePath: String!
# duration represents the duration of the chaos action.
# It is required when the action is ` + "`" + `PodFailureAction` + "`" + `.
# A duration string is a possibly signed sequence of
# decimal numbers, each with optional fraction and a unit suffix,
# such as "300ms", "-1.5h" or "2h45m".
# Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
duration: String
}
# AttrOverrideSpec represents an override of file attributes
type AttrOverrideSpec @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.AttrOverrideSpec") {
ino: Int
size: Int
blocks: Int
atime: Timespec
mtime: Timespec
ctime: Timespec
kind: String # the file kind
perm: Int
nlink: Int
uid: Int
gid: Int
rdev: Int
}
# MistakeSpec represents one type of mistake
type MistakeSpec @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.MistakeSpec") {
# filling determines what is filled in the mistake data.
filling: String
# there will be [1, MaxOccurrences] segments of wrong data.
maxOccurrences: Int
# max length of each wrong data segment in bytes
maxLength: Int
}
type IOChaosStatus @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.IOChaosStatus") {
# conditions represents the current global condition of the chaos
conditions: [ChaosCondition!]
# experiment records the last experiment state.
experiment: ExperimentStatus
# instances always specifies podiochaos generation or empty
instances: Map
}
type PodHTTPChaos @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.PodHttpChaos") {
kind: String!
apiVersion: String!
name: String!
generateName: String!
namespace: String!
selfLink: String!
uid: String!
resourceVersion: String!
generation: Int!
creationTimestamp: Time!
deletionTimestamp: Time
deletionGracePeriodSeconds: Int
labels: Map
annotations: Map
ownerReferences: [OwnerReference!]
finalizers: [String!]
clusterName: String!
spec: PodHttpChaosSpec!
status: PodHttpChaosStatus!
pod: Pod! @goField(forceResolver: true)
}
# PodHttpChaosSpec defines the desired state of PodHttpChaos.
type PodHttpChaosSpec @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.PodHttpChaosSpec") {
# rules are a list of injection rule for http request.
rules: [PodHttpChaosRule!]!
}
# PodHttpChaosStatus defines the actual state of PodHttpChaos.
type PodHttpChaosStatus @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.PodHttpChaosStatus") {
# pid represents a running tproxy process id.
pid: Int
# startTime represents the start time of a tproxy process.
startTime: Int
failedMessage: String
observedGeneration: Int
}
# PodHttpChaosRule defines the injection rule for http.
type PodHttpChaosRule @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.PodHttpChaosRule") {
# target is the object to be selected and injected, <Request|Response>.
target: String!
selector: PodHttpChaosSelector!
actions: PodHttpChaosActions!
# source represents the source of current rules
source: String!
# port represents the target port to be proxied.
port: Int!
}
type PodHttpChaosSelector @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.PodHttpChaosSelector") {
# port represents the target port to be proxied.
port: Int
# path is a rule to select target by uri path in http request.
path: String
# method is a rule to select target by http method in request.
method: String
# code is a rule to select target by http status code in response.
code: Int
# requestHeaders is a rule to select target by http headers in request.
# The key-value pairs represent header name and header value pairs.
requestHeaders: Map
# responseHeaders is a rule to select target by http headers in response.
# The key-value pairs represent header name and header value pairs.
responseHeaders: Map
}
# PodHttpChaosAction defines possible actions of HttpChaos.
type PodHttpChaosActions @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.PodHttpChaosActions") {
# abort is a rule to abort a http session.
abort: Boolean
# delay represents the delay of the target request/response.
# A duration string is a possibly unsigned sequence of
# decimal numbers, each with optional fraction and a unit suffix,
# such as "300ms", "2h45m".
# Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
delay: String
# replace is a rule to replace some contents in target.
replace: PodHttpChaosReplaceActions
# patch is a rule to patch some contents in target.
patch: PodHttpChaosPatchActions
}
type HTTPChaos @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.HTTPChaos") {
kind: String!
apiVersion: String!
name: String!
generateName: String!
namespace: String!
selfLink: String!
uid: String!
resourceVersion: String!
generation: Int!
creationTimestamp: Time!
deletionTimestamp: Time
deletionGracePeriodSeconds: Int
labels: Map
annotations: Map
ownerReferences: [OwnerReference!]
finalizers: [String!]
clusterName: String!
spec: HTTPChaosSpec!
status: HTTPChaosStatus!
podhttp: [PodHTTPChaos!] @goField(forceResolver: true)
}
type HTTPChaosSpec @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.HTTPChaosSpec") {
# selector is used to select pods that are used to inject chaos action.
selector: PodSelectorSpec!
# mode defines the mode to run chaos action.
# supported mode: one / all / fixed / fixed-percent / random-max-percent
mode: String!
# value is required when the mode is set to ` + "`" + `FixedPodMode` + "`" + ` / ` + "`" + `FixedPercentPodMode` + "`" + ` / ` + "`" + `RandomMaxPercentPodMode` + "`" + `.
# If ` + "`" + `FixedPodMode` + "`" + `, provide an integer number of pods to do the chaos action on.
# If ` + "`" + `FixedPercentPodMode` + "`" + `, provide a number from 0-100 to specify the percent of pods the server can do the chaos action on.
# If ` + "`" + `RandomMaxPercentPodMode` + "`" + `, provide a number from 0-100 to specify the max percent of pods to do the chaos action on.
value: String
# target is the object to be selected and injected.
target: String!
# abort is a rule to abort a http session.
abort: Boolean
# delay represents the delay of the target request/response.
# A duration string is a possibly unsigned sequence of
# decimal numbers, each with optional fraction and a unit suffix,
# such as "300ms", "2h45m".
# Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
delay: String
# replace is a rule to replace some contents in target.
replace: PodHttpChaosReplaceActions
# patch is a rule to patch some contents in target.
patch: PodHttpChaosPatchActions
# port represents the target port to be proxied.
port: Int
# path is a rule to select target by uri path in http request.
path: String
# method is a rule to select target by http method in request.
method: String
# code is a rule to select target by http status code in response.
code: Int
# requestHeaders is a rule to select target by http headers in request.
# The key-value pairs represent header name and header value pairs.
requestHeaders: Map
# responseHeaders is a rule to select target by http headers in response.
# The key-value pairs represent header name and header value pairs.
responseHeaders: Map
# duration represents the duration of the chaos action.
duration: String
}
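# Illustrative example (comment only, not part of the schema): the podhttp
# resolver above links an HTTPChaos to the PodHttpChaos rules it produced,
# which can be inspected with a query such as
#
#   query {
#     namespace(ns: "chaos-testing") {
#       httpchaos {
#         name
#         podhttp { spec { rules { target port } } }
#       }
#     }
#   }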
# PodSelectorSpec defines some selectors to select objects.
# If all selectors are empty, all objects will be used in the chaos experiment.
type PodSelectorSpec @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.PodSelectorSpec") {
# namespaces is a set of namespaces to which objects belong.
namespaces: [String!]
# nodes is a set of node names; objects must belong to these nodes.
nodes: [String!]
# pods is a map of string keys and set values that is used to select pods.
# The key defines the namespace to which the pods belong,
# and each value is a set of pod names.
pods: Map
# map of string keys and values that can be used to select nodes.
# Selector which must match a node's labels,
# and objects must belong to these selected nodes.
nodeSelectors: Map
# map of string keys and values that can be used to select objects.
# A selector based on fields.
fieldSelectors: Map
# map of string keys and values that can be used to select objects.
# A selector based on labels.
labelSelectors: Map
# map of string keys and values that can be used to select objects.
# A selector based on annotations.
annotationSelectors: Map
# podPhaseSelectors is a set of conditions of a pod at the current time.
# supported value: Pending / Running / Succeeded / Failed / Unknown
podPhaseSelectors: [String!]
}
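# Illustrative example (comment only; the value format is an assumption based
# on the field description above): selecting two pods by name in the "app"
# namespace would set the pods map to something like
#
#   pods: { "app": ["pod-0", "pod-1"] }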
type PodHttpChaosReplaceActions @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.PodHttpChaosReplaceActions") {
# path is a rule to replace the uri path in the http request.
path: String
# method is a rule to replace http method in request.
method: String
# code is a rule to replace http status code in response.
code: Int
# body is a rule to replace http message body in target.
body: String
# queries is a rule to replace uri queries in http request.
# For example, with value ` + "`" + `{ "foo": "unknown" }` + "`" + `, the ` + "`" + `/?foo=bar` + "`" + ` will be altered to ` + "`" + `/?foo=unknown` + "`" + `.
queries: Map
# headers is a rule to replace http headers of target.
# The key-value pairs represent header name and header value pairs.
headers: Map
}
# PodHttpChaosPatchActions defines possible patch-actions of HttpChaos.
type PodHttpChaosPatchActions @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.PodHttpChaosPatchActions") {
# body is a rule to patch message body of target.
body: PodHttpChaosPatchBodyAction
# queries is a rule to append uri queries of target(Request only).
# For example: ` + "`" + `[["foo", "bar"], ["foo", "unknown"]]` + "`" + `.
queries: [[String!]!]
# headers is a rule to append http headers of target.
# For example: ` + "`" + `[["Set-Cookie", "<one cookie>"], ["Set-Cookie", "<another cookie>"]]` + "`" + `.
headers: [[String!]!]
}
# PodHttpChaosPatchBodyAction defines patch body action of HttpChaos.
type PodHttpChaosPatchBodyAction @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.PodHttpChaosPatchBodyAction") {
# type represents the patch type, only support ` + "`" + `JSON` + "`" + ` as [merge patch json](https://tools.ietf.org/html/rfc7396) currently.
type: String!
# value is the patch contents.
value: String!
}
type HTTPChaosStatus @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.HTTPChaosStatus") {
# conditions represents the current global condition of the chaos
conditions: [ChaosCondition!]
# experiment records the last experiment state.
experiment: ExperimentStatus
# instances always specifies podhttpchaos generation or empty
instances: Map
}
type ChaosCondition @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.ChaosCondition") {
type: String!
status: String!
reason: String
}
type ExperimentStatus @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.ExperimentStatus") {
desiredPhase: String!
# Records are used to track the running status
Records: [Record!]
}
type Record @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.Record") {
id: String!
selectorKey: String!
phase: String!
}
type PodNetworkChaos @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.PodNetworkChaos") {
kind: String!
apiVersion: String!
name: String!
generateName: String!
namespace: String!
selfLink: String!
uid: String!
resourceVersion: String!
generation: Int!
creationTimestamp: Time!
deletionTimestamp: Time
deletionGracePeriodSeconds: Int
labels: Map
annotations: Map
ownerReferences: [OwnerReference!]
finalizers: [String!]
clusterName: String!
spec: PodNetworkChaosSpec!
status: PodNetworkChaosStatus!
pod: Pod! @goField(forceResolver: true)
}
# PodNetworkChaosSpec defines the desired state of PodNetworkChaos
type PodNetworkChaosSpec @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.PodNetworkChaosSpec") {
# The ipset on the pod
ipSets: [RawIPSet!]
# The iptables rules on the pod
iptables: [RawIptables!]
# The tc rules on the pod
trafficControls: [RawTrafficControl!]
}
# PodNetworkChaosStatus defines the observed state of PodNetworkChaos
type PodNetworkChaosStatus @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.PodNetworkChaosStatus") {
failedMessage: String!
observedGeneration: Int!
}
# RawIPSet represents an ipset on specific pod
type RawIPSet @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.RawIPSet") {
# The name of ipset
name: String!
# The contents of ipset
cidrs: [String!]!
# The name and namespace of the source network chaos
source: String!
}
# RawIptables represents the iptables rules on specific pod
type RawIptables @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.RawIptables") {
# The name of iptables chain
name: String!
# The name of related ipset
ipSets: [String!]!
# The block direction of this iptables rule
direction: String!
# The name and namespace of the source network chaos
source: String!
}
# RawTrafficControl represents the traffic control chaos on specific pod
type RawTrafficControl @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.RawTrafficControl") {
# The type of traffic control
type: String!
# delay represents the detail about delay action
delay: DelaySpec
# loss represents the detail about loss action
loss: LossSpec
# duplicate represents the detail about duplicate action
duplicate: DuplicateSpec
# corrupt represents the detail about corrupt action
corrupt: CorruptSpec
# bandwidth represents the detail about bandwidth control action
Bandwidth: BandwidthSpec
# The name of target ipset
ipSet: String
# The name and namespace of the source network chaos
source: String
}
# DelaySpec defines detail of a delay action
type DelaySpec @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.DelaySpec") {
latency: String!
correlation: String
jitter: String
reorder: ReorderSpec
}
# LossSpec defines detail of a loss action
type LossSpec @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.LossSpec") {
loss: String!
correlation: String
}
# DuplicateSpec defines detail of a duplicate action
type DuplicateSpec @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.DuplicateSpec") {
duplicate: String!
correlation: String
}
# CorruptSpec defines detail of a corrupt action
type CorruptSpec @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.CorruptSpec") {
corrupt: String!
correlation: String
}
# BandwidthSpec defines detail of bandwidth limit.
type BandwidthSpec @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.BandwidthSpec") {
# rate is the speed knob. Allows bps, kbps, mbps, gbps, tbps unit. bps means bytes per second.
rate: String!
# limit is the number of bytes that can be queued waiting for tokens to become available.
limit: Int!
# buffer is the maximum amount of bytes that tokens can be available for instantaneously.
buffer: Int!
# peakrate is the maximum depletion rate of the bucket.
# The peakrate does not need to be set; it is only necessary
# if perfect millisecond timescale shaping is required.
peakrate: Int
# minburst specifies the size of the peakrate bucket. For perfect
# accuracy, it should be set to the MTU of the interface. If a
# peakrate is needed, but some burstiness is acceptable, this
# size can be raised. A 3000 byte minburst allows around 3mbit/s
# of peakrate, given 1000 byte packets.
minburst: Int
}
# ReorderSpec defines details of packet reorder.
type ReorderSpec @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.ReorderSpec") {
reorder: String!
correlation: String
gap: Int
}
type NetworkChaos @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.NetworkChaos") {
kind: String!
apiVersion: String!
name: String!
generateName: String!
namespace: String!
selfLink: String!
uid: String!
resourceVersion: String!
generation: Int!
creationTimestamp: Time!
deletionTimestamp: Time
deletionGracePeriodSeconds: Int
labels: Map
annotations: Map
ownerReferences: [OwnerReference!]
finalizers: [String!]
clusterName: String!
podnetwork: [PodNetworkChaos!] @goField(forceResolver: true)
}
type StressChaos @goModel(model: "github.com/chaos-mesh/chaos-mesh/api/v1alpha1.StressChaos") {
kind: String!
apiVersion: String!
name: String!
generateName: String!
namespace: String!
selfLink: String!
uid: String!
resourceVersion: String!
generation: Int!
creationTimestamp: Time!
deletionTimestamp: Time
deletionGracePeriodSeconds: Int
labels: Map
annotations: Map
ownerReferences: [OwnerReference!]
finalizers: [String!]
clusterName: String!
}
`, BuiltIn: false},
}
var parsedSchema = gqlparser.MustLoadSchema(sources...)
// endregion ************************** generated!.gotpl **************************
// region ***************************** args.gotpl *****************************
func (ec *executionContext) field_Logger_component_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 string
if tmp, ok := rawArgs["ns"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("ns"))
arg0, err = ec.unmarshalNString2string(ctx, tmp)
if err != nil {
return nil, err
}
}
args["ns"] = arg0
var arg1 model.Component
if tmp, ok := rawArgs["component"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("component"))
arg1, err = ec.unmarshalNComponent2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋpkgᚋctrlserverᚋgraphᚋmodelᚐComponent(ctx, tmp)
if err != nil {
return nil, err
}
}
args["component"] = arg1
return args, nil
}
func (ec *executionContext) field_Logger_pod_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 string
if tmp, ok := rawArgs["ns"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("ns"))
arg0, err = ec.unmarshalNString2string(ctx, tmp)
if err != nil {
return nil, err
}
}
args["ns"] = arg0
var arg1 string
if tmp, ok := rawArgs["name"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name"))
arg1, err = ec.unmarshalNString2string(ctx, tmp)
if err != nil {
return nil, err
}
}
args["name"] = arg1
return args, nil
}
func (ec *executionContext) field_Namespace_component_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 model.Component
if tmp, ok := rawArgs["component"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("component"))
arg0, err = ec.unmarshalNComponent2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋpkgᚋctrlserverᚋgraphᚋmodelᚐComponent(ctx, tmp)
if err != nil {
return nil, err
}
}
args["component"] = arg0
return args, nil
}
func (ec *executionContext) field_Namespace_httpchaos_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 *string
if tmp, ok := rawArgs["name"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name"))
arg0, err = ec.unmarshalOString2ᚖstring(ctx, tmp)
if err != nil {
return nil, err
}
}
args["name"] = arg0
return args, nil
}
func (ec *executionContext) field_Namespace_iochaos_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 *string
if tmp, ok := rawArgs["name"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name"))
arg0, err = ec.unmarshalOString2ᚖstring(ctx, tmp)
if err != nil {
return nil, err
}
}
args["name"] = arg0
return args, nil
}
func (ec *executionContext) field_Namespace_networkchaos_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 *string
if tmp, ok := rawArgs["name"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name"))
arg0, err = ec.unmarshalOString2ᚖstring(ctx, tmp)
if err != nil {
return nil, err
}
}
args["name"] = arg0
return args, nil
}
func (ec *executionContext) field_Namespace_pod_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 *string
if tmp, ok := rawArgs["name"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name"))
arg0, err = ec.unmarshalOString2ᚖstring(ctx, tmp)
if err != nil {
return nil, err
}
}
args["name"] = arg0
return args, nil
}
func (ec *executionContext) field_Namespace_podhttpchaos_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 *string
if tmp, ok := rawArgs["name"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name"))
arg0, err = ec.unmarshalOString2ᚖstring(ctx, tmp)
if err != nil {
return nil, err
}
}
args["name"] = arg0
return args, nil
}
func (ec *executionContext) field_Namespace_podiochaos_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 *string
if tmp, ok := rawArgs["name"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name"))
arg0, err = ec.unmarshalOString2ᚖstring(ctx, tmp)
if err != nil {
return nil, err
}
}
args["name"] = arg0
return args, nil
}
func (ec *executionContext) field_Namespace_podnetworkchaos_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 *string
if tmp, ok := rawArgs["name"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name"))
arg0, err = ec.unmarshalOString2ᚖstring(ctx, tmp)
if err != nil {
return nil, err
}
}
args["name"] = arg0
return args, nil
}
func (ec *executionContext) field_Namespace_stresschaos_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 *string
if tmp, ok := rawArgs["name"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name"))
arg0, err = ec.unmarshalOString2ᚖstring(ctx, tmp)
if err != nil {
return nil, err
}
}
args["name"] = arg0
return args, nil
}
func (ec *executionContext) field_Query___type_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 string
if tmp, ok := rawArgs["name"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name"))
arg0, err = ec.unmarshalNString2string(ctx, tmp)
if err != nil {
return nil, err
}
}
args["name"] = arg0
return args, nil
}
func (ec *executionContext) field_Query_namespace_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 *string
if tmp, ok := rawArgs["ns"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("ns"))
arg0, err = ec.unmarshalOString2ᚖstring(ctx, tmp)
if err != nil {
return nil, err
}
}
args["ns"] = arg0
return args, nil
}
func (ec *executionContext) field___Type_enumValues_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 bool
if tmp, ok := rawArgs["includeDeprecated"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated"))
arg0, err = ec.unmarshalOBoolean2bool(ctx, tmp)
if err != nil {
return nil, err
}
}
args["includeDeprecated"] = arg0
return args, nil
}
func (ec *executionContext) field___Type_fields_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 bool
if tmp, ok := rawArgs["includeDeprecated"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated"))
arg0, err = ec.unmarshalOBoolean2bool(ctx, tmp)
if err != nil {
return nil, err
}
}
args["includeDeprecated"] = arg0
return args, nil
}
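// Illustrative note (a sketch, not generated code): each field_*_args helper
// above is invoked both from the complexity switch (with context.TODO()) and
// from the matching field resolver, roughly as:
//
//	rawArgs := field.ArgumentMap(ec.Variables)
//	args, err := ec.field_Query_namespace_args(ctx, rawArgs)
//	if err != nil {
//		ec.Error(ctx, err)
//		return graphql.Null
//	}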
// endregion ***************************** args.gotpl *****************************
// region ************************** directives.gotpl **************************
// endregion ************************** directives.gotpl **************************
// region **************************** field.gotpl *****************************
func (ec *executionContext) _AttrOverrideSpec_ino(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.AttrOverrideSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "AttrOverrideSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.AttrOverrideSpec().Ino(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int)
fc.Result = res
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
}
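// Illustrative sketch (an assumption: the receiver name and model field type
// are guesses, and the implementation lives in the resolver package, not in
// this generated file): IsResolver is true for this field, so gqlgen routes it
// through a user-written resolver, e.g. converting a model *uint64 to the
// GraphQL Int binding:
//
//	func (r *attrOverrideSpecResolver) Ino(ctx context.Context, obj *v1alpha1.AttrOverrideSpec) (*int, error) {
//		if obj.Ino == nil {
//			return nil, nil
//		}
//		v := int(*obj.Ino)
//		return &v, nil
//	}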
func (ec *executionContext) _AttrOverrideSpec_size(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.AttrOverrideSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "AttrOverrideSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.AttrOverrideSpec().Size(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int)
fc.Result = res
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
}
func (ec *executionContext) _AttrOverrideSpec_blocks(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.AttrOverrideSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "AttrOverrideSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.AttrOverrideSpec().Blocks(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int)
fc.Result = res
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
}
func (ec *executionContext) _AttrOverrideSpec_atime(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.AttrOverrideSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "AttrOverrideSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Atime, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1alpha1.Timespec)
fc.Result = res
return ec.marshalOTimespec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐTimespec(ctx, field.Selections, res)
}
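// The runic characters in generated marshaler names are gqlgen's encoding of
// characters that are illegal in Go identifiers: ᚖ stands for '*', ᚕ for
// '[]', ᚗ for '.', ᚋ for '/', ᚑ for '-', ᚄ marks a non-null element, and ᚐ
// separates the package path from the type name.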
func (ec *executionContext) _AttrOverrideSpec_mtime(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.AttrOverrideSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "AttrOverrideSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Mtime, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1alpha1.Timespec)
fc.Result = res
return ec.marshalOTimespec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐTimespec(ctx, field.Selections, res)
}
func (ec *executionContext) _AttrOverrideSpec_ctime(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.AttrOverrideSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "AttrOverrideSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Ctime, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1alpha1.Timespec)
fc.Result = res
return ec.marshalOTimespec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐTimespec(ctx, field.Selections, res)
}
func (ec *executionContext) _AttrOverrideSpec_kind(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.AttrOverrideSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "AttrOverrideSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.AttrOverrideSpec().Kind(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) _AttrOverrideSpec_perm(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.AttrOverrideSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "AttrOverrideSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.AttrOverrideSpec().Perm(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int)
fc.Result = res
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
}
func (ec *executionContext) _AttrOverrideSpec_nlink(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.AttrOverrideSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "AttrOverrideSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.AttrOverrideSpec().Nlink(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int)
fc.Result = res
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
}
func (ec *executionContext) _AttrOverrideSpec_uid(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.AttrOverrideSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "AttrOverrideSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.AttrOverrideSpec().UID(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int)
fc.Result = res
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
}
func (ec *executionContext) _AttrOverrideSpec_gid(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.AttrOverrideSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "AttrOverrideSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.AttrOverrideSpec().Gid(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int)
fc.Result = res
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
}
func (ec *executionContext) _AttrOverrideSpec_rdev(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.AttrOverrideSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "AttrOverrideSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.AttrOverrideSpec().Rdev(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int)
fc.Result = res
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
}
func (ec *executionContext) _BandwidthSpec_rate(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.BandwidthSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "BandwidthSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Rate, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
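// The "must not be null" branch above is the generated enforcement of a
// non-null schema field: marshalN* helpers pair with that check, while
// nullable fields use marshalO* helpers and simply return graphql.Null.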
func (ec *executionContext) _BandwidthSpec_limit(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.BandwidthSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "BandwidthSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.BandwidthSpec().Limit(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int)
fc.Result = res
return ec.marshalNInt2int(ctx, field.Selections, res)
}
func (ec *executionContext) _BandwidthSpec_buffer(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.BandwidthSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "BandwidthSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.BandwidthSpec().Buffer(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int)
fc.Result = res
return ec.marshalNInt2int(ctx, field.Selections, res)
}
func (ec *executionContext) _BandwidthSpec_peakrate(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.BandwidthSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "BandwidthSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.BandwidthSpec().Peakrate(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int)
fc.Result = res
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
}
func (ec *executionContext) _BandwidthSpec_minburst(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.BandwidthSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "BandwidthSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.BandwidthSpec().Minburst(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int)
fc.Result = res
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
}
func (ec *executionContext) _ChaosCondition_type(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.ChaosCondition) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ChaosCondition",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.ChaosCondition().Type(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _ChaosCondition_status(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.ChaosCondition) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ChaosCondition",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.ChaosCondition().Status(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _ChaosCondition_reason(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.ChaosCondition) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ChaosCondition",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Reason, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerState_waiting(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerState) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerState",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Waiting, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1.ContainerStateWaiting)
fc.Result = res
return ec.marshalOContainerStateWaiting2ᚖk8sᚗioᚋapiᚋcoreᚋv1ᚐContainerStateWaiting(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerState_running(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerState) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerState",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Running, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1.ContainerStateRunning)
fc.Result = res
return ec.marshalOContainerStateRunning2ᚖk8sᚗioᚋapiᚋcoreᚋv1ᚐContainerStateRunning(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerState_terminated(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerState) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerState",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Terminated, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1.ContainerStateTerminated)
fc.Result = res
return ec.marshalOContainerStateTerminated2ᚖk8sᚗioᚋapiᚋcoreᚋv1ᚐContainerStateTerminated(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerStateRunning_startedAt(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerStateRunning) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerStateRunning",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.ContainerStateRunning().StartedAt(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalOTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
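// startedAt goes through a resolver because the Kubernetes type stores the
// timestamp as metav1.Time; the resolver is expected to hand back the
// *time.Time this marshaler consumes (an assumption about the hand-written
// resolver, which lives outside this file).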
func (ec *executionContext) _ContainerStateTerminated_exitCode(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerStateTerminated) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerStateTerminated",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ExitCode, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int32)
fc.Result = res
return ec.marshalNInt2int32(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerStateTerminated_signal(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerStateTerminated) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerStateTerminated",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Signal, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(int32)
fc.Result = res
return ec.marshalOInt2int32(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerStateTerminated_reason(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerStateTerminated) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerStateTerminated",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Reason, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerStateTerminated_message(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerStateTerminated) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerStateTerminated",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Message, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerStateTerminated_startedAt(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerStateTerminated) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerStateTerminated",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.ContainerStateTerminated().StartedAt(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalOTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerStateTerminated_finishedAt(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerStateTerminated) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerStateTerminated",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.ContainerStateTerminated().FinishedAt(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalOTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerStateTerminated_containerID(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerStateTerminated) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerStateTerminated",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ContainerID, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerStateWaiting_reason(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerStateWaiting) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerStateWaiting",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Reason, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerStateWaiting_message(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerStateWaiting) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerStateWaiting",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Message, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerStatus_name(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerStatus_State(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.State, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(v1.ContainerState)
fc.Result = res
return ec.marshalOContainerState2k8sᚗioᚋapiᚋcoreᚋv1ᚐContainerState(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerStatus_lastTerminationState(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.LastTerminationState, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(v1.ContainerState)
fc.Result = res
return ec.marshalOContainerState2k8sᚗioᚋapiᚋcoreᚋv1ᚐContainerState(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerStatus_ready(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Ready, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(bool)
fc.Result = res
return ec.marshalNBoolean2bool(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerStatus_restartCount(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.RestartCount, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int32)
fc.Result = res
return ec.marshalNInt2int32(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerStatus_image(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Image, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerStatus_imageID(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ImageID, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerStatus_containerID(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ContainerID, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _ContainerStatus_started(ctx context.Context, field graphql.CollectedField, obj *v1.ContainerStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ContainerStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Started, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*bool)
fc.Result = res
return ec.marshalOBoolean2ᚖbool(ctx, field.Selections, res)
}
func (ec *executionContext) _CorruptSpec_corrup(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.CorruptSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "CorruptSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.CorruptSpec().Corrup(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
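// Note: the "corrup"/Corrup spelling above mirrors the field name as it is
// spelled in the GraphQL schema; the generator carries identifiers over
// verbatim, so correcting the spelling here alone would break the generated
// bindings.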
func (ec *executionContext) _CorruptSpec_correlation(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.CorruptSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "CorruptSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Correlation, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _DelaySpec_latency(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.DelaySpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "DelaySpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Latency, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _DelaySpec_correlation(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.DelaySpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "DelaySpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Correlation, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _DelaySpec_jitter(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.DelaySpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "DelaySpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Jitter, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _DelaySpec_reorder(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.DelaySpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "DelaySpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Reorder, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1alpha1.ReorderSpec)
fc.Result = res
return ec.marshalOReorderSpec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐReorderSpec(ctx, field.Selections, res)
}
func (ec *executionContext) _DuplicateSpec_duplicate(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.DuplicateSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "DuplicateSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Duplicate, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _DuplicateSpec_correlation(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.DuplicateSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "DuplicateSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Correlation, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _ExperimentStatus_desiredPhase(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.ExperimentStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ExperimentStatus",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.ExperimentStatus().DesiredPhase(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _ExperimentStatus_Records(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.ExperimentStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ExperimentStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Records, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]*v1alpha1.Record)
fc.Result = res
return ec.marshalORecord2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐRecordᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _Fd_fd(ctx context.Context, field graphql.CollectedField, obj *model.Fd) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Fd",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Fd, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Fd_target(ctx context.Context, field graphql.CollectedField, obj *model.Fd) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Fd",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Target, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaos_kind(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Kind, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaos_apiVersion(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.APIVersion, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaos_name(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaos_generateName(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.GenerateName, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaos_namespace(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Namespace, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaos_selfLink(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.SelfLink, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaos_uid(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.HTTPChaos().UID(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaos_resourceVersion(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ResourceVersion, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaos_generation(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Generation, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalNInt2int64(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaos_creationTimestamp(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.HTTPChaos().CreationTimestamp(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalNTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaos_deletionTimestamp(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.HTTPChaos().DeletionTimestamp(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalOTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaos_deletionGracePeriodSeconds(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.DeletionGracePeriodSeconds, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int64)
fc.Result = res
return ec.marshalOInt2ᚖint64(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaos_labels(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.HTTPChaos().Labels(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaos_annotations(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.HTTPChaos().Annotations(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaos_ownerReferences(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.OwnerReferences, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v11.OwnerReference)
fc.Result = res
return ec.marshalOOwnerReference2ᚕk8sᚗioᚋapimachineryᚋpkgᚋapisᚋmetaᚋv1ᚐOwnerReferenceᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaos_finalizers(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Finalizers, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalOString2ᚕstringᚄ(ctx, field.Selections, res)
}
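// _HTTPChaos_clusterName marks its result non-null in the schema, so the
// generated code adds a guard the optional fields above do not have: if the
// resolved value is nil and no field error has been recorded yet, it emits
// "must not be null" before returning graphql.Null, turning a violated
// schema contract into an explicit error instead of a silent null.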
func (ec *executionContext) _HTTPChaos_clusterName(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ClusterName, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaos_spec(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Spec, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(v1alpha1.HTTPChaosSpec)
fc.Result = res
return ec.marshalNHTTPChaosSpec2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐHTTPChaosSpec(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaos_status(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Status, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(v1alpha1.HTTPChaosStatus)
fc.Result = res
return ec.marshalNHTTPChaosStatus2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐHTTPChaosStatus(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaos_podhttp(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.HTTPChaos().Podhttp(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]*v1alpha1.PodHttpChaos)
fc.Result = res
return ec.marshalOPodHTTPChaos2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosᚄ(ctx, field.Selections, res)
}
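// The HTTPChaosSpec resolvers below keep the same split seen on HTTPChaos:
// plain struct fields (selector, value, abort, delay, replace, patch, port,
// path, method, code, duration) are read directly with IsResolver: false,
// while fields whose Go representation has no direct GraphQL mapping (mode,
// target, requestHeaders, responseHeaders) are routed through
// ec.resolvers.HTTPChaosSpec().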
func (ec *executionContext) _HTTPChaosSpec_selector(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Selector, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(v1alpha1.PodSelectorSpec)
fc.Result = res
return ec.marshalNPodSelectorSpec2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodSelectorSpec(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaosSpec_mode(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaosSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.HTTPChaosSpec().Mode(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaosSpec_value(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Value, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaosSpec_target(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaosSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.HTTPChaosSpec().Target(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
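// Marshaler helper names encode the Go type in the identifier using runes
// that are legal in Go names: ᚖ appears to stand for a pointer and ᚕ for a
// slice (so marshalOBoolean2ᚖbool below marshals a *bool, and
// marshalOString2ᚕstringᚄ a []string), giving gqlgen one generated helper
// per concrete type. This reading of the mangling is inferred from the
// signatures in this file.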
func (ec *executionContext) _HTTPChaosSpec_abort(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Abort, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*bool)
fc.Result = res
return ec.marshalOBoolean2ᚖbool(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaosSpec_delay(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Delay, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaosSpec_replace(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Replace, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1alpha1.PodHttpChaosReplaceActions)
fc.Result = res
return ec.marshalOPodHttpChaosReplaceActions2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosReplaceActions(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaosSpec_patch(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Patch, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1alpha1.PodHttpChaosPatchActions)
fc.Result = res
return ec.marshalOPodHttpChaosPatchActions2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosPatchActions(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaosSpec_port(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Port, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(int32)
fc.Result = res
return ec.marshalOInt2int32(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaosSpec_path(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Path, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaosSpec_method(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Method, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaosSpec_code(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Code, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int32)
fc.Result = res
return ec.marshalOInt2ᚖint32(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaosSpec_requestHeaders(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaosSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.HTTPChaosSpec().RequestHeaders(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaosSpec_responseHeaders(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaosSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.HTTPChaosSpec().ResponseHeaders(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaosSpec_duration(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Duration, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
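// HTTPChaosStatus reads conditions and experiment straight from the status
// struct; only instances is delegated to ec.resolvers.HTTPChaosStatus(),
// which must hand back a map[string]interface{} for the Map scalar.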
func (ec *executionContext) _HTTPChaosStatus_conditions(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaosStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaosStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Conditions, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v1alpha1.ChaosCondition)
fc.Result = res
return ec.marshalOChaosCondition2ᚕgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐChaosConditionᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaosStatus_experiment(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaosStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaosStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Experiment, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(v1alpha1.ExperimentStatus)
fc.Result = res
return ec.marshalOExperimentStatus2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐExperimentStatus(ctx, field.Selections, res)
}
func (ec *executionContext) _HTTPChaosStatus_instances(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.HTTPChaosStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HTTPChaosStatus",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.HTTPChaosStatus().Instances(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
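// From here the file repeats the whole pattern for the IOChaos object:
// ObjectMeta strings (kind, apiVersion, name, namespace, selfLink, ...) are
// direct non-null reads, while uid, creationTimestamp, deletionTimestamp,
// labels and annotations delegate to ec.resolvers.IOChaos(), presumably for
// the types.UID, metav1.Time and map conversions.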
func (ec *executionContext) _IOChaos_kind(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Kind, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaos_apiVersion(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.APIVersion, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaos_name(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaos_generateName(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.GenerateName, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaos_namespace(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Namespace, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaos_selfLink(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.SelfLink, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaos_uid(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaos().UID(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaos_resourceVersion(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ResourceVersion, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaos_generation(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Generation, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalNInt2int64(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaos_creationTimestamp(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaos().CreationTimestamp(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalNTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaos_deletionTimestamp(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaos().DeletionTimestamp(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalOTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaos_deletionGracePeriodSeconds(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.DeletionGracePeriodSeconds, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int64)
fc.Result = res
return ec.marshalOInt2ᚖint64(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaos_labels(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaos().Labels(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaos_annotations(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaos().Annotations(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaos_ownerReferences(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.OwnerReferences, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v11.OwnerReference)
fc.Result = res
return ec.marshalOOwnerReference2ᚕk8sᚗioᚋapimachineryᚋpkgᚋapisᚋmetaᚋv1ᚐOwnerReferenceᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaos_finalizers(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Finalizers, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalOString2ᚕstringᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaos_clusterName(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ClusterName, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaos_spec(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Spec, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(v1alpha1.IOChaosSpec)
fc.Result = res
return ec.marshalNIOChaosSpec2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐIOChaosSpec(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaos_status(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Status, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(v1alpha1.IOChaosStatus)
fc.Result = res
return ec.marshalNIOChaosStatus2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐIOChaosStatus(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaos_podios(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaos().Podios(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]*v1alpha1.PodIOChaos)
fc.Result = res
return ec.marshalOPodIOChaos2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodIOChaosᚄ(ctx, field.Selections, res)
}
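// IOChaosAction delegates most of its numeric attributes (ino, size,
// blocks, perm, nlink, uid, gid, rdev), likely because the Kubernetes API
// types use unsigned integer widths that have to be narrowed to the *int
// the GraphQL Int marshaler expects; percent, faults, latency and the
// timespec fields (atime, mtime, ctime) are plain struct reads.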
func (ec *executionContext) _IOChaosAction_type(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaosAction().Type(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_path(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Path, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_methods(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaosAction().Methods(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalOString2ᚕstringᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_percent(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Percent, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(int)
fc.Result = res
return ec.marshalOInt2int(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_faults(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Faults, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v1alpha1.IoFault)
fc.Result = res
return ec.marshalOIoFault2ᚕgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐIoFaultᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_latency(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Latency, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_ino(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaosAction().Ino(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int)
fc.Result = res
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_size(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaosAction().Size(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int)
fc.Result = res
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_blocks(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaosAction().Blocks(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int)
fc.Result = res
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_atime(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Atime, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1alpha1.Timespec)
fc.Result = res
return ec.marshalOTimespec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐTimespec(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_mtime(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Mtime, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1alpha1.Timespec)
fc.Result = res
return ec.marshalOTimespec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐTimespec(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_ctime(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Ctime, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1alpha1.Timespec)
fc.Result = res
return ec.marshalOTimespec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐTimespec(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_kind(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaosAction().Kind(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_perm(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaosAction().Perm(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int)
fc.Result = res
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_nlink(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaosAction().Nlink(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int)
fc.Result = res
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_uid(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaosAction().UID(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int)
fc.Result = res
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_gid(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaosAction().Gid(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int)
fc.Result = res
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_rdev(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaosAction().Rdev(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int)
fc.Result = res
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_filling(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaosAction().Filling(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_maxOccurrences(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.MaxOccurrences, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalOInt2int64(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_maxLength(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.MaxLength, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalOInt2int64(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosAction_source(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosAction",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Source, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
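// IOChaosSpec continues with the familiar split: containerNames, selector,
// value and delay come straight from the spec struct, while mode, action
// and errno are resolver-backed (errno, for example, comes back as a *int
// for the optional Int field).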
func (ec *executionContext) _IOChaosSpec_containerNames(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ContainerNames, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalOString2ᚕstringᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosSpec_selector(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Selector, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(v1alpha1.PodSelectorSpec)
fc.Result = res
return ec.marshalNPodSelectorSpec2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodSelectorSpec(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosSpec_mode(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaosSpec().Mode(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosSpec_value(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Value, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosSpec_action(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaosSpec().Action(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosSpec_delay(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Delay, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosSpec_errno(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaosSpec().Errno(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int)
fc.Result = res
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosSpec_attr(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Attr, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1alpha1.AttrOverrideSpec)
fc.Result = res
return ec.marshalOAttrOverrideSpec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐAttrOverrideSpec(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosSpec_mistake(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Mistake, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1alpha1.MistakeSpec)
fc.Result = res
return ec.marshalOMistakeSpec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐMistakeSpec(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosSpec_path(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Path, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosSpec_methods(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaosSpec().Methods(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalOString2ᚕstringᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosSpec_percent(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Percent, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(int)
fc.Result = res
return ec.marshalOInt2int(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosSpec_volumePath(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.VolumePath, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosSpec_duration(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Duration, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosStatus_conditions(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Conditions, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v1alpha1.ChaosCondition)
fc.Result = res
return ec.marshalOChaosCondition2ᚕgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐChaosConditionᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosStatus_experiment(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Experiment, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(v1alpha1.ExperimentStatus)
fc.Result = res
return ec.marshalOExperimentStatus2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐExperimentStatus(ctx, field.Selections, res)
}
func (ec *executionContext) _IOChaosStatus_instances(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IOChaosStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IOChaosStatus",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IOChaosStatus().Instances(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _IoFault_errno(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IoFault) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IoFault",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.IoFault().Errno(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int)
fc.Result = res
return ec.marshalNInt2int(ctx, field.Selections, res)
}
func (ec *executionContext) _IoFault_weight(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.IoFault) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IoFault",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Weight, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int32)
fc.Result = res
return ec.marshalNInt2int32(ctx, field.Selections, res)
}
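// The Logger fields below are subscription-style: rather than returning a
// graphql.Marshaler directly, they return a thunk that receives values from
// the resolver's <-chan string and writes one {alias: value} object per
// message, ending the stream (returning nil) once the channel is closed.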
func (ec *executionContext) _Logger_component(ctx context.Context, field graphql.CollectedField) (ret func() graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
fc := &graphql.FieldContext{
Object: "Logger",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Logger_component_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return nil
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Logger().Component(rctx, args["ns"].(string), args["component"].(model.Component))
})
if err != nil {
ec.Error(ctx, err)
return nil
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return nil
}
return func() graphql.Marshaler {
res, ok := <-resTmp.(<-chan string)
if !ok {
return nil
}
return graphql.WriterFunc(func(w io.Writer) {
w.Write([]byte{'{'})
graphql.MarshalString(field.Alias).MarshalGQL(w)
w.Write([]byte{':'})
ec.marshalNString2string(ctx, field.Selections, res).MarshalGQL(w)
w.Write([]byte{'}'})
})
}
}
func (ec *executionContext) _Logger_pod(ctx context.Context, field graphql.CollectedField) (ret func() graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
fc := &graphql.FieldContext{
Object: "Logger",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Logger_pod_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return nil
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Logger().Pod(rctx, args["ns"].(string), args["name"].(string))
})
if err != nil {
ec.Error(ctx, err)
return nil
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return nil
}
return func() graphql.Marshaler {
res, ok := <-resTmp.(<-chan string)
if !ok {
return nil
}
return graphql.WriterFunc(func(w io.Writer) {
w.Write([]byte{'{'})
graphql.MarshalString(field.Alias).MarshalGQL(w)
w.Write([]byte{':'})
ec.marshalNString2string(ctx, field.Selections, res).MarshalGQL(w)
w.Write([]byte{'}'})
})
}
}
func (ec *executionContext) _LossSpec_loss(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.LossSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "LossSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Loss, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _LossSpec_correlation(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.LossSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "LossSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Correlation, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _MistakeSpec_filling(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.MistakeSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "MistakeSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.MistakeSpec().Filling(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) _MistakeSpec_maxOccurrences(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.MistakeSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "MistakeSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.MaxOccurrences, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalOInt2int64(ctx, field.Selections, res)
}
func (ec *executionContext) _MistakeSpec_maxLength(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.MistakeSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "MistakeSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.MaxLength, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalOInt2int64(ctx, field.Selections, res)
}
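// Namespace fields take query arguments: the generated code first decodes
// them via the matching field_Namespace_*_args helper, stores them on the
// field context, and then invokes the corresponding resolver with the typed
// argument values.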
func (ec *executionContext) _Namespace_ns(ctx context.Context, field graphql.CollectedField, obj *model.Namespace) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Namespace",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Ns, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Namespace_component(ctx context.Context, field graphql.CollectedField, obj *model.Namespace) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Namespace",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Namespace_component_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Namespace().Component(rctx, obj, args["component"].(model.Component))
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]*v1.Pod)
fc.Result = res
return ec.marshalOPod2ᚕᚖk8sᚗioᚋapiᚋcoreᚋv1ᚐPodᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _Namespace_pod(ctx context.Context, field graphql.CollectedField, obj *model.Namespace) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Namespace",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Namespace_pod_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Namespace().Pod(rctx, obj, args["name"].(*string))
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]*v1.Pod)
fc.Result = res
return ec.marshalOPod2ᚕᚖk8sᚗioᚋapiᚋcoreᚋv1ᚐPodᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _Namespace_stresschaos(ctx context.Context, field graphql.CollectedField, obj *model.Namespace) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Namespace",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Namespace_stresschaos_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Namespace().Stresschaos(rctx, obj, args["name"].(*string))
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]*v1alpha1.StressChaos)
fc.Result = res
return ec.marshalOStressChaos2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐStressChaosᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _Namespace_iochaos(ctx context.Context, field graphql.CollectedField, obj *model.Namespace) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Namespace",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Namespace_iochaos_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Namespace().Iochaos(rctx, obj, args["name"].(*string))
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]*v1alpha1.IOChaos)
fc.Result = res
return ec.marshalOIOChaos2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐIOChaosᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _Namespace_podiochaos(ctx context.Context, field graphql.CollectedField, obj *model.Namespace) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Namespace",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Namespace_podiochaos_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Namespace().Podiochaos(rctx, obj, args["name"].(*string))
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]*v1alpha1.PodIOChaos)
fc.Result = res
return ec.marshalOPodIOChaos2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodIOChaosᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _Namespace_httpchaos(ctx context.Context, field graphql.CollectedField, obj *model.Namespace) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Namespace",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Namespace_httpchaos_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Namespace().Httpchaos(rctx, obj, args["name"].(*string))
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]*v1alpha1.HTTPChaos)
fc.Result = res
return ec.marshalOHTTPChaos2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐHTTPChaosᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _Namespace_podhttpchaos(ctx context.Context, field graphql.CollectedField, obj *model.Namespace) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Namespace",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Namespace_podhttpchaos_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Namespace().Podhttpchaos(rctx, obj, args["name"].(*string))
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]*v1alpha1.PodHttpChaos)
fc.Result = res
return ec.marshalOPodHTTPChaos2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _Namespace_networkchaos(ctx context.Context, field graphql.CollectedField, obj *model.Namespace) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Namespace",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Namespace_networkchaos_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Namespace().Networkchaos(rctx, obj, args["name"].(*string))
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]*v1alpha1.NetworkChaos)
fc.Result = res
return ec.marshalONetworkChaos2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐNetworkChaosᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _Namespace_podnetworkchaos(ctx context.Context, field graphql.CollectedField, obj *model.Namespace) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Namespace",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Namespace_podnetworkchaos_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Namespace().Podnetworkchaos(rctx, obj, args["name"].(*string))
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]*v1alpha1.PodNetworkChaos)
fc.Result = res
return ec.marshalOPodNetworkChaos2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodNetworkChaosᚄ(ctx, field.Selections, res)
}
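// NetworkChaos metadata fields (kind, apiVersion, name, namespace, ...) read
// directly from the object's Kubernetes metadata; only values without a
// direct GraphQL representation (uid, timestamps, label/annotation maps) go
// through the NetworkChaos resolver for conversion.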
func (ec *executionContext) _NetworkChaos_kind(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.NetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "NetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Kind, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _NetworkChaos_apiVersion(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.NetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "NetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.APIVersion, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _NetworkChaos_name(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.NetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "NetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _NetworkChaos_generateName(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.NetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "NetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.GenerateName, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _NetworkChaos_namespace(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.NetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "NetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Namespace, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _NetworkChaos_selfLink(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.NetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "NetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.SelfLink, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _NetworkChaos_uid(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.NetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "NetworkChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.NetworkChaos().UID(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _NetworkChaos_resourceVersion(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.NetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "NetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ResourceVersion, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _NetworkChaos_generation(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.NetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "NetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Generation, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalNInt2int64(ctx, field.Selections, res)
}
func (ec *executionContext) _NetworkChaos_creationTimestamp(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.NetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "NetworkChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.NetworkChaos().CreationTimestamp(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalNTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _NetworkChaos_deletionTimestamp(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.NetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "NetworkChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.NetworkChaos().DeletionTimestamp(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalOTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _NetworkChaos_deletionGracePeriodSeconds(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.NetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "NetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.DeletionGracePeriodSeconds, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int64)
fc.Result = res
return ec.marshalOInt2ᚖint64(ctx, field.Selections, res)
}
func (ec *executionContext) _NetworkChaos_labels(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.NetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "NetworkChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.NetworkChaos().Labels(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _NetworkChaos_annotations(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.NetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "NetworkChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.NetworkChaos().Annotations(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _NetworkChaos_ownerReferences(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.NetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "NetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.OwnerReferences, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v11.OwnerReference)
fc.Result = res
return ec.marshalOOwnerReference2ᚕk8sᚗioᚋapimachineryᚋpkgᚋapisᚋmetaᚋv1ᚐOwnerReferenceᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _NetworkChaos_finalizers(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.NetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "NetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Finalizers, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalOString2ᚕstringᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _NetworkChaos_clusterName(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.NetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "NetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ClusterName, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _NetworkChaos_podnetwork(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.NetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "NetworkChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.NetworkChaos().Podnetwork(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]*v1alpha1.PodNetworkChaos)
fc.Result = res
return ec.marshalOPodNetworkChaos2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodNetworkChaosᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _OwnerReference_kind(ctx context.Context, field graphql.CollectedField, obj *v11.OwnerReference) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "OwnerReference",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Kind, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _OwnerReference_apiVersion(ctx context.Context, field graphql.CollectedField, obj *v11.OwnerReference) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "OwnerReference",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.APIVersion, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _OwnerReference_name(ctx context.Context, field graphql.CollectedField, obj *v11.OwnerReference) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "OwnerReference",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _OwnerReference_uid(ctx context.Context, field graphql.CollectedField, obj *v11.OwnerReference) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "OwnerReference",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.OwnerReference().UID(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _OwnerReference_controller(ctx context.Context, field graphql.CollectedField, obj *v11.OwnerReference) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "OwnerReference",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Controller, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*bool)
fc.Result = res
return ec.marshalOBoolean2ᚖbool(ctx, field.Selections, res)
}
func (ec *executionContext) _OwnerReference_blockOwnerDeletion(ctx context.Context, field graphql.CollectedField, obj *v11.OwnerReference) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "OwnerReference",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.BlockOwnerDeletion, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*bool)
fc.Result = res
return ec.marshalOBoolean2ᚖbool(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_kind(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Kind, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_apiVersion(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.APIVersion, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_name(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_generateName(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.GenerateName, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_namespace(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Namespace, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_selfLink(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.SelfLink, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_uid(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Pod().UID(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_resourceVersion(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ResourceVersion, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_generation(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Generation, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalNInt2int64(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_creationTimestamp(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Pod().CreationTimestamp(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalNTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
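// _Pod_deletionTimestamp resolves a nullable field: a nil result marshals
// to graphql.Null without raising a "must not be null" error.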
func (ec *executionContext) _Pod_deletionTimestamp(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Pod().DeletionTimestamp(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalOTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_deletionGracePeriodSeconds(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.DeletionGracePeriodSeconds, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int64)
fc.Result = res
return ec.marshalOInt2ᚖint64(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_labels(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Pod().Labels(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_annotations(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Pod().Annotations(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_ownerReferences(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.OwnerReferences, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v11.OwnerReference)
fc.Result = res
return ec.marshalOOwnerReference2ᚕk8sᚗioᚋapimachineryᚋpkgᚋapisᚋmetaᚋv1ᚐOwnerReferenceᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_finalizers(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Finalizers, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalOString2ᚕstringᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_clusterName(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ClusterName, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_spec(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Spec, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(v1.PodSpec)
fc.Result = res
return ec.marshalNPodSpec2k8sᚗioᚋapiᚋcoreᚋv1ᚐPodSpec(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_status(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Status, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(v1.PodStatus)
fc.Result = res
return ec.marshalNPodStatus2k8sᚗioᚋapiᚋcoreᚋv1ᚐPodStatus(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_logs(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Pod().Logs(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_daemon(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Pod().Daemon(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1.Pod)
fc.Result = res
return ec.marshalOPod2ᚖk8sᚗioᚋapiᚋcoreᚋv1ᚐPod(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_processes(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Pod().Processes(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]*model.Process)
fc.Result = res
return ec.marshalOProcess2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋpkgᚋctrlserverᚋgraphᚋmodelᚐProcessᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_mounts(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Pod().Mounts(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalOString2ᚕstringᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_ipset(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Pod().Ipset(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_tcQdisc(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Pod().TcQdisc(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Pod_iptables(ctx context.Context, field graphql.CollectedField, obj *v1.Pod) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Pod",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Pod().Iptables(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
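// Field resolvers for the PodCondition object follow; the type and status
// fields go through resolvers that return plain strings for the
// Kubernetes enum-like types.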
func (ec *executionContext) _PodCondition_type(ctx context.Context, field graphql.CollectedField, obj *v1.PodCondition) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodCondition",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodCondition().Type(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodCondition_status(ctx context.Context, field graphql.CollectedField, obj *v1.PodCondition) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodCondition",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodCondition().Status(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodCondition_lastProbeTime(ctx context.Context, field graphql.CollectedField, obj *v1.PodCondition) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodCondition",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodCondition().LastProbeTime(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalOTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _PodCondition_lastTransitionTime(ctx context.Context, field graphql.CollectedField, obj *v1.PodCondition) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodCondition",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodCondition().LastTransitionTime(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalOTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _PodCondition_reason(ctx context.Context, field graphql.CollectedField, obj *v1.PodCondition) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodCondition",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Reason, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodCondition_message(ctx context.Context, field graphql.CollectedField, obj *v1.PodCondition) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodCondition",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Message, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
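// Field resolvers for the PodHTTPChaos object (backed by
// v1alpha1.PodHttpChaos) follow, mirroring the metadata fields generated
// for Pod above.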
func (ec *executionContext) _PodHTTPChaos_kind(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Kind, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHTTPChaos_apiVersion(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.APIVersion, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHTTPChaos_name(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHTTPChaos_generateName(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.GenerateName, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHTTPChaos_namespace(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Namespace, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHTTPChaos_selfLink(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.SelfLink, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHTTPChaos_uid(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodHTTPChaos().UID(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHTTPChaos_resourceVersion(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ResourceVersion, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHTTPChaos_generation(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Generation, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalNInt2int64(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHTTPChaos_creationTimestamp(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodHTTPChaos().CreationTimestamp(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalNTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHTTPChaos_deletionTimestamp(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodHTTPChaos().DeletionTimestamp(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalOTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHTTPChaos_deletionGracePeriodSeconds(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.DeletionGracePeriodSeconds, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int64)
fc.Result = res
return ec.marshalOInt2ᚖint64(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHTTPChaos_labels(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodHTTPChaos().Labels(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHTTPChaos_annotations(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodHTTPChaos().Annotations(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHTTPChaos_ownerReferences(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.OwnerReferences, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v11.OwnerReference)
fc.Result = res
return ec.marshalOOwnerReference2ᚕk8sᚗioᚋapimachineryᚋpkgᚋapisᚋmetaᚋv1ᚐOwnerReferenceᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHTTPChaos_finalizers(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Finalizers, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalOString2ᚕstringᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHTTPChaos_clusterName(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ClusterName, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHTTPChaos_spec(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Spec, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(v1alpha1.PodHttpChaosSpec)
fc.Result = res
return ec.marshalNPodHttpChaosSpec2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosSpec(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHTTPChaos_status(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Status, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(v1alpha1.PodHttpChaosStatus)
fc.Result = res
return ec.marshalNPodHttpChaosStatus2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosStatus(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHTTPChaos_pod(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHTTPChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodHTTPChaos().Pod(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(*v1.Pod)
fc.Result = res
return ec.marshalNPod2ᚖk8sᚗioᚋapiᚋcoreᚋv1ᚐPod(ctx, field.Selections, res)
}
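// Field resolvers for PodHttpChaosActions follow; every field is
// optional, so nil values marshal to graphql.Null.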
func (ec *executionContext) _PodHttpChaosActions_abort(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosActions) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosActions",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Abort, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*bool)
fc.Result = res
return ec.marshalOBoolean2ᚖbool(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosActions_delay(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosActions) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosActions",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Delay, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosActions_replace(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosActions) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosActions",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Replace, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1alpha1.PodHttpChaosReplaceActions)
fc.Result = res
return ec.marshalOPodHttpChaosReplaceActions2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosReplaceActions(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosActions_patch(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosActions) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosActions",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Patch, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1alpha1.PodHttpChaosPatchActions)
fc.Result = res
return ec.marshalOPodHttpChaosPatchActions2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosPatchActions(ctx, field.Selections, res)
}
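// Field resolvers for PodHttpChaosPatchActions follow; queries and
// headers are [][]string values marshaled as nested string lists.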
func (ec *executionContext) _PodHttpChaosPatchActions_body(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosPatchActions) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosPatchActions",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Body, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1alpha1.PodHttpChaosPatchBodyAction)
fc.Result = res
return ec.marshalOPodHttpChaosPatchBodyAction2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosPatchBodyAction(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosPatchActions_queries(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosPatchActions) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosPatchActions",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Queries, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([][]string)
fc.Result = res
return ec.marshalOString2ᚕᚕstringᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosPatchActions_headers(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosPatchActions) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosPatchActions",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Headers, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([][]string)
fc.Result = res
return ec.marshalOString2ᚕᚕstringᚄ(ctx, field.Selections, res)
}
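// Field resolvers for PodHttpChaosPatchBodyAction follow; both type and
// value are non-nullable strings read directly from the struct.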
func (ec *executionContext) _PodHttpChaosPatchBodyAction_type(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosPatchBodyAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosPatchBodyAction",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Type, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosPatchBodyAction_value(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosPatchBodyAction) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosPatchBodyAction",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Value, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
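// Field resolvers for PodHttpChaosReplaceActions follow. The scalar
// fields (path, method, code) read the struct directly, while body,
// queries, and headers go through user-supplied resolvers that adapt the
// underlying types for GraphQL.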
func (ec *executionContext) _PodHttpChaosReplaceActions_path(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosReplaceActions) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosReplaceActions",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Path, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosReplaceActions_method(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosReplaceActions) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosReplaceActions",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Method, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosReplaceActions_code(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosReplaceActions) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosReplaceActions",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Code, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int32)
fc.Result = res
return ec.marshalOInt2ᚖint32(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosReplaceActions_body(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosReplaceActions) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosReplaceActions",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodHttpChaosReplaceActions().Body(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosReplaceActions_queries(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosReplaceActions) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosReplaceActions",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodHttpChaosReplaceActions().Queries(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosReplaceActions_headers(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosReplaceActions) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosReplaceActions",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodHttpChaosReplaceActions().Headers(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
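// Field resolvers for PodHttpChaosRule follow; all five fields are
// non-nullable, and target is served by the PodHttpChaosRule resolver.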
func (ec *executionContext) _PodHttpChaosRule_target(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosRule) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosRule",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodHttpChaosRule().Target(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosRule_selector(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosRule) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosRule",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Selector, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(v1alpha1.PodHttpChaosSelector)
fc.Result = res
return ec.marshalNPodHttpChaosSelector2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosSelector(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosRule_actions(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosRule) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosRule",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Actions, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(v1alpha1.PodHttpChaosActions)
fc.Result = res
return ec.marshalNPodHttpChaosActions2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosActions(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosRule_source(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosRule) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosRule",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Source, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosRule_port(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosRule) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosRule",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Port, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int32)
fc.Result = res
return ec.marshalNInt2int32(ctx, field.Selections, res)
}
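// Field resolvers for PodHttpChaosSelector follow; its fields are
// optional, and requestHeaders is resolved into a generic map through
// ec.resolvers.PodHttpChaosSelector().RequestHeaders.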
func (ec *executionContext) _PodHttpChaosSelector_port(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosSelector) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosSelector",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Port, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int32)
fc.Result = res
return ec.marshalOInt2ᚖint32(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosSelector_path(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosSelector) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosSelector",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Path, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosSelector_method(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosSelector) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosSelector",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Method, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosSelector_code(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosSelector) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosSelector",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Code, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int32)
fc.Result = res
return ec.marshalOInt2ᚖint32(ctx, field.Selections, res)
}
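
// The header maps need custom resolvers (IsMethod/IsResolver are true),
// which expose the selector's request/response headers as the GraphQL Map
// scalar, presumably converting an underlying map[string]string.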
func (ec *executionContext) _PodHttpChaosSelector_requestHeaders(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosSelector) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosSelector",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodHttpChaosSelector().RequestHeaders(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosSelector_responseHeaders(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosSelector) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosSelector",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodHttpChaosSelector().ResponseHeaders(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosSpec_rules(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Rules, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.([]v1alpha1.PodHttpChaosRule)
fc.Result = res
return ec.marshalNPodHttpChaosRule2ᚕgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosRuleᚄ(ctx, field.Selections, res)
}
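
// PodHttpChaosStatus carries runtime bookkeeping (pid, startTime,
// failedMessage, observedGeneration); all of its fields are nullable.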
func (ec *executionContext) _PodHttpChaosStatus_pid(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Pid, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalOInt2int64(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosStatus_startTime(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.StartTime, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalOInt2int64(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosStatus_failedMessage(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.FailedMessage, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodHttpChaosStatus_observedGeneration(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodHttpChaosStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodHttpChaosStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ObservedGeneration, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalOInt2int64(ctx, field.Selections, res)
}
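
// PodIOChaos resolvers: kind, apiVersion, name and the other metadata
// fields below pass the embedded Kubernetes ObjectMeta straight through,
// while uid, the timestamps, labels and annotations go through custom
// resolvers that convert apimachinery types into GraphQL scalars.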
func (ec *executionContext) _PodIOChaos_kind(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Kind, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaos_apiVersion(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.APIVersion, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaos_name(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaos_generateName(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.GenerateName, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaos_namespace(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Namespace, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaos_selfLink(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.SelfLink, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaos_uid(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodIOChaos().UID(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaos_resourceVersion(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ResourceVersion, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaos_generation(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Generation, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalNInt2int64(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaos_creationTimestamp(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodIOChaos().CreationTimestamp(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalNTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaos_deletionTimestamp(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodIOChaos().DeletionTimestamp(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalOTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaos_deletionGracePeriodSeconds(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.DeletionGracePeriodSeconds, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int64)
fc.Result = res
return ec.marshalOInt2ᚖint64(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaos_labels(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodIOChaos().Labels(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaos_annotations(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodIOChaos().Annotations(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaos_ownerReferences(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.OwnerReferences, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v11.OwnerReference)
fc.Result = res
return ec.marshalOOwnerReference2ᚕk8sᚗioᚋapimachineryᚋpkgᚋapisᚋmetaᚋv1ᚐOwnerReferenceᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaos_finalizers(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Finalizers, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalOString2ᚕstringᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaos_clusterName(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ClusterName, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaos_spec(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Spec, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(v1alpha1.PodIOChaosSpec)
fc.Result = res
return ec.marshalNPodIOChaosSpec2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodIOChaosSpec(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaos_status(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Status, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(v1alpha1.PodIOChaosStatus)
fc.Result = res
return ec.marshalNPodIOChaosStatus2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodIOChaosStatus(ctx, field.Selections, res)
}
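
// pod and ios are association fields resolved at query time: pod returns
// the targeted corev1.Pod (non-null), ios the related IOChaos objects
// (nullable list).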
func (ec *executionContext) _PodIOChaos_pod(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodIOChaos().Pod(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(*v1.Pod)
fc.Result = res
return ec.marshalNPod2ᚖk8sᚗioᚋapiᚋcoreᚋv1ᚐPod(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaos_ios(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodIOChaos().Ios(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]*v1alpha1.IOChaos)
fc.Result = res
return ec.marshalOIOChaos2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐIOChaosᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaosSpec_volumeMountPath(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.VolumeMountPath, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaosSpec_container(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Container, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaosSpec_actions(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Actions, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v1alpha1.IOChaosAction)
fc.Result = res
return ec.marshalOIOChaosAction2ᚕgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐIOChaosActionᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaosStatus_pid(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaosStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaosStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Pid, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalOInt2int64(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaosStatus_startTime(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaosStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaosStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.StartTime, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalOInt2int64(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaosStatus_failedMessage(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaosStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaosStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.FailedMessage, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodIOChaosStatus_observedGeneration(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodIOChaosStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIOChaosStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ObservedGeneration, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalOInt2int64(ctx, field.Selections, res)
}
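
// PodIP wraps corev1.PodIP, whose only field is the ip string.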
func (ec *executionContext) _PodIP_ip(ctx context.Context, field graphql.CollectedField, obj *v1.PodIP) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodIP",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.IP, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
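
// PodNetworkChaos mirrors the PodIOChaos resolver layout above: plain
// ObjectMeta pass-throughs plus custom resolvers for uid, the timestamps,
// labels and annotations.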
func (ec *executionContext) _PodNetworkChaos_kind(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Kind, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaos_apiVersion(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.APIVersion, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaos_name(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaos_generateName(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.GenerateName, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaos_namespace(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Namespace, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaos_selfLink(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.SelfLink, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaos_uid(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodNetworkChaos().UID(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaos_resourceVersion(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ResourceVersion, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaos_generation(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Generation, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalNInt2int64(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaos_creationTimestamp(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodNetworkChaos().CreationTimestamp(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalNTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaos_deletionTimestamp(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodNetworkChaos().DeletionTimestamp(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalOTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaos_deletionGracePeriodSeconds(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.DeletionGracePeriodSeconds, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int64)
fc.Result = res
return ec.marshalOInt2ᚖint64(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaos_labels(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodNetworkChaos().Labels(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaos_annotations(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodNetworkChaos().Annotations(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaos_ownerReferences(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.OwnerReferences, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v11.OwnerReference)
fc.Result = res
return ec.marshalOOwnerReference2ᚕk8sᚗioᚋapimachineryᚋpkgᚋapisᚋmetaᚋv1ᚐOwnerReferenceᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaos_finalizers(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Finalizers, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalOString2ᚕstringᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaos_clusterName(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ClusterName, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaos_spec(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Spec, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(v1alpha1.PodNetworkChaosSpec)
fc.Result = res
return ec.marshalNPodNetworkChaosSpec2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodNetworkChaosSpec(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaos_status(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Status, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(v1alpha1.PodNetworkChaosStatus)
fc.Result = res
return ec.marshalNPodNetworkChaosStatus2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodNetworkChaosStatus(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaos_pod(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodNetworkChaos().Pod(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(*v1.Pod)
fc.Result = res
return ec.marshalNPod2ᚖk8sᚗioᚋapiᚋcoreᚋv1ᚐPod(ctx, field.Selections, res)
}
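
// PodNetworkChaosSpec groups the low-level network rules applied to a pod:
// ipSets, iptables and trafficControls are all optional lists.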
func (ec *executionContext) _PodNetworkChaosSpec_ipSets(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.IPSets, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v1alpha1.RawIPSet)
fc.Result = res
return ec.marshalORawIPSet2ᚕgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐRawIPSetᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaosSpec_iptables(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Iptables, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v1alpha1.RawIptables)
fc.Result = res
return ec.marshalORawIptables2ᚕgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐRawIptablesᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaosSpec_trafficControls(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaosSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaosSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.TrafficControls, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v1alpha1.RawTrafficControl)
fc.Result = res
return ec.marshalORawTrafficControl2ᚕgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐRawTrafficControlᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaosStatus_failedMessage(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaosStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaosStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.FailedMessage, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodNetworkChaosStatus_observedGeneration(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodNetworkChaosStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodNetworkChaosStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ObservedGeneration, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalNInt2int64(ctx, field.Selections, res)
}
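
// PodSelectorSpec resolvers: namespaces and nodes marshal directly, while
// the selector maps (pods, nodeSelectors, fieldSelectors, labelSelectors)
// go through custom resolvers that surface them as the Map scalar.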
func (ec *executionContext) _PodSelectorSpec_namespaces(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodSelectorSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodSelectorSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Namespaces, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalOString2ᚕstringᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _PodSelectorSpec_nodes(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodSelectorSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodSelectorSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Nodes, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalOString2ᚕstringᚄ(ctx, field.Selections, res)
}
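
// Fields whose FieldContext sets IsMethod and IsResolver to true are not read
// straight off the struct; they call into the user-implemented resolver set
// (ec.resolvers). A minimal sketch of the implementation this call expects,
// matching the visible signature (the receiver name is an assumption, not
// taken from this file):
//
//	func (r *podSelectorSpecResolver) Pods(ctx context.Context, obj *v1alpha1.PodSelectorSpec) (map[string]interface{}, error) {
//		// resolve the selector into concrete pods, keyed however the schema's Map scalar expects
//		return map[string]interface{}{}, nil
//	}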
func (ec *executionContext) _PodSelectorSpec_pods(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodSelectorSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodSelectorSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodSelectorSpec().Pods(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _PodSelectorSpec_nodeSelectors(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodSelectorSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodSelectorSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodSelectorSpec().NodeSelectors(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _PodSelectorSpec_fieldSelectors(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodSelectorSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodSelectorSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodSelectorSpec().FieldSelectors(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _PodSelectorSpec_labelSelectors(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodSelectorSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodSelectorSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodSelectorSpec().LabelSelectors(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _PodSelectorSpec_annotationSelectors(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodSelectorSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodSelectorSpec",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodSelectorSpec().AnnotationSelectors(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _PodSelectorSpec_podPhaseSelectors(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.PodSelectorSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodSelectorSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.PodPhaseSelectors, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalOString2ᚕstringᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _PodSpec_nodeName(ctx context.Context, field graphql.CollectedField, obj *v1.PodSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.NodeName, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodStatus_phase(ctx context.Context, field graphql.CollectedField, obj *v1.PodStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodStatus",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodStatus().Phase(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodStatus_conditions(ctx context.Context, field graphql.CollectedField, obj *v1.PodStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Conditions, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v1.PodCondition)
fc.Result = res
return ec.marshalOPodCondition2ᚕk8sᚗioᚋapiᚋcoreᚋv1ᚐPodConditionᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _PodStatus_message(ctx context.Context, field graphql.CollectedField, obj *v1.PodStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Message, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodStatus_reason(ctx context.Context, field graphql.CollectedField, obj *v1.PodStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Reason, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodStatus_nominatedNodeName(ctx context.Context, field graphql.CollectedField, obj *v1.PodStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.NominatedNodeName, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodStatus_hostIP(ctx context.Context, field graphql.CollectedField, obj *v1.PodStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.HostIP, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodStatus_podIP(ctx context.Context, field graphql.CollectedField, obj *v1.PodStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.PodIP, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodStatus_podIPs(ctx context.Context, field graphql.CollectedField, obj *v1.PodStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.PodIPs, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v1.PodIP)
fc.Result = res
return ec.marshalOPodIP2ᚕk8sᚗioᚋapiᚋcoreᚋv1ᚐPodIPᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _PodStatus_startTime(ctx context.Context, field graphql.CollectedField, obj *v1.PodStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodStatus",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodStatus().StartTime(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalOTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _PodStatus_initContainerStatuses(ctx context.Context, field graphql.CollectedField, obj *v1.PodStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.InitContainerStatuses, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v1.ContainerStatus)
fc.Result = res
return ec.marshalOContainerStatus2ᚕk8sᚗioᚋapiᚋcoreᚋv1ᚐContainerStatusᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _PodStatus_containerStatuses(ctx context.Context, field graphql.CollectedField, obj *v1.PodStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ContainerStatuses, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v1.ContainerStatus)
fc.Result = res
return ec.marshalOContainerStatus2ᚕk8sᚗioᚋapiᚋcoreᚋv1ᚐContainerStatusᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _PodStatus_qosClass(ctx context.Context, field graphql.CollectedField, obj *v1.PodStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodStatus",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.PodStatus().QosClass(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _PodStatus_ephemeralContainerStatuses(ctx context.Context, field graphql.CollectedField, obj *v1.PodStatus) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PodStatus",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.EphemeralContainerStatuses, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v1.ContainerStatus)
fc.Result = res
return ec.marshalOContainerStatus2ᚕk8sᚗioᚋapiᚋcoreᚋv1ᚐContainerStatusᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _Process_pod(ctx context.Context, field graphql.CollectedField, obj *model.Process) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Process",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Pod, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(*v1.Pod)
fc.Result = res
return ec.marshalNPod2ᚖk8sᚗioᚋapiᚋcoreᚋv1ᚐPod(ctx, field.Selections, res)
}
func (ec *executionContext) _Process_pid(ctx context.Context, field graphql.CollectedField, obj *model.Process) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Process",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Pid, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Process_command(ctx context.Context, field graphql.CollectedField, obj *model.Process) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Process",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Command, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Process_fds(ctx context.Context, field graphql.CollectedField, obj *model.Process) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Process",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Process().Fds(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]*model.Fd)
fc.Result = res
return ec.marshalOFd2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋpkgᚋctrlserverᚋgraphᚋmodelᚐFdᚄ(ctx, field.Selections, res)
}
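
// Root Query fields differ from the object fields above in one step: they
// decode the field's arguments via the generated field_Query_*_args helper
// and attach them to the FieldContext before invoking the resolver.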
func (ec *executionContext) _Query_namespace(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Query",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Query_namespace_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Query().Namespace(rctx, args["ns"].(*string))
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]*model.Namespace)
fc.Result = res
return ec.marshalONamespace2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋpkgᚋctrlserverᚋgraphᚋmodelᚐNamespaceᚄ(ctx, field.Selections, res)
}
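
// __type and __schema are not user resolvers; they serve GraphQL
// introspection from the parsed schema via ec.introspectType and
// ec.introspectSchema.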
func (ec *executionContext) _Query___type(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Query",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Query___type_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.introspectType(args["name"].(string))
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*introspection.Type)
fc.Result = res
return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
func (ec *executionContext) _Query___schema(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Query",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.introspectSchema()
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*introspection.Schema)
fc.Result = res
return ec.marshalO__Schema2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx, field.Selections, res)
}
func (ec *executionContext) _RawIPSet_name(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.RawIPSet) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "RawIPSet",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _RawIPSet_cidrs(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.RawIPSet) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "RawIPSet",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Cidrs, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalNString2ᚕstringᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _RawIPSet_source(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.RawIPSet) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "RawIPSet",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Source, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _RawIptables_name(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.RawIptables) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "RawIptables",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _RawIptables_ipSets(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.RawIptables) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "RawIptables",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.IPSets, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalNString2ᚕstringᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _RawIptables_direction(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.RawIptables) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "RawIptables",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.RawIptables().Direction(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _RawIptables_source(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.RawIptables) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "RawIptables",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Source, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _RawTrafficControl_type(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.RawTrafficControl) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "RawTrafficControl",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.RawTrafficControl().Type(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _RawTrafficControl_delay(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.RawTrafficControl) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "RawTrafficControl",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Delay, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1alpha1.DelaySpec)
fc.Result = res
return ec.marshalODelaySpec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐDelaySpec(ctx, field.Selections, res)
}
func (ec *executionContext) _RawTrafficControl_loss(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.RawTrafficControl) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "RawTrafficControl",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Loss, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1alpha1.LossSpec)
fc.Result = res
return ec.marshalOLossSpec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐLossSpec(ctx, field.Selections, res)
}
func (ec *executionContext) _RawTrafficControl_duplicate(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.RawTrafficControl) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "RawTrafficControl",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Duplicate, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1alpha1.DuplicateSpec)
fc.Result = res
return ec.marshalODuplicateSpec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐDuplicateSpec(ctx, field.Selections, res)
}
func (ec *executionContext) _RawTrafficControl_corrupt(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.RawTrafficControl) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "RawTrafficControl",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Corrupt, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1alpha1.CorruptSpec)
fc.Result = res
return ec.marshalOCorruptSpec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐCorruptSpec(ctx, field.Selections, res)
}
func (ec *executionContext) _RawTrafficControl_Bandwidth(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.RawTrafficControl) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "RawTrafficControl",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Bandwidth, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*v1alpha1.BandwidthSpec)
fc.Result = res
return ec.marshalOBandwidthSpec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐBandwidthSpec(ctx, field.Selections, res)
}
func (ec *executionContext) _RawTrafficControl_ipSet(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.RawTrafficControl) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "RawTrafficControl",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.IPSet, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _RawTrafficControl_source(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.RawTrafficControl) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "RawTrafficControl",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Source, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Record_id(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.Record) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Record",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Id, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Record_selectorKey(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.Record) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Record",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.SelectorKey, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Record_phase(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.Record) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Record",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Record().Phase(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _ReorderSpec_reorder(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.ReorderSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ReorderSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Reorder, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _ReorderSpec_correlation(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.ReorderSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ReorderSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Correlation, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _ReorderSpec_gap(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.ReorderSpec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "ReorderSpec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Gap, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(int)
fc.Result = res
return ec.marshalOInt2int(ctx, field.Selections, res)
}
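
// The StressChaos fields below expose the resource's Kubernetes object
// metadata through the schema: required strings and ints go through the
// non-null marshalN* helpers, while optional values such as deletionTimestamp
// and deletionGracePeriodSeconds use the nullable marshalO* helpers.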
func (ec *executionContext) _StressChaos_kind(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.StressChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "StressChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Kind, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _StressChaos_apiVersion(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.StressChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "StressChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.APIVersion, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _StressChaos_name(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.StressChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "StressChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _StressChaos_generateName(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.StressChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "StressChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.GenerateName, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _StressChaos_namespace(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.StressChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "StressChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Namespace, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _StressChaos_selfLink(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.StressChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "StressChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.SelfLink, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _StressChaos_uid(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.StressChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "StressChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.StressChaos().UID(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _StressChaos_resourceVersion(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.StressChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "StressChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ResourceVersion, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _StressChaos_generation(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.StressChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "StressChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Generation, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalNInt2int64(ctx, field.Selections, res)
}
func (ec *executionContext) _StressChaos_creationTimestamp(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.StressChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "StressChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.StressChaos().CreationTimestamp(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalNTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _StressChaos_deletionTimestamp(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.StressChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "StressChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.StressChaos().DeletionTimestamp(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalOTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _StressChaos_deletionGracePeriodSeconds(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.StressChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "StressChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.DeletionGracePeriodSeconds, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*int64)
fc.Result = res
return ec.marshalOInt2ᚖint64(ctx, field.Selections, res)
}
func (ec *executionContext) _StressChaos_labels(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.StressChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "StressChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.StressChaos().Labels(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _StressChaos_annotations(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.StressChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "StressChaos",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.StressChaos().Annotations(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(map[string]interface{})
fc.Result = res
return ec.marshalOMap2map(ctx, field.Selections, res)
}
func (ec *executionContext) _StressChaos_ownerReferences(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.StressChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "StressChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.OwnerReferences, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]v11.OwnerReference)
fc.Result = res
return ec.marshalOOwnerReference2ᚕk8sᚗioᚋapimachineryᚋpkgᚋapisᚋmetaᚋv1ᚐOwnerReferenceᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _StressChaos_finalizers(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.StressChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "StressChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Finalizers, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalOString2ᚕstringᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _StressChaos_clusterName(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.StressChaos) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "StressChaos",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ClusterName, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Timespec_sec(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.Timespec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Timespec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Sec, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalNInt2int64(ctx, field.Selections, res)
}
func (ec *executionContext) _Timespec_nsec(ctx context.Context, field graphql.CollectedField, obj *v1alpha1.Timespec) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Timespec",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Nsec, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int64)
fc.Result = res
return ec.marshalNInt2int64(ctx, field.Selections, res)
}
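// Resolvers prefixed with a triple underscore (___Directive_name,
// ___Type_kind, ...) back the GraphQL introspection types __Directive,
// __EnumValue, __Field, __InputValue, __Schema and __Type, which tools such
// as GraphiQL query to discover the schema.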
func (ec *executionContext) ___Directive_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Directive",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___Directive_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Directive",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Description, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___Directive_locations(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Directive",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Locations, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalN__DirectiveLocation2ᚕstringᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___Directive_args(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Directive",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Args, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.([]introspection.InputValue)
fc.Result = res
return ec.marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___EnumValue_name(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__EnumValue",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___EnumValue_description(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__EnumValue",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Description, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___EnumValue_isDeprecated(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__EnumValue",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.IsDeprecated(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(bool)
fc.Result = res
return ec.marshalNBoolean2bool(ctx, field.Selections, res)
}
func (ec *executionContext) ___EnumValue_deprecationReason(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__EnumValue",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.DeprecationReason(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) ___Field_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Field",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___Field_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Field",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Description, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___Field_args(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Field",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Args, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.([]introspection.InputValue)
fc.Result = res
return ec.marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___Field_type(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Field",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Type, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(*introspection.Type)
fc.Result = res
return ec.marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
func (ec *executionContext) ___Field_isDeprecated(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Field",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.IsDeprecated(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(bool)
fc.Result = res
return ec.marshalNBoolean2bool(ctx, field.Selections, res)
}
func (ec *executionContext) ___Field_deprecationReason(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Field",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.DeprecationReason(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) ___InputValue_name(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__InputValue",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___InputValue_description(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__InputValue",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Description, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___InputValue_type(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__InputValue",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Type, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(*introspection.Type)
fc.Result = res
return ec.marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
func (ec *executionContext) ___InputValue_defaultValue(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__InputValue",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.DefaultValue, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) ___Schema_types(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Schema",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Types(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.([]introspection.Type)
fc.Result = res
return ec.marshalN__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___Schema_queryType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Schema",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.QueryType(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(*introspection.Type)
fc.Result = res
return ec.marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
func (ec *executionContext) ___Schema_mutationType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Schema",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.MutationType(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*introspection.Type)
fc.Result = res
return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
func (ec *executionContext) ___Schema_subscriptionType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Schema",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.SubscriptionType(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*introspection.Type)
fc.Result = res
return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
func (ec *executionContext) ___Schema_directives(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Schema",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Directives(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.([]introspection.Directive)
fc.Result = res
return ec.marshalN__Directive2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirectiveᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___Type_kind(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Type",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Kind(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalN__TypeKind2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___Type_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Type",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) ___Type_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Type",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Description(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___Type_fields(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Type",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
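	// Unlike the argument-less resolvers above, fields(includeDeprecated:)
	// first decodes its arguments from the operation's variable map.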
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field___Type_fields_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Fields(args["includeDeprecated"].(bool)), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]introspection.Field)
fc.Result = res
return ec.marshalO__Field2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐFieldᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___Type_interfaces(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Type",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Interfaces(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]introspection.Type)
fc.Result = res
return ec.marshalO__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___Type_possibleTypes(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Type",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.PossibleTypes(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]introspection.Type)
fc.Result = res
return ec.marshalO__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___Type_enumValues(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Type",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field___Type_enumValues_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.EnumValues(args["includeDeprecated"].(bool)), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]introspection.EnumValue)
fc.Result = res
return ec.marshalO__EnumValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValueᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___Type_inputFields(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Type",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.InputFields(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]introspection.InputValue)
fc.Result = res
return ec.marshalO__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___Type_ofType(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Type",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.OfType(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*introspection.Type)
fc.Result = res
return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
// endregion **************************** field.gotpl *****************************
// region **************************** input.gotpl *****************************
// endregion **************************** input.gotpl *****************************
// region ************************** interface.gotpl ***************************
// endregion ************************** interface.gotpl ***************************
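// The input.gotpl and interface.gotpl regions above are empty: no input
// objects or interfaces in this schema required generated helpers.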
// region **************************** object.gotpl ****************************
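// Every object marshaler below is generated from the same template:
// graphql.CollectFields gathers the fields the query selected, a FieldSet
// holds one output slot per field, and out.Concurrently(i, fn) registers a
// closure that the gqlgen runtime may run in its own goroutine; out.Dispatch()
// executes the registered closures and blocks until all have finished. The
// invalids counter tracks non-null fields that resolved to null; if any did,
// the whole object collapses to graphql.Null so the null propagates to the
// parent, as GraphQL non-null semantics require.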
var attrOverrideSpecImplementors = []string{"AttrOverrideSpec"}
func (ec *executionContext) _AttrOverrideSpec(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.AttrOverrideSpec) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, attrOverrideSpecImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
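	// every AttrOverrideSpec field is optional, so invalids is never incremented here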
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("AttrOverrideSpec")
case "ino":
field := field
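			// shadow the loop variable so the closure below captures this
			// iteration's field (needed under pre-Go 1.22 loop semantics)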
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._AttrOverrideSpec_ino(ctx, field, obj)
return res
})
case "size":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._AttrOverrideSpec_size(ctx, field, obj)
return res
})
case "blocks":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._AttrOverrideSpec_blocks(ctx, field, obj)
return res
})
case "atime":
out.Values[i] = ec._AttrOverrideSpec_atime(ctx, field, obj)
case "mtime":
out.Values[i] = ec._AttrOverrideSpec_mtime(ctx, field, obj)
case "ctime":
out.Values[i] = ec._AttrOverrideSpec_ctime(ctx, field, obj)
case "kind":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._AttrOverrideSpec_kind(ctx, field, obj)
return res
})
case "perm":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._AttrOverrideSpec_perm(ctx, field, obj)
return res
})
case "nlink":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._AttrOverrideSpec_nlink(ctx, field, obj)
return res
})
case "uid":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._AttrOverrideSpec_uid(ctx, field, obj)
return res
})
case "gid":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._AttrOverrideSpec_gid(ctx, field, obj)
return res
})
case "rdev":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._AttrOverrideSpec_rdev(ctx, field, obj)
return res
})
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
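// BandwidthSpec's non-null fields (rate, limit, buffer) add a null check that
// increments invalids; the optional peakrate and minburst fields do not.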
var bandwidthSpecImplementors = []string{"BandwidthSpec"}
func (ec *executionContext) _BandwidthSpec(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.BandwidthSpec) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, bandwidthSpecImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("BandwidthSpec")
case "rate":
out.Values[i] = ec._BandwidthSpec_rate(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "limit":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._BandwidthSpec_limit(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "buffer":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._BandwidthSpec_buffer(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "peakrate":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._BandwidthSpec_peakrate(ctx, field, obj)
return res
})
case "minburst":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._BandwidthSpec_minburst(ctx, field, obj)
return res
})
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var chaosConditionImplementors = []string{"ChaosCondition"}
func (ec *executionContext) _ChaosCondition(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.ChaosCondition) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, chaosConditionImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("ChaosCondition")
case "type":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._ChaosCondition_type(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "status":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._ChaosCondition_status(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "reason":
out.Values[i] = ec._ChaosCondition_reason(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var containerStateImplementors = []string{"ContainerState"}
func (ec *executionContext) _ContainerState(ctx context.Context, sel ast.SelectionSet, obj *v1.ContainerState) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, containerStateImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("ContainerState")
case "waiting":
out.Values[i] = ec._ContainerState_waiting(ctx, field, obj)
case "running":
out.Values[i] = ec._ContainerState_running(ctx, field, obj)
case "terminated":
out.Values[i] = ec._ContainerState_terminated(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var containerStateRunningImplementors = []string{"ContainerStateRunning"}
func (ec *executionContext) _ContainerStateRunning(ctx context.Context, sel ast.SelectionSet, obj *v1.ContainerStateRunning) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, containerStateRunningImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("ContainerStateRunning")
case "startedAt":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._ContainerStateRunning_startedAt(ctx, field, obj)
return res
})
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var containerStateTerminatedImplementors = []string{"ContainerStateTerminated"}
func (ec *executionContext) _ContainerStateTerminated(ctx context.Context, sel ast.SelectionSet, obj *v1.ContainerStateTerminated) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, containerStateTerminatedImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("ContainerStateTerminated")
case "exitCode":
out.Values[i] = ec._ContainerStateTerminated_exitCode(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "signal":
out.Values[i] = ec._ContainerStateTerminated_signal(ctx, field, obj)
case "reason":
out.Values[i] = ec._ContainerStateTerminated_reason(ctx, field, obj)
case "message":
out.Values[i] = ec._ContainerStateTerminated_message(ctx, field, obj)
case "startedAt":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._ContainerStateTerminated_startedAt(ctx, field, obj)
return res
})
case "finishedAt":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._ContainerStateTerminated_finishedAt(ctx, field, obj)
return res
})
case "containerID":
out.Values[i] = ec._ContainerStateTerminated_containerID(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var containerStateWaitingImplementors = []string{"ContainerStateWaiting"}
func (ec *executionContext) _ContainerStateWaiting(ctx context.Context, sel ast.SelectionSet, obj *v1.ContainerStateWaiting) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, containerStateWaitingImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("ContainerStateWaiting")
case "reason":
out.Values[i] = ec._ContainerStateWaiting_reason(ctx, field, obj)
case "message":
out.Values[i] = ec._ContainerStateWaiting_message(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
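// ContainerStatus resolves every field synchronously, so the generated code
// bumps invalids with a plain increment; objects that resolve any field via
// out.Concurrently use atomic.AddUint32 instead, since those closures may
// run on other goroutines.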
var containerStatusImplementors = []string{"ContainerStatus"}
func (ec *executionContext) _ContainerStatus(ctx context.Context, sel ast.SelectionSet, obj *v1.ContainerStatus) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, containerStatusImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("ContainerStatus")
case "name":
out.Values[i] = ec._ContainerStatus_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "State":
out.Values[i] = ec._ContainerStatus_State(ctx, field, obj)
case "lastTerminationState":
out.Values[i] = ec._ContainerStatus_lastTerminationState(ctx, field, obj)
case "ready":
out.Values[i] = ec._ContainerStatus_ready(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "restartCount":
out.Values[i] = ec._ContainerStatus_restartCount(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "image":
out.Values[i] = ec._ContainerStatus_image(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "imageID":
out.Values[i] = ec._ContainerStatus_imageID(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "containerID":
out.Values[i] = ec._ContainerStatus_containerID(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "started":
out.Values[i] = ec._ContainerStatus_started(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var corruptSpecImplementors = []string{"CorruptSpec"}
func (ec *executionContext) _CorruptSpec(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.CorruptSpec) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, corruptSpecImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("CorruptSpec")
case "corrup":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._CorruptSpec_corrup(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "correlation":
out.Values[i] = ec._CorruptSpec_correlation(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var delaySpecImplementors = []string{"DelaySpec"}
func (ec *executionContext) _DelaySpec(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.DelaySpec) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, delaySpecImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("DelaySpec")
case "latency":
out.Values[i] = ec._DelaySpec_latency(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "correlation":
out.Values[i] = ec._DelaySpec_correlation(ctx, field, obj)
case "jitter":
out.Values[i] = ec._DelaySpec_jitter(ctx, field, obj)
case "reorder":
out.Values[i] = ec._DelaySpec_reorder(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var duplicateSpecImplementors = []string{"DuplicateSpec"}
func (ec *executionContext) _DuplicateSpec(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.DuplicateSpec) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, duplicateSpecImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("DuplicateSpec")
case "duplicate":
out.Values[i] = ec._DuplicateSpec_duplicate(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "correlation":
out.Values[i] = ec._DuplicateSpec_correlation(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var experimentStatusImplementors = []string{"ExperimentStatus"}
func (ec *executionContext) _ExperimentStatus(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.ExperimentStatus) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, experimentStatusImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("ExperimentStatus")
case "desiredPhase":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._ExperimentStatus_desiredPhase(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "Records":
out.Values[i] = ec._ExperimentStatus_Records(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var fdImplementors = []string{"Fd"}
func (ec *executionContext) _Fd(ctx context.Context, sel ast.SelectionSet, obj *model.Fd) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, fdImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("Fd")
case "fd":
out.Values[i] = ec._Fd_fd(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "target":
out.Values[i] = ec._Fd_target(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
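// HTTPChaos (like IOChaos below) marshals the embedded Kubernetes object
// metadata (kind, apiVersion, name, namespace, uid, timestamps, labels, ...)
// alongside its spec, status and the podhttp connection field.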
var hTTPChaosImplementors = []string{"HTTPChaos"}
func (ec *executionContext) _HTTPChaos(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.HTTPChaos) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, hTTPChaosImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("HTTPChaos")
case "kind":
out.Values[i] = ec._HTTPChaos_kind(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "apiVersion":
out.Values[i] = ec._HTTPChaos_apiVersion(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "name":
out.Values[i] = ec._HTTPChaos_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "generateName":
out.Values[i] = ec._HTTPChaos_generateName(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "namespace":
out.Values[i] = ec._HTTPChaos_namespace(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "selfLink":
out.Values[i] = ec._HTTPChaos_selfLink(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "uid":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._HTTPChaos_uid(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "resourceVersion":
out.Values[i] = ec._HTTPChaos_resourceVersion(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "generation":
out.Values[i] = ec._HTTPChaos_generation(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "creationTimestamp":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._HTTPChaos_creationTimestamp(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "deletionTimestamp":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._HTTPChaos_deletionTimestamp(ctx, field, obj)
return res
})
case "deletionGracePeriodSeconds":
out.Values[i] = ec._HTTPChaos_deletionGracePeriodSeconds(ctx, field, obj)
case "labels":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._HTTPChaos_labels(ctx, field, obj)
return res
})
case "annotations":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._HTTPChaos_annotations(ctx, field, obj)
return res
})
case "ownerReferences":
out.Values[i] = ec._HTTPChaos_ownerReferences(ctx, field, obj)
case "finalizers":
out.Values[i] = ec._HTTPChaos_finalizers(ctx, field, obj)
case "clusterName":
out.Values[i] = ec._HTTPChaos_clusterName(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "spec":
out.Values[i] = ec._HTTPChaos_spec(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "status":
out.Values[i] = ec._HTTPChaos_status(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "podhttp":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._HTTPChaos_podhttp(ctx, field, obj)
return res
})
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var hTTPChaosSpecImplementors = []string{"HTTPChaosSpec"}
func (ec *executionContext) _HTTPChaosSpec(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.HTTPChaosSpec) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, hTTPChaosSpecImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("HTTPChaosSpec")
case "selector":
out.Values[i] = ec._HTTPChaosSpec_selector(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "mode":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._HTTPChaosSpec_mode(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "value":
out.Values[i] = ec._HTTPChaosSpec_value(ctx, field, obj)
case "target":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._HTTPChaosSpec_target(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "abort":
out.Values[i] = ec._HTTPChaosSpec_abort(ctx, field, obj)
case "delay":
out.Values[i] = ec._HTTPChaosSpec_delay(ctx, field, obj)
case "replace":
out.Values[i] = ec._HTTPChaosSpec_replace(ctx, field, obj)
case "patch":
out.Values[i] = ec._HTTPChaosSpec_patch(ctx, field, obj)
case "port":
out.Values[i] = ec._HTTPChaosSpec_port(ctx, field, obj)
case "path":
out.Values[i] = ec._HTTPChaosSpec_path(ctx, field, obj)
case "method":
out.Values[i] = ec._HTTPChaosSpec_method(ctx, field, obj)
case "code":
out.Values[i] = ec._HTTPChaosSpec_code(ctx, field, obj)
case "requestHeaders":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._HTTPChaosSpec_requestHeaders(ctx, field, obj)
return res
})
case "responseHeaders":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._HTTPChaosSpec_responseHeaders(ctx, field, obj)
return res
})
case "duration":
out.Values[i] = ec._HTTPChaosSpec_duration(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var hTTPChaosStatusImplementors = []string{"HTTPChaosStatus"}
func (ec *executionContext) _HTTPChaosStatus(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.HTTPChaosStatus) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, hTTPChaosStatusImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("HTTPChaosStatus")
case "conditions":
out.Values[i] = ec._HTTPChaosStatus_conditions(ctx, field, obj)
case "experiment":
out.Values[i] = ec._HTTPChaosStatus_experiment(ctx, field, obj)
case "instances":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._HTTPChaosStatus_instances(ctx, field, obj)
return res
})
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var iOChaosImplementors = []string{"IOChaos"}
func (ec *executionContext) _IOChaos(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.IOChaos) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, iOChaosImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("IOChaos")
case "kind":
out.Values[i] = ec._IOChaos_kind(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "apiVersion":
out.Values[i] = ec._IOChaos_apiVersion(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "name":
out.Values[i] = ec._IOChaos_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "generateName":
out.Values[i] = ec._IOChaos_generateName(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "namespace":
out.Values[i] = ec._IOChaos_namespace(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "selfLink":
out.Values[i] = ec._IOChaos_selfLink(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "uid":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaos_uid(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "resourceVersion":
out.Values[i] = ec._IOChaos_resourceVersion(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "generation":
out.Values[i] = ec._IOChaos_generation(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "creationTimestamp":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaos_creationTimestamp(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "deletionTimestamp":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaos_deletionTimestamp(ctx, field, obj)
return res
})
case "deletionGracePeriodSeconds":
out.Values[i] = ec._IOChaos_deletionGracePeriodSeconds(ctx, field, obj)
case "labels":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaos_labels(ctx, field, obj)
return res
})
case "annotations":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaos_annotations(ctx, field, obj)
return res
})
case "ownerReferences":
out.Values[i] = ec._IOChaos_ownerReferences(ctx, field, obj)
case "finalizers":
out.Values[i] = ec._IOChaos_finalizers(ctx, field, obj)
case "clusterName":
out.Values[i] = ec._IOChaos_clusterName(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "spec":
out.Values[i] = ec._IOChaos_spec(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "status":
out.Values[i] = ec._IOChaos_status(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "podios":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaos_podios(ctx, field, obj)
return res
})
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var iOChaosActionImplementors = []string{"IOChaosAction"}
func (ec *executionContext) _IOChaosAction(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.IOChaosAction) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, iOChaosActionImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("IOChaosAction")
case "type":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaosAction_type(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "path":
out.Values[i] = ec._IOChaosAction_path(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "methods":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaosAction_methods(ctx, field, obj)
return res
})
case "percent":
out.Values[i] = ec._IOChaosAction_percent(ctx, field, obj)
case "faults":
out.Values[i] = ec._IOChaosAction_faults(ctx, field, obj)
case "latency":
out.Values[i] = ec._IOChaosAction_latency(ctx, field, obj)
case "ino":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaosAction_ino(ctx, field, obj)
return res
})
case "size":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaosAction_size(ctx, field, obj)
return res
})
case "blocks":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaosAction_blocks(ctx, field, obj)
return res
})
case "atime":
out.Values[i] = ec._IOChaosAction_atime(ctx, field, obj)
case "mtime":
out.Values[i] = ec._IOChaosAction_mtime(ctx, field, obj)
case "ctime":
out.Values[i] = ec._IOChaosAction_ctime(ctx, field, obj)
case "kind":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaosAction_kind(ctx, field, obj)
return res
})
case "perm":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaosAction_perm(ctx, field, obj)
return res
})
case "nlink":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaosAction_nlink(ctx, field, obj)
return res
})
case "uid":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaosAction_uid(ctx, field, obj)
return res
})
case "gid":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaosAction_gid(ctx, field, obj)
return res
})
case "rdev":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaosAction_rdev(ctx, field, obj)
return res
})
case "filling":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaosAction_filling(ctx, field, obj)
return res
})
case "maxOccurrences":
out.Values[i] = ec._IOChaosAction_maxOccurrences(ctx, field, obj)
case "maxLength":
out.Values[i] = ec._IOChaosAction_maxLength(ctx, field, obj)
case "source":
out.Values[i] = ec._IOChaosAction_source(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
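
// Nullability drives the error accounting above: the concurrent closure for a
// non-null field such as IOChaosAction.type counts a null result against
// invalids, while closures for nullable fields such as IOChaosAction.methods
// simply return whatever they resolved, graphql.Null included.
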
var iOChaosSpecImplementors = []string{"IOChaosSpec"}
func (ec *executionContext) _IOChaosSpec(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.IOChaosSpec) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, iOChaosSpecImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("IOChaosSpec")
case "containerNames":
out.Values[i] = ec._IOChaosSpec_containerNames(ctx, field, obj)
case "selector":
out.Values[i] = ec._IOChaosSpec_selector(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "mode":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaosSpec_mode(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "value":
out.Values[i] = ec._IOChaosSpec_value(ctx, field, obj)
case "action":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaosSpec_action(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "delay":
out.Values[i] = ec._IOChaosSpec_delay(ctx, field, obj)
case "errno":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaosSpec_errno(ctx, field, obj)
return res
})
case "attr":
out.Values[i] = ec._IOChaosSpec_attr(ctx, field, obj)
case "mistake":
out.Values[i] = ec._IOChaosSpec_mistake(ctx, field, obj)
case "path":
out.Values[i] = ec._IOChaosSpec_path(ctx, field, obj)
case "methods":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaosSpec_methods(ctx, field, obj)
return res
})
case "percent":
out.Values[i] = ec._IOChaosSpec_percent(ctx, field, obj)
case "volumePath":
out.Values[i] = ec._IOChaosSpec_volumePath(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "duration":
out.Values[i] = ec._IOChaosSpec_duration(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var iOChaosStatusImplementors = []string{"IOChaosStatus"}
func (ec *executionContext) _IOChaosStatus(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.IOChaosStatus) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, iOChaosStatusImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("IOChaosStatus")
case "conditions":
out.Values[i] = ec._IOChaosStatus_conditions(ctx, field, obj)
case "experiment":
out.Values[i] = ec._IOChaosStatus_experiment(ctx, field, obj)
case "instances":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IOChaosStatus_instances(ctx, field, obj)
return res
})
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var ioFaultImplementors = []string{"IoFault"}
func (ec *executionContext) _IoFault(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.IoFault) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, ioFaultImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("IoFault")
case "errno":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._IoFault_errno(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "weight":
out.Values[i] = ec._IoFault_weight(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var loggerImplementors = []string{"Logger"}
func (ec *executionContext) _Logger(ctx context.Context, sel ast.SelectionSet) func() graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, loggerImplementors)
ctx = graphql.WithFieldContext(ctx, &graphql.FieldContext{
Object: "Logger",
})
if len(fields) != 1 {
ec.Errorf(ctx, "must subscribe to exactly one stream")
return nil
}
switch fields[0].Name {
case "component":
return ec._Logger_component(ctx, fields[0])
case "pod":
return ec._Logger_pod(ctx, fields[0])
default:
panic("unknown field " + strconv.Quote(fields[0].Name))
}
}
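
// _Logger above is the generated subscription root. gqlgen requires a
// subscription operation to select exactly one top-level stream, hence the
// len(fields) != 1 guard, and the handler returns a func() graphql.Marshaler
// that is invoked once per event rather than a one-shot marshaler. An
// illustrative operation (field arguments and the payload selection are
// elided; they are defined by the schema, not here):
//
//	subscription {
//	    pod { ... }
//	}
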
var lossSpecImplementors = []string{"LossSpec"}
func (ec *executionContext) _LossSpec(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.LossSpec) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, lossSpecImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("LossSpec")
case "loss":
out.Values[i] = ec._LossSpec_loss(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "correlation":
out.Values[i] = ec._LossSpec_correlation(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
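
// _LossSpec above has no resolver-backed fields, so the generator emits plain
// invalids++ increments; marshalers that fan out goroutines via
// out.Concurrently use atomic.AddUint32 on the same counter instead.
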
var mistakeSpecImplementors = []string{"MistakeSpec"}
func (ec *executionContext) _MistakeSpec(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.MistakeSpec) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, mistakeSpecImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("MistakeSpec")
case "filling":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._MistakeSpec_filling(ctx, field, obj)
return res
})
case "maxOccurrences":
out.Values[i] = ec._MistakeSpec_maxOccurrences(ctx, field, obj)
case "maxLength":
out.Values[i] = ec._MistakeSpec_maxLength(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var namespaceImplementors = []string{"Namespace"}
func (ec *executionContext) _Namespace(ctx context.Context, sel ast.SelectionSet, obj *model.Namespace) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, namespaceImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("Namespace")
case "ns":
out.Values[i] = ec._Namespace_ns(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "component":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Namespace_component(ctx, field, obj)
return res
})
case "pod":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Namespace_pod(ctx, field, obj)
return res
})
case "stresschaos":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Namespace_stresschaos(ctx, field, obj)
return res
})
case "iochaos":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Namespace_iochaos(ctx, field, obj)
return res
})
case "podiochaos":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Namespace_podiochaos(ctx, field, obj)
return res
})
case "httpchaos":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Namespace_httpchaos(ctx, field, obj)
return res
})
case "podhttpchaos":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Namespace_podhttpchaos(ctx, field, obj)
return res
})
case "networkchaos":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Namespace_networkchaos(ctx, field, obj)
return res
})
case "podnetworkchaos":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Namespace_podnetworkchaos(ctx, field, obj)
return res
})
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var networkChaosImplementors = []string{"NetworkChaos"}
func (ec *executionContext) _NetworkChaos(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.NetworkChaos) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, networkChaosImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("NetworkChaos")
case "kind":
out.Values[i] = ec._NetworkChaos_kind(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "apiVersion":
out.Values[i] = ec._NetworkChaos_apiVersion(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "name":
out.Values[i] = ec._NetworkChaos_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "generateName":
out.Values[i] = ec._NetworkChaos_generateName(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "namespace":
out.Values[i] = ec._NetworkChaos_namespace(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "selfLink":
out.Values[i] = ec._NetworkChaos_selfLink(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "uid":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._NetworkChaos_uid(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "resourceVersion":
out.Values[i] = ec._NetworkChaos_resourceVersion(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "generation":
out.Values[i] = ec._NetworkChaos_generation(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "creationTimestamp":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._NetworkChaos_creationTimestamp(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "deletionTimestamp":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._NetworkChaos_deletionTimestamp(ctx, field, obj)
return res
})
case "deletionGracePeriodSeconds":
out.Values[i] = ec._NetworkChaos_deletionGracePeriodSeconds(ctx, field, obj)
case "labels":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._NetworkChaos_labels(ctx, field, obj)
return res
})
case "annotations":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._NetworkChaos_annotations(ctx, field, obj)
return res
})
case "ownerReferences":
out.Values[i] = ec._NetworkChaos_ownerReferences(ctx, field, obj)
case "finalizers":
out.Values[i] = ec._NetworkChaos_finalizers(ctx, field, obj)
case "clusterName":
out.Values[i] = ec._NetworkChaos_clusterName(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "podnetwork":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._NetworkChaos_podnetwork(ctx, field, obj)
return res
})
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var ownerReferenceImplementors = []string{"OwnerReference"}
func (ec *executionContext) _OwnerReference(ctx context.Context, sel ast.SelectionSet, obj *v11.OwnerReference) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, ownerReferenceImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("OwnerReference")
case "kind":
out.Values[i] = ec._OwnerReference_kind(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "apiVersion":
out.Values[i] = ec._OwnerReference_apiVersion(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "name":
out.Values[i] = ec._OwnerReference_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "uid":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._OwnerReference_uid(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "controller":
out.Values[i] = ec._OwnerReference_controller(ctx, field, obj)
case "blockOwnerDeletion":
out.Values[i] = ec._OwnerReference_blockOwnerDeletion(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var podImplementors = []string{"Pod"}
func (ec *executionContext) _Pod(ctx context.Context, sel ast.SelectionSet, obj *v1.Pod) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("Pod")
case "kind":
out.Values[i] = ec._Pod_kind(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "apiVersion":
out.Values[i] = ec._Pod_apiVersion(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "name":
out.Values[i] = ec._Pod_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "generateName":
out.Values[i] = ec._Pod_generateName(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "namespace":
out.Values[i] = ec._Pod_namespace(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "selfLink":
out.Values[i] = ec._Pod_selfLink(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "uid":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Pod_uid(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "resourceVersion":
out.Values[i] = ec._Pod_resourceVersion(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "generation":
out.Values[i] = ec._Pod_generation(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "creationTimestamp":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Pod_creationTimestamp(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "deletionTimestamp":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Pod_deletionTimestamp(ctx, field, obj)
return res
})
case "deletionGracePeriodSeconds":
out.Values[i] = ec._Pod_deletionGracePeriodSeconds(ctx, field, obj)
case "labels":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Pod_labels(ctx, field, obj)
return res
})
case "annotations":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Pod_annotations(ctx, field, obj)
return res
})
case "ownerReferences":
out.Values[i] = ec._Pod_ownerReferences(ctx, field, obj)
case "finalizers":
out.Values[i] = ec._Pod_finalizers(ctx, field, obj)
case "clusterName":
out.Values[i] = ec._Pod_clusterName(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "spec":
out.Values[i] = ec._Pod_spec(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "status":
out.Values[i] = ec._Pod_status(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "logs":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Pod_logs(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "daemon":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Pod_daemon(ctx, field, obj)
return res
})
case "processes":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Pod_processes(ctx, field, obj)
return res
})
case "mounts":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Pod_mounts(ctx, field, obj)
return res
})
case "ipset":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Pod_ipset(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "tcQdisc":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Pod_tcQdisc(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "iptables":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Pod_iptables(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
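
// Besides the usual object metadata, the Pod marshaler above serves fields
// that only exist at query time (logs, daemon, processes, mounts, ipset,
// tcQdisc, iptables); like every resolver-backed field they are evaluated
// through out.Concurrently, and the non-null ones (logs, ipset, tcQdisc,
// iptables) feed the invalids counter when they resolve to null.
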
var podConditionImplementors = []string{"PodCondition"}
func (ec *executionContext) _PodCondition(ctx context.Context, sel ast.SelectionSet, obj *v1.PodCondition) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podConditionImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodCondition")
case "type":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodCondition_type(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "status":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodCondition_status(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "lastProbeTime":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodCondition_lastProbeTime(ctx, field, obj)
return res
})
case "lastTransitionTime":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodCondition_lastTransitionTime(ctx, field, obj)
return res
})
case "reason":
out.Values[i] = ec._PodCondition_reason(ctx, field, obj)
case "message":
out.Values[i] = ec._PodCondition_message(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var podHTTPChaosImplementors = []string{"PodHTTPChaos"}
func (ec *executionContext) _PodHTTPChaos(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.PodHttpChaos) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podHTTPChaosImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodHTTPChaos")
case "kind":
out.Values[i] = ec._PodHTTPChaos_kind(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "apiVersion":
out.Values[i] = ec._PodHTTPChaos_apiVersion(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "name":
out.Values[i] = ec._PodHTTPChaos_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "generateName":
out.Values[i] = ec._PodHTTPChaos_generateName(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "namespace":
out.Values[i] = ec._PodHTTPChaos_namespace(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "selfLink":
out.Values[i] = ec._PodHTTPChaos_selfLink(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "uid":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodHTTPChaos_uid(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "resourceVersion":
out.Values[i] = ec._PodHTTPChaos_resourceVersion(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "generation":
out.Values[i] = ec._PodHTTPChaos_generation(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "creationTimestamp":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodHTTPChaos_creationTimestamp(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "deletionTimestamp":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodHTTPChaos_deletionTimestamp(ctx, field, obj)
return res
})
case "deletionGracePeriodSeconds":
out.Values[i] = ec._PodHTTPChaos_deletionGracePeriodSeconds(ctx, field, obj)
case "labels":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodHTTPChaos_labels(ctx, field, obj)
return res
})
case "annotations":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodHTTPChaos_annotations(ctx, field, obj)
return res
})
case "ownerReferences":
out.Values[i] = ec._PodHTTPChaos_ownerReferences(ctx, field, obj)
case "finalizers":
out.Values[i] = ec._PodHTTPChaos_finalizers(ctx, field, obj)
case "clusterName":
out.Values[i] = ec._PodHTTPChaos_clusterName(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "spec":
out.Values[i] = ec._PodHTTPChaos_spec(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "status":
out.Values[i] = ec._PodHTTPChaos_status(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "pod":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodHTTPChaos_pod(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
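
// Note the casing mismatch above: the object is exposed to GraphQL as
// "PodHTTPChaos" but marshals the Kubernetes API type v1alpha1.PodHttpChaos;
// the schema name and the bound Go type are configured independently.
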
var podHttpChaosActionsImplementors = []string{"PodHttpChaosActions"}
func (ec *executionContext) _PodHttpChaosActions(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.PodHttpChaosActions) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podHttpChaosActionsImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodHttpChaosActions")
case "abort":
out.Values[i] = ec._PodHttpChaosActions_abort(ctx, field, obj)
case "delay":
out.Values[i] = ec._PodHttpChaosActions_delay(ctx, field, obj)
case "replace":
out.Values[i] = ec._PodHttpChaosActions_replace(ctx, field, obj)
case "patch":
out.Values[i] = ec._PodHttpChaosActions_patch(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var podHttpChaosPatchActionsImplementors = []string{"PodHttpChaosPatchActions"}
func (ec *executionContext) _PodHttpChaosPatchActions(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.PodHttpChaosPatchActions) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podHttpChaosPatchActionsImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodHttpChaosPatchActions")
case "body":
out.Values[i] = ec._PodHttpChaosPatchActions_body(ctx, field, obj)
case "queries":
out.Values[i] = ec._PodHttpChaosPatchActions_queries(ctx, field, obj)
case "headers":
out.Values[i] = ec._PodHttpChaosPatchActions_headers(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var podHttpChaosPatchBodyActionImplementors = []string{"PodHttpChaosPatchBodyAction"}
func (ec *executionContext) _PodHttpChaosPatchBodyAction(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.PodHttpChaosPatchBodyAction) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podHttpChaosPatchBodyActionImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodHttpChaosPatchBodyAction")
case "type":
out.Values[i] = ec._PodHttpChaosPatchBodyAction_type(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "value":
out.Values[i] = ec._PodHttpChaosPatchBodyAction_value(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var podHttpChaosReplaceActionsImplementors = []string{"PodHttpChaosReplaceActions"}
func (ec *executionContext) _PodHttpChaosReplaceActions(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.PodHttpChaosReplaceActions) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podHttpChaosReplaceActionsImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodHttpChaosReplaceActions")
case "path":
out.Values[i] = ec._PodHttpChaosReplaceActions_path(ctx, field, obj)
case "method":
out.Values[i] = ec._PodHttpChaosReplaceActions_method(ctx, field, obj)
case "code":
out.Values[i] = ec._PodHttpChaosReplaceActions_code(ctx, field, obj)
case "body":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodHttpChaosReplaceActions_body(ctx, field, obj)
return res
})
case "queries":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodHttpChaosReplaceActions_queries(ctx, field, obj)
return res
})
case "headers":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodHttpChaosReplaceActions_headers(ctx, field, obj)
return res
})
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var podHttpChaosRuleImplementors = []string{"PodHttpChaosRule"}
func (ec *executionContext) _PodHttpChaosRule(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.PodHttpChaosRule) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podHttpChaosRuleImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodHttpChaosRule")
case "target":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodHttpChaosRule_target(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "selector":
out.Values[i] = ec._PodHttpChaosRule_selector(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "actions":
out.Values[i] = ec._PodHttpChaosRule_actions(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "source":
out.Values[i] = ec._PodHttpChaosRule_source(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "port":
out.Values[i] = ec._PodHttpChaosRule_port(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var podHttpChaosSelectorImplementors = []string{"PodHttpChaosSelector"}
func (ec *executionContext) _PodHttpChaosSelector(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.PodHttpChaosSelector) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podHttpChaosSelectorImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodHttpChaosSelector")
case "port":
out.Values[i] = ec._PodHttpChaosSelector_port(ctx, field, obj)
case "path":
out.Values[i] = ec._PodHttpChaosSelector_path(ctx, field, obj)
case "method":
out.Values[i] = ec._PodHttpChaosSelector_method(ctx, field, obj)
case "code":
out.Values[i] = ec._PodHttpChaosSelector_code(ctx, field, obj)
case "requestHeaders":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodHttpChaosSelector_requestHeaders(ctx, field, obj)
return res
})
case "responseHeaders":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodHttpChaosSelector_responseHeaders(ctx, field, obj)
return res
})
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var podHttpChaosSpecImplementors = []string{"PodHttpChaosSpec"}
func (ec *executionContext) _PodHttpChaosSpec(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.PodHttpChaosSpec) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podHttpChaosSpecImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodHttpChaosSpec")
case "rules":
out.Values[i] = ec._PodHttpChaosSpec_rules(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var podHttpChaosStatusImplementors = []string{"PodHttpChaosStatus"}
func (ec *executionContext) _PodHttpChaosStatus(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.PodHttpChaosStatus) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podHttpChaosStatusImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodHttpChaosStatus")
case "pid":
out.Values[i] = ec._PodHttpChaosStatus_pid(ctx, field, obj)
case "startTime":
out.Values[i] = ec._PodHttpChaosStatus_startTime(ctx, field, obj)
case "failedMessage":
out.Values[i] = ec._PodHttpChaosStatus_failedMessage(ctx, field, obj)
case "observedGeneration":
out.Values[i] = ec._PodHttpChaosStatus_observedGeneration(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var podIOChaosImplementors = []string{"PodIOChaos"}
func (ec *executionContext) _PodIOChaos(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.PodIOChaos) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podIOChaosImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodIOChaos")
case "kind":
out.Values[i] = ec._PodIOChaos_kind(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "apiVersion":
out.Values[i] = ec._PodIOChaos_apiVersion(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "name":
out.Values[i] = ec._PodIOChaos_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "generateName":
out.Values[i] = ec._PodIOChaos_generateName(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "namespace":
out.Values[i] = ec._PodIOChaos_namespace(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "selfLink":
out.Values[i] = ec._PodIOChaos_selfLink(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "uid":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodIOChaos_uid(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "resourceVersion":
out.Values[i] = ec._PodIOChaos_resourceVersion(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "generation":
out.Values[i] = ec._PodIOChaos_generation(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "creationTimestamp":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodIOChaos_creationTimestamp(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "deletionTimestamp":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodIOChaos_deletionTimestamp(ctx, field, obj)
return res
})
case "deletionGracePeriodSeconds":
out.Values[i] = ec._PodIOChaos_deletionGracePeriodSeconds(ctx, field, obj)
case "labels":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodIOChaos_labels(ctx, field, obj)
return res
})
case "annotations":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodIOChaos_annotations(ctx, field, obj)
return res
})
case "ownerReferences":
out.Values[i] = ec._PodIOChaos_ownerReferences(ctx, field, obj)
case "finalizers":
out.Values[i] = ec._PodIOChaos_finalizers(ctx, field, obj)
case "clusterName":
out.Values[i] = ec._PodIOChaos_clusterName(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "spec":
out.Values[i] = ec._PodIOChaos_spec(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "status":
out.Values[i] = ec._PodIOChaos_status(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "pod":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodIOChaos_pod(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "ios":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodIOChaos_ios(ctx, field, obj)
return res
})
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var podIOChaosSpecImplementors = []string{"PodIOChaosSpec"}
func (ec *executionContext) _PodIOChaosSpec(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.PodIOChaosSpec) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podIOChaosSpecImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodIOChaosSpec")
case "volumeMountPath":
out.Values[i] = ec._PodIOChaosSpec_volumeMountPath(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "container":
out.Values[i] = ec._PodIOChaosSpec_container(ctx, field, obj)
case "actions":
out.Values[i] = ec._PodIOChaosSpec_actions(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var podIOChaosStatusImplementors = []string{"PodIOChaosStatus"}
func (ec *executionContext) _PodIOChaosStatus(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.PodIOChaosStatus) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podIOChaosStatusImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodIOChaosStatus")
case "pid":
out.Values[i] = ec._PodIOChaosStatus_pid(ctx, field, obj)
case "startTime":
out.Values[i] = ec._PodIOChaosStatus_startTime(ctx, field, obj)
case "failedMessage":
out.Values[i] = ec._PodIOChaosStatus_failedMessage(ctx, field, obj)
case "observedGeneration":
out.Values[i] = ec._PodIOChaosStatus_observedGeneration(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var podIPImplementors = []string{"PodIP"}
func (ec *executionContext) _PodIP(ctx context.Context, sel ast.SelectionSet, obj *v1.PodIP) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podIPImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodIP")
case "ip":
out.Values[i] = ec._PodIP_ip(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var podNetworkChaosImplementors = []string{"PodNetworkChaos"}
func (ec *executionContext) _PodNetworkChaos(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.PodNetworkChaos) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podNetworkChaosImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodNetworkChaos")
case "kind":
out.Values[i] = ec._PodNetworkChaos_kind(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "apiVersion":
out.Values[i] = ec._PodNetworkChaos_apiVersion(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "name":
out.Values[i] = ec._PodNetworkChaos_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "generateName":
out.Values[i] = ec._PodNetworkChaos_generateName(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "namespace":
out.Values[i] = ec._PodNetworkChaos_namespace(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "selfLink":
out.Values[i] = ec._PodNetworkChaos_selfLink(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "uid":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodNetworkChaos_uid(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "resourceVersion":
out.Values[i] = ec._PodNetworkChaos_resourceVersion(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "generation":
out.Values[i] = ec._PodNetworkChaos_generation(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "creationTimestamp":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodNetworkChaos_creationTimestamp(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "deletionTimestamp":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodNetworkChaos_deletionTimestamp(ctx, field, obj)
return res
})
case "deletionGracePeriodSeconds":
out.Values[i] = ec._PodNetworkChaos_deletionGracePeriodSeconds(ctx, field, obj)
case "labels":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodNetworkChaos_labels(ctx, field, obj)
return res
})
case "annotations":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodNetworkChaos_annotations(ctx, field, obj)
return res
})
case "ownerReferences":
out.Values[i] = ec._PodNetworkChaos_ownerReferences(ctx, field, obj)
case "finalizers":
out.Values[i] = ec._PodNetworkChaos_finalizers(ctx, field, obj)
case "clusterName":
out.Values[i] = ec._PodNetworkChaos_clusterName(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "spec":
out.Values[i] = ec._PodNetworkChaos_spec(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "status":
out.Values[i] = ec._PodNetworkChaos_status(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "pod":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodNetworkChaos_pod(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var podNetworkChaosSpecImplementors = []string{"PodNetworkChaosSpec"}
func (ec *executionContext) _PodNetworkChaosSpec(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.PodNetworkChaosSpec) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podNetworkChaosSpecImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodNetworkChaosSpec")
case "ipSets":
out.Values[i] = ec._PodNetworkChaosSpec_ipSets(ctx, field, obj)
case "iptables":
out.Values[i] = ec._PodNetworkChaosSpec_iptables(ctx, field, obj)
case "trafficControls":
out.Values[i] = ec._PodNetworkChaosSpec_trafficControls(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var podNetworkChaosStatusImplementors = []string{"PodNetworkChaosStatus"}
func (ec *executionContext) _PodNetworkChaosStatus(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.PodNetworkChaosStatus) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podNetworkChaosStatusImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodNetworkChaosStatus")
case "failedMessage":
out.Values[i] = ec._PodNetworkChaosStatus_failedMessage(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "observedGeneration":
out.Values[i] = ec._PodNetworkChaosStatus_observedGeneration(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var podSelectorSpecImplementors = []string{"PodSelectorSpec"}
func (ec *executionContext) _PodSelectorSpec(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.PodSelectorSpec) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podSelectorSpecImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodSelectorSpec")
case "namespaces":
out.Values[i] = ec._PodSelectorSpec_namespaces(ctx, field, obj)
case "nodes":
out.Values[i] = ec._PodSelectorSpec_nodes(ctx, field, obj)
case "pods":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodSelectorSpec_pods(ctx, field, obj)
return res
})
case "nodeSelectors":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodSelectorSpec_nodeSelectors(ctx, field, obj)
return res
})
case "fieldSelectors":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodSelectorSpec_fieldSelectors(ctx, field, obj)
return res
})
case "labelSelectors":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodSelectorSpec_labelSelectors(ctx, field, obj)
return res
})
case "annotationSelectors":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodSelectorSpec_annotationSelectors(ctx, field, obj)
return res
})
case "podPhaseSelectors":
out.Values[i] = ec._PodSelectorSpec_podPhaseSelectors(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var podSpecImplementors = []string{"PodSpec"}
func (ec *executionContext) _PodSpec(ctx context.Context, sel ast.SelectionSet, obj *v1.PodSpec) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podSpecImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodSpec")
case "nodeName":
out.Values[i] = ec._PodSpec_nodeName(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var podStatusImplementors = []string{"PodStatus"}
func (ec *executionContext) _PodStatus(ctx context.Context, sel ast.SelectionSet, obj *v1.PodStatus) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, podStatusImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PodStatus")
case "phase":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodStatus_phase(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "conditions":
out.Values[i] = ec._PodStatus_conditions(ctx, field, obj)
case "message":
out.Values[i] = ec._PodStatus_message(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "reason":
out.Values[i] = ec._PodStatus_reason(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "nominatedNodeName":
out.Values[i] = ec._PodStatus_nominatedNodeName(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "hostIP":
out.Values[i] = ec._PodStatus_hostIP(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "podIP":
out.Values[i] = ec._PodStatus_podIP(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "podIPs":
out.Values[i] = ec._PodStatus_podIPs(ctx, field, obj)
case "startTime":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodStatus_startTime(ctx, field, obj)
return res
})
case "initContainerStatuses":
out.Values[i] = ec._PodStatus_initContainerStatuses(ctx, field, obj)
case "containerStatuses":
out.Values[i] = ec._PodStatus_containerStatuses(ctx, field, obj)
case "qosClass":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._PodStatus_qosClass(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "ephemeralContainerStatuses":
out.Values[i] = ec._PodStatus_ephemeralContainerStatuses(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}

var processImplementors = []string{"Process"}
func (ec *executionContext) _Process(ctx context.Context, sel ast.SelectionSet, obj *model.Process) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, processImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("Process")
case "pod":
out.Values[i] = ec._Process_pod(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "pid":
out.Values[i] = ec._Process_pid(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "command":
out.Values[i] = ec._Process_command(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "fds":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Process_fds(ctx, field, obj)
return res
})
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
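// _Query is the execution entry point for query operations. Its declared
// fields are nullable, so resolver failures are reported through ec.Error
// rather than through the invalids counter, which is never incremented here.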
var queryImplementors = []string{"Query"}
func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, queryImplementors)
ctx = graphql.WithFieldContext(ctx, &graphql.FieldContext{
Object: "Query",
})
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("Query")
case "namespace":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Query_namespace(ctx, field)
return res
})
case "__type":
out.Values[i] = ec._Query___type(ctx, field)
case "__schema":
out.Values[i] = ec._Query___schema(ctx, field)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var rawIPSetImplementors = []string{"RawIPSet"}
func (ec *executionContext) _RawIPSet(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.RawIPSet) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, rawIPSetImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("RawIPSet")
case "name":
out.Values[i] = ec._RawIPSet_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "cidrs":
out.Values[i] = ec._RawIPSet_cidrs(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "source":
out.Values[i] = ec._RawIPSet_source(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var rawIptablesImplementors = []string{"RawIptables"}
func (ec *executionContext) _RawIptables(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.RawIptables) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, rawIptablesImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("RawIptables")
case "name":
out.Values[i] = ec._RawIptables_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "ipSets":
out.Values[i] = ec._RawIptables_ipSets(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "direction":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._RawIptables_direction(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "source":
out.Values[i] = ec._RawIptables_source(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var rawTrafficControlImplementors = []string{"RawTrafficControl"}
func (ec *executionContext) _RawTrafficControl(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.RawTrafficControl) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, rawTrafficControlImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("RawTrafficControl")
case "type":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._RawTrafficControl_type(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "delay":
out.Values[i] = ec._RawTrafficControl_delay(ctx, field, obj)
case "loss":
out.Values[i] = ec._RawTrafficControl_loss(ctx, field, obj)
case "duplicate":
out.Values[i] = ec._RawTrafficControl_duplicate(ctx, field, obj)
case "corrupt":
out.Values[i] = ec._RawTrafficControl_corrupt(ctx, field, obj)
case "Bandwidth":
out.Values[i] = ec._RawTrafficControl_Bandwidth(ctx, field, obj)
case "ipSet":
out.Values[i] = ec._RawTrafficControl_ipSet(ctx, field, obj)
case "source":
out.Values[i] = ec._RawTrafficControl_source(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var recordImplementors = []string{"Record"}
func (ec *executionContext) _Record(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.Record) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, recordImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("Record")
case "id":
out.Values[i] = ec._Record_id(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "selectorKey":
out.Values[i] = ec._Record_selectorKey(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "phase":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Record_phase(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var reorderSpecImplementors = []string{"ReorderSpec"}
func (ec *executionContext) _ReorderSpec(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.ReorderSpec) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, reorderSpecImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("ReorderSpec")
case "reorder":
out.Values[i] = ec._ReorderSpec_reorder(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "correlation":
out.Values[i] = ec._ReorderSpec_correlation(ctx, field, obj)
case "gap":
out.Values[i] = ec._ReorderSpec_gap(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var stressChaosImplementors = []string{"StressChaos"}
func (ec *executionContext) _StressChaos(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.StressChaos) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, stressChaosImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("StressChaos")
case "kind":
out.Values[i] = ec._StressChaos_kind(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "apiVersion":
out.Values[i] = ec._StressChaos_apiVersion(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "name":
out.Values[i] = ec._StressChaos_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "generateName":
out.Values[i] = ec._StressChaos_generateName(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "namespace":
out.Values[i] = ec._StressChaos_namespace(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "selfLink":
out.Values[i] = ec._StressChaos_selfLink(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "uid":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._StressChaos_uid(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "resourceVersion":
out.Values[i] = ec._StressChaos_resourceVersion(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "generation":
out.Values[i] = ec._StressChaos_generation(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
case "creationTimestamp":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._StressChaos_creationTimestamp(ctx, field, obj)
if res == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
return res
})
case "deletionTimestamp":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._StressChaos_deletionTimestamp(ctx, field, obj)
return res
})
case "deletionGracePeriodSeconds":
out.Values[i] = ec._StressChaos_deletionGracePeriodSeconds(ctx, field, obj)
case "labels":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._StressChaos_labels(ctx, field, obj)
return res
})
case "annotations":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._StressChaos_annotations(ctx, field, obj)
return res
})
case "ownerReferences":
out.Values[i] = ec._StressChaos_ownerReferences(ctx, field, obj)
case "finalizers":
out.Values[i] = ec._StressChaos_finalizers(ctx, field, obj)
case "clusterName":
out.Values[i] = ec._StressChaos_clusterName(ctx, field, obj)
if out.Values[i] == graphql.Null {
atomic.AddUint32(&invalids, 1)
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var timespecImplementors = []string{"Timespec"}
func (ec *executionContext) _Timespec(ctx context.Context, sel ast.SelectionSet, obj *v1alpha1.Timespec) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, timespecImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("Timespec")
case "sec":
out.Values[i] = ec._Timespec_sec(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "nsec":
out.Values[i] = ec._Timespec_nsec(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
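// The ___-prefixed marshalers below back GraphQL introspection
// (__Directive, __EnumValue, __Field, __InputValue, __Schema and __Type).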
var __DirectiveImplementors = []string{"__Directive"}
func (ec *executionContext) ___Directive(ctx context.Context, sel ast.SelectionSet, obj *introspection.Directive) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, __DirectiveImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("__Directive")
case "name":
out.Values[i] = ec.___Directive_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "description":
out.Values[i] = ec.___Directive_description(ctx, field, obj)
case "locations":
out.Values[i] = ec.___Directive_locations(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "args":
out.Values[i] = ec.___Directive_args(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var __EnumValueImplementors = []string{"__EnumValue"}
func (ec *executionContext) ___EnumValue(ctx context.Context, sel ast.SelectionSet, obj *introspection.EnumValue) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, __EnumValueImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("__EnumValue")
case "name":
out.Values[i] = ec.___EnumValue_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "description":
out.Values[i] = ec.___EnumValue_description(ctx, field, obj)
case "isDeprecated":
out.Values[i] = ec.___EnumValue_isDeprecated(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "deprecationReason":
out.Values[i] = ec.___EnumValue_deprecationReason(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var __FieldImplementors = []string{"__Field"}
func (ec *executionContext) ___Field(ctx context.Context, sel ast.SelectionSet, obj *introspection.Field) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, __FieldImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("__Field")
case "name":
out.Values[i] = ec.___Field_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "description":
out.Values[i] = ec.___Field_description(ctx, field, obj)
case "args":
out.Values[i] = ec.___Field_args(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "type":
out.Values[i] = ec.___Field_type(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "isDeprecated":
out.Values[i] = ec.___Field_isDeprecated(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "deprecationReason":
out.Values[i] = ec.___Field_deprecationReason(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var __InputValueImplementors = []string{"__InputValue"}
func (ec *executionContext) ___InputValue(ctx context.Context, sel ast.SelectionSet, obj *introspection.InputValue) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, __InputValueImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("__InputValue")
case "name":
out.Values[i] = ec.___InputValue_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "description":
out.Values[i] = ec.___InputValue_description(ctx, field, obj)
case "type":
out.Values[i] = ec.___InputValue_type(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "defaultValue":
out.Values[i] = ec.___InputValue_defaultValue(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var __SchemaImplementors = []string{"__Schema"}
func (ec *executionContext) ___Schema(ctx context.Context, sel ast.SelectionSet, obj *introspection.Schema) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, __SchemaImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("__Schema")
case "types":
out.Values[i] = ec.___Schema_types(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "queryType":
out.Values[i] = ec.___Schema_queryType(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "mutationType":
out.Values[i] = ec.___Schema_mutationType(ctx, field, obj)
case "subscriptionType":
out.Values[i] = ec.___Schema_subscriptionType(ctx, field, obj)
case "directives":
out.Values[i] = ec.___Schema_directives(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var __TypeImplementors = []string{"__Type"}
func (ec *executionContext) ___Type(ctx context.Context, sel ast.SelectionSet, obj *introspection.Type) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, __TypeImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("__Type")
case "kind":
out.Values[i] = ec.___Type_kind(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "name":
out.Values[i] = ec.___Type_name(ctx, field, obj)
case "description":
out.Values[i] = ec.___Type_description(ctx, field, obj)
case "fields":
out.Values[i] = ec.___Type_fields(ctx, field, obj)
case "interfaces":
out.Values[i] = ec.___Type_interfaces(ctx, field, obj)
case "possibleTypes":
out.Values[i] = ec.___Type_possibleTypes(ctx, field, obj)
case "enumValues":
out.Values[i] = ec.___Type_enumValues(ctx, field, obj)
case "inputFields":
out.Values[i] = ec.___Type_inputFields(ctx, field, obj)
case "ofType":
out.Values[i] = ec.___Type_ofType(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
// endregion **************************** object.gotpl ****************************

// region ***************************** type.gotpl *****************************
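// Converter naming in this region: an N prefix marks a non-null (required)
// type whose marshaler raises "must not be null" for nil input, while an O
// prefix marks an optional type that silently maps nil to graphql.Null. The
// runic characters are gqlgen's identifier-safe encoding of Go type syntax:
// ᚗ for '.', ᚋ for '/', ᚑ for '-', ᚐ before the type name, ᚖ for a pointer,
// ᚕ for a slice, and ᚄ for a non-null list element.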
func (ec *executionContext) unmarshalNBoolean2bool(ctx context.Context, v interface{}) (bool, error) {
res, err := graphql.UnmarshalBoolean(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNBoolean2bool(ctx context.Context, sel ast.SelectionSet, v bool) graphql.Marshaler {
res := graphql.MarshalBoolean(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
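// Scalar N-marshalers share the guard above: when marshaling yields
// graphql.Null and no error has been recorded for the field yet, a single
// "must not be null" error is attached so the violation is reported once.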
func (ec *executionContext) marshalNChaosCondition2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐChaosCondition(ctx context.Context, sel ast.SelectionSet, v v1alpha1.ChaosCondition) graphql.Marshaler {
return ec._ChaosCondition(ctx, sel, &v)
}
func (ec *executionContext) unmarshalNComponent2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋpkgᚋctrlserverᚋgraphᚋmodelᚐComponent(ctx context.Context, v interface{}) (model.Component, error) {
var res model.Component
err := res.UnmarshalGQL(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNComponent2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋpkgᚋctrlserverᚋgraphᚋmodelᚐComponent(ctx context.Context, sel ast.SelectionSet, v model.Component) graphql.Marshaler {
return v
}
func (ec *executionContext) marshalNContainerStatus2k8sᚗioᚋapiᚋcoreᚋv1ᚐContainerStatus(ctx context.Context, sel ast.SelectionSet, v v1.ContainerStatus) graphql.Marshaler {
return ec._ContainerStatus(ctx, sel, &v)
}
func (ec *executionContext) marshalNFd2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋpkgᚋctrlserverᚋgraphᚋmodelᚐFd(ctx context.Context, sel ast.SelectionSet, v *model.Fd) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
return ec._Fd(ctx, sel, v)
}
func (ec *executionContext) marshalNHTTPChaos2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐHTTPChaos(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.HTTPChaos) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
return ec._HTTPChaos(ctx, sel, v)
}
func (ec *executionContext) marshalNHTTPChaosSpec2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐHTTPChaosSpec(ctx context.Context, sel ast.SelectionSet, v v1alpha1.HTTPChaosSpec) graphql.Marshaler {
return ec._HTTPChaosSpec(ctx, sel, &v)
}
func (ec *executionContext) marshalNHTTPChaosStatus2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐHTTPChaosStatus(ctx context.Context, sel ast.SelectionSet, v v1alpha1.HTTPChaosStatus) graphql.Marshaler {
return ec._HTTPChaosStatus(ctx, sel, &v)
}
func (ec *executionContext) marshalNIOChaos2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐIOChaos(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.IOChaos) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
return ec._IOChaos(ctx, sel, v)
}
func (ec *executionContext) marshalNIOChaosAction2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐIOChaosAction(ctx context.Context, sel ast.SelectionSet, v v1alpha1.IOChaosAction) graphql.Marshaler {
return ec._IOChaosAction(ctx, sel, &v)
}
func (ec *executionContext) marshalNIOChaosSpec2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐIOChaosSpec(ctx context.Context, sel ast.SelectionSet, v v1alpha1.IOChaosSpec) graphql.Marshaler {
return ec._IOChaosSpec(ctx, sel, &v)
}
func (ec *executionContext) marshalNIOChaosStatus2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐIOChaosStatus(ctx context.Context, sel ast.SelectionSet, v v1alpha1.IOChaosStatus) graphql.Marshaler {
return ec._IOChaosStatus(ctx, sel, &v)
}
func (ec *executionContext) unmarshalNInt2int(ctx context.Context, v interface{}) (int, error) {
res, err := graphql.UnmarshalInt(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNInt2int(ctx context.Context, sel ast.SelectionSet, v int) graphql.Marshaler {
res := graphql.MarshalInt(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
func (ec *executionContext) unmarshalNInt2int32(ctx context.Context, v interface{}) (int32, error) {
res, err := graphql.UnmarshalInt32(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNInt2int32(ctx context.Context, sel ast.SelectionSet, v int32) graphql.Marshaler {
res := graphql.MarshalInt32(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
func (ec *executionContext) unmarshalNInt2int64(ctx context.Context, v interface{}) (int64, error) {
res, err := graphql.UnmarshalInt64(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNInt2int64(ctx context.Context, sel ast.SelectionSet, v int64) graphql.Marshaler {
res := graphql.MarshalInt64(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
func (ec *executionContext) marshalNIoFault2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐIoFault(ctx context.Context, sel ast.SelectionSet, v v1alpha1.IoFault) graphql.Marshaler {
return ec._IoFault(ctx, sel, &v)
}
func (ec *executionContext) marshalNNamespace2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋpkgᚋctrlserverᚋgraphᚋmodelᚐNamespace(ctx context.Context, sel ast.SelectionSet, v *model.Namespace) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
return ec._Namespace(ctx, sel, v)
}
func (ec *executionContext) marshalNNetworkChaos2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐNetworkChaos(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.NetworkChaos) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
return ec._NetworkChaos(ctx, sel, v)
}
func (ec *executionContext) marshalNOwnerReference2k8sᚗioᚋapimachineryᚋpkgᚋapisᚋmetaᚋv1ᚐOwnerReference(ctx context.Context, sel ast.SelectionSet, v v11.OwnerReference) graphql.Marshaler {
return ec._OwnerReference(ctx, sel, &v)
}
func (ec *executionContext) marshalNPod2k8sᚗioᚋapiᚋcoreᚋv1ᚐPod(ctx context.Context, sel ast.SelectionSet, v v1.Pod) graphql.Marshaler {
return ec._Pod(ctx, sel, &v)
}
func (ec *executionContext) marshalNPod2ᚖk8sᚗioᚋapiᚋcoreᚋv1ᚐPod(ctx context.Context, sel ast.SelectionSet, v *v1.Pod) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
return ec._Pod(ctx, sel, v)
}
func (ec *executionContext) marshalNPodCondition2k8sᚗioᚋapiᚋcoreᚋv1ᚐPodCondition(ctx context.Context, sel ast.SelectionSet, v v1.PodCondition) graphql.Marshaler {
return ec._PodCondition(ctx, sel, &v)
}
func (ec *executionContext) marshalNPodHTTPChaos2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaos(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.PodHttpChaos) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
return ec._PodHTTPChaos(ctx, sel, v)
}
func (ec *executionContext) marshalNPodHttpChaosActions2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosActions(ctx context.Context, sel ast.SelectionSet, v v1alpha1.PodHttpChaosActions) graphql.Marshaler {
return ec._PodHttpChaosActions(ctx, sel, &v)
}
func (ec *executionContext) marshalNPodHttpChaosRule2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosRule(ctx context.Context, sel ast.SelectionSet, v v1alpha1.PodHttpChaosRule) graphql.Marshaler {
return ec._PodHttpChaosRule(ctx, sel, &v)
}
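// List marshalers such as the one below fan out per element: a slice of
// length one is marshaled inline, longer slices get one goroutine per
// element synchronized by a WaitGroup, and a panic in any element nils the
// whole array after the recovered error is reported.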
func (ec *executionContext) marshalNPodHttpChaosRule2ᚕgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosRuleᚄ(ctx context.Context, sel ast.SelectionSet, v []v1alpha1.PodHttpChaosRule) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNPodHttpChaosRule2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosRule(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalNPodHttpChaosSelector2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosSelector(ctx context.Context, sel ast.SelectionSet, v v1alpha1.PodHttpChaosSelector) graphql.Marshaler {
return ec._PodHttpChaosSelector(ctx, sel, &v)
}
func (ec *executionContext) marshalNPodHttpChaosSpec2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosSpec(ctx context.Context, sel ast.SelectionSet, v v1alpha1.PodHttpChaosSpec) graphql.Marshaler {
return ec._PodHttpChaosSpec(ctx, sel, &v)
}
func (ec *executionContext) marshalNPodHttpChaosStatus2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosStatus(ctx context.Context, sel ast.SelectionSet, v v1alpha1.PodHttpChaosStatus) graphql.Marshaler {
return ec._PodHttpChaosStatus(ctx, sel, &v)
}
func (ec *executionContext) marshalNPodIOChaos2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodIOChaos(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.PodIOChaos) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
return ec._PodIOChaos(ctx, sel, v)
}
func (ec *executionContext) marshalNPodIOChaosSpec2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodIOChaosSpec(ctx context.Context, sel ast.SelectionSet, v v1alpha1.PodIOChaosSpec) graphql.Marshaler {
return ec._PodIOChaosSpec(ctx, sel, &v)
}
func (ec *executionContext) marshalNPodIOChaosStatus2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodIOChaosStatus(ctx context.Context, sel ast.SelectionSet, v v1alpha1.PodIOChaosStatus) graphql.Marshaler {
return ec._PodIOChaosStatus(ctx, sel, &v)
}
func (ec *executionContext) marshalNPodIP2k8sᚗioᚋapiᚋcoreᚋv1ᚐPodIP(ctx context.Context, sel ast.SelectionSet, v v1.PodIP) graphql.Marshaler {
return ec._PodIP(ctx, sel, &v)
}
func (ec *executionContext) marshalNPodNetworkChaos2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodNetworkChaos(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.PodNetworkChaos) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
return ec._PodNetworkChaos(ctx, sel, v)
}
func (ec *executionContext) marshalNPodNetworkChaosSpec2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodNetworkChaosSpec(ctx context.Context, sel ast.SelectionSet, v v1alpha1.PodNetworkChaosSpec) graphql.Marshaler {
return ec._PodNetworkChaosSpec(ctx, sel, &v)
}
func (ec *executionContext) marshalNPodNetworkChaosStatus2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodNetworkChaosStatus(ctx context.Context, sel ast.SelectionSet, v v1alpha1.PodNetworkChaosStatus) graphql.Marshaler {
return ec._PodNetworkChaosStatus(ctx, sel, &v)
}
func (ec *executionContext) marshalNPodSelectorSpec2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodSelectorSpec(ctx context.Context, sel ast.SelectionSet, v v1alpha1.PodSelectorSpec) graphql.Marshaler {
return ec._PodSelectorSpec(ctx, sel, &v)
}
func (ec *executionContext) marshalNPodSpec2k8sᚗioᚋapiᚋcoreᚋv1ᚐPodSpec(ctx context.Context, sel ast.SelectionSet, v v1.PodSpec) graphql.Marshaler {
return ec._PodSpec(ctx, sel, &v)
}
func (ec *executionContext) marshalNPodStatus2k8sᚗioᚋapiᚋcoreᚋv1ᚐPodStatus(ctx context.Context, sel ast.SelectionSet, v v1.PodStatus) graphql.Marshaler {
return ec._PodStatus(ctx, sel, &v)
}
func (ec *executionContext) marshalNProcess2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋpkgᚋctrlserverᚋgraphᚋmodelᚐProcess(ctx context.Context, sel ast.SelectionSet, v *model.Process) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
return ec._Process(ctx, sel, v)
}
func (ec *executionContext) marshalNRawIPSet2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐRawIPSet(ctx context.Context, sel ast.SelectionSet, v v1alpha1.RawIPSet) graphql.Marshaler {
return ec._RawIPSet(ctx, sel, &v)
}
func (ec *executionContext) marshalNRawIptables2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐRawIptables(ctx context.Context, sel ast.SelectionSet, v v1alpha1.RawIptables) graphql.Marshaler {
return ec._RawIptables(ctx, sel, &v)
}
func (ec *executionContext) marshalNRawTrafficControl2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐRawTrafficControl(ctx context.Context, sel ast.SelectionSet, v v1alpha1.RawTrafficControl) graphql.Marshaler {
return ec._RawTrafficControl(ctx, sel, &v)
}
func (ec *executionContext) marshalNRecord2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐRecord(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.Record) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
return ec._Record(ctx, sel, v)
}
func (ec *executionContext) marshalNStressChaos2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐStressChaos(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.StressChaos) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
return ec._StressChaos(ctx, sel, v)
}
func (ec *executionContext) unmarshalNString2string(ctx context.Context, v interface{}) (string, error) {
res, err := graphql.UnmarshalString(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNString2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
res := graphql.MarshalString(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
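// Following the GraphQL input-coercion rules, a single value supplied where
// a list is expected is wrapped into a one-element slice before each element
// is unmarshalled with its index pushed onto the error path.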
func (ec *executionContext) unmarshalNString2ᚕstringᚄ(ctx context.Context, v interface{}) ([]string, error) {
var vSlice []interface{}
if v != nil {
if tmp1, ok := v.([]interface{}); ok {
vSlice = tmp1
} else {
vSlice = []interface{}{v}
}
}
var err error
res := make([]string, len(vSlice))
for i := range vSlice {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
res[i], err = ec.unmarshalNString2string(ctx, vSlice[i])
if err != nil {
return nil, err
}
}
return res, nil
}
func (ec *executionContext) marshalNString2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler {
ret := make(graphql.Array, len(v))
for i := range v {
ret[i] = ec.marshalNString2string(ctx, sel, v[i])
}
return ret
}
func (ec *executionContext) unmarshalNTime2timeᚐTime(ctx context.Context, v interface{}) (time.Time, error) {
res, err := graphql.UnmarshalTime(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNTime2timeᚐTime(ctx context.Context, sel ast.SelectionSet, v time.Time) graphql.Marshaler {
res := graphql.MarshalTime(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
func (ec *executionContext) unmarshalNTime2ᚖtimeᚐTime(ctx context.Context, v interface{}) (*time.Time, error) {
res, err := graphql.UnmarshalTime(v)
return &res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNTime2ᚖtimeᚐTime(ctx context.Context, sel ast.SelectionSet, v *time.Time) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := graphql.MarshalTime(*v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
func (ec *executionContext) marshalN__Directive2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx context.Context, sel ast.SelectionSet, v introspection.Directive) graphql.Marshaler {
return ec.___Directive(ctx, sel, &v)
}
func (ec *executionContext) marshalN__Directive2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirectiveᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Directive) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__Directive2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) unmarshalN__DirectiveLocation2string(ctx context.Context, v interface{}) (string, error) {
res, err := graphql.UnmarshalString(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalN__DirectiveLocation2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
res := graphql.MarshalString(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
func (ec *executionContext) unmarshalN__DirectiveLocation2ᚕstringᚄ(ctx context.Context, v interface{}) ([]string, error) {
var vSlice []interface{}
if v != nil {
if tmp1, ok := v.([]interface{}); ok {
vSlice = tmp1
} else {
vSlice = []interface{}{v}
}
}
var err error
res := make([]string, len(vSlice))
for i := range vSlice {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
res[i], err = ec.unmarshalN__DirectiveLocation2string(ctx, vSlice[i])
if err != nil {
return nil, err
}
}
return res, nil
}
func (ec *executionContext) marshalN__DirectiveLocation2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__DirectiveLocation2string(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalN__EnumValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValue(ctx context.Context, sel ast.SelectionSet, v introspection.EnumValue) graphql.Marshaler {
return ec.___EnumValue(ctx, sel, &v)
}
func (ec *executionContext) marshalN__Field2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐField(ctx context.Context, sel ast.SelectionSet, v introspection.Field) graphql.Marshaler {
return ec.___Field(ctx, sel, &v)
}
func (ec *executionContext) marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx context.Context, sel ast.SelectionSet, v introspection.InputValue) graphql.Marshaler {
return ec.___InputValue(ctx, sel, &v)
}
func (ec *executionContext) marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.InputValue) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v introspection.Type) graphql.Marshaler {
return ec.___Type(ctx, sel, &v)
}
func (ec *executionContext) marshalN__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Type) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v *introspection.Type) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
return ec.___Type(ctx, sel, v)
}
func (ec *executionContext) unmarshalN__TypeKind2string(ctx context.Context, v interface{}) (string, error) {
res, err := graphql.UnmarshalString(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalN__TypeKind2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
res := graphql.MarshalString(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
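// The O-prefixed converters handle optional (nullable) schema types: nil is
// mapped straight to graphql.Null (or to nil on the unmarshal side) without
// raising a field error, in contrast to the N-prefixed converters above.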
func (ec *executionContext) marshalOAttrOverrideSpec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐAttrOverrideSpec(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.AttrOverrideSpec) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec._AttrOverrideSpec(ctx, sel, v)
}
func (ec *executionContext) marshalOBandwidthSpec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐBandwidthSpec(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.BandwidthSpec) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec._BandwidthSpec(ctx, sel, v)
}
func (ec *executionContext) unmarshalOBoolean2bool(ctx context.Context, v interface{}) (bool, error) {
res, err := graphql.UnmarshalBoolean(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOBoolean2bool(ctx context.Context, sel ast.SelectionSet, v bool) graphql.Marshaler {
return graphql.MarshalBoolean(v)
}
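// Optional scalars come in value and pointer forms: the value form is used
// when the bound Go field cannot itself be nil, the pointer form when
// absence must stay distinguishable from the zero value.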
func (ec *executionContext) unmarshalOBoolean2ᚖbool(ctx context.Context, v interface{}) (*bool, error) {
if v == nil {
return nil, nil
}
res, err := graphql.UnmarshalBoolean(v)
return &res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOBoolean2ᚖbool(ctx context.Context, sel ast.SelectionSet, v *bool) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return graphql.MarshalBoolean(*v)
}
func (ec *executionContext) marshalOChaosCondition2ᚕgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐChaosConditionᚄ(ctx context.Context, sel ast.SelectionSet, v []v1alpha1.ChaosCondition) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNChaosCondition2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐChaosCondition(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalOContainerState2k8sᚗioᚋapiᚋcoreᚋv1ᚐContainerState(ctx context.Context, sel ast.SelectionSet, v v1.ContainerState) graphql.Marshaler {
return ec._ContainerState(ctx, sel, &v)
}
func (ec *executionContext) marshalOContainerStateRunning2ᚖk8sᚗioᚋapiᚋcoreᚋv1ᚐContainerStateRunning(ctx context.Context, sel ast.SelectionSet, v *v1.ContainerStateRunning) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec._ContainerStateRunning(ctx, sel, v)
}
func (ec *executionContext) marshalOContainerStateTerminated2ᚖk8sᚗioᚋapiᚋcoreᚋv1ᚐContainerStateTerminated(ctx context.Context, sel ast.SelectionSet, v *v1.ContainerStateTerminated) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec._ContainerStateTerminated(ctx, sel, v)
}
func (ec *executionContext) marshalOContainerStateWaiting2ᚖk8sᚗioᚋapiᚋcoreᚋv1ᚐContainerStateWaiting(ctx context.Context, sel ast.SelectionSet, v *v1.ContainerStateWaiting) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec._ContainerStateWaiting(ctx, sel, v)
}
func (ec *executionContext) marshalOContainerStatus2ᚕk8sᚗioᚋapiᚋcoreᚋv1ᚐContainerStatusᚄ(ctx context.Context, sel ast.SelectionSet, v []v1.ContainerStatus) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNContainerStatus2k8sᚗioᚋapiᚋcoreᚋv1ᚐContainerStatus(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalOCorruptSpec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐCorruptSpec(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.CorruptSpec) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec._CorruptSpec(ctx, sel, v)
}
func (ec *executionContext) marshalODelaySpec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐDelaySpec(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.DelaySpec) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec._DelaySpec(ctx, sel, v)
}
func (ec *executionContext) marshalODuplicateSpec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐDuplicateSpec(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.DuplicateSpec) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec._DuplicateSpec(ctx, sel, v)
}
func (ec *executionContext) marshalOExperimentStatus2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐExperimentStatus(ctx context.Context, sel ast.SelectionSet, v v1alpha1.ExperimentStatus) graphql.Marshaler {
return ec._ExperimentStatus(ctx, sel, &v)
}
func (ec *executionContext) marshalOFd2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋpkgᚋctrlserverᚋgraphᚋmodelᚐFdᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Fd) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNFd2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋpkgᚋctrlserverᚋgraphᚋmodelᚐFd(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalOHTTPChaos2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐHTTPChaosᚄ(ctx context.Context, sel ast.SelectionSet, v []*v1alpha1.HTTPChaos) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNHTTPChaos2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐHTTPChaos(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalOIOChaos2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐIOChaosᚄ(ctx context.Context, sel ast.SelectionSet, v []*v1alpha1.IOChaos) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNIOChaos2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐIOChaos(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalOIOChaosAction2ᚕgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐIOChaosActionᚄ(ctx context.Context, sel ast.SelectionSet, v []v1alpha1.IOChaosAction) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNIOChaosAction2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐIOChaosAction(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) unmarshalOInt2int(ctx context.Context, v interface{}) (int, error) {
res, err := graphql.UnmarshalInt(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOInt2int(ctx context.Context, sel ast.SelectionSet, v int) graphql.Marshaler {
return graphql.MarshalInt(v)
}
func (ec *executionContext) unmarshalOInt2int32(ctx context.Context, v interface{}) (int32, error) {
res, err := graphql.UnmarshalInt32(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOInt2int32(ctx context.Context, sel ast.SelectionSet, v int32) graphql.Marshaler {
return graphql.MarshalInt32(v)
}
func (ec *executionContext) unmarshalOInt2int64(ctx context.Context, v interface{}) (int64, error) {
res, err := graphql.UnmarshalInt64(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOInt2int64(ctx context.Context, sel ast.SelectionSet, v int64) graphql.Marshaler {
return graphql.MarshalInt64(v)
}
func (ec *executionContext) unmarshalOInt2ᚖint(ctx context.Context, v interface{}) (*int, error) {
if v == nil {
return nil, nil
}
res, err := graphql.UnmarshalInt(v)
return &res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOInt2ᚖint(ctx context.Context, sel ast.SelectionSet, v *int) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return graphql.MarshalInt(*v)
}
func (ec *executionContext) unmarshalOInt2ᚖint32(ctx context.Context, v interface{}) (*int32, error) {
if v == nil {
return nil, nil
}
res, err := graphql.UnmarshalInt32(v)
return &res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOInt2ᚖint32(ctx context.Context, sel ast.SelectionSet, v *int32) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return graphql.MarshalInt32(*v)
}
func (ec *executionContext) unmarshalOInt2ᚖint64(ctx context.Context, v interface{}) (*int64, error) {
if v == nil {
return nil, nil
}
res, err := graphql.UnmarshalInt64(v)
return &res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOInt2ᚖint64(ctx context.Context, sel ast.SelectionSet, v *int64) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return graphql.MarshalInt64(*v)
}
func (ec *executionContext) marshalOIoFault2ᚕgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐIoFaultᚄ(ctx context.Context, sel ast.SelectionSet, v []v1alpha1.IoFault) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNIoFault2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐIoFault(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalOLossSpec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐLossSpec(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.LossSpec) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec._LossSpec(ctx, sel, v)
}
func (ec *executionContext) unmarshalOMap2map(ctx context.Context, v interface{}) (map[string]interface{}, error) {
if v == nil {
return nil, nil
}
res, err := graphql.UnmarshalMap(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOMap2map(ctx context.Context, sel ast.SelectionSet, v map[string]interface{}) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return graphql.MarshalMap(v)
}
func (ec *executionContext) marshalOMistakeSpec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐMistakeSpec(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.MistakeSpec) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec._MistakeSpec(ctx, sel, v)
}
func (ec *executionContext) marshalONamespace2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋpkgᚋctrlserverᚋgraphᚋmodelᚐNamespaceᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Namespace) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNNamespace2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋpkgᚋctrlserverᚋgraphᚋmodelᚐNamespace(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalONetworkChaos2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐNetworkChaosᚄ(ctx context.Context, sel ast.SelectionSet, v []*v1alpha1.NetworkChaos) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNNetworkChaos2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐNetworkChaos(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalOOwnerReference2ᚕk8sᚗioᚋapimachineryᚋpkgᚋapisᚋmetaᚋv1ᚐOwnerReferenceᚄ(ctx context.Context, sel ast.SelectionSet, v []v11.OwnerReference) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNOwnerReference2k8sᚗioᚋapimachineryᚋpkgᚋapisᚋmetaᚋv1ᚐOwnerReference(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalOPod2ᚕᚖk8sᚗioᚋapiᚋcoreᚋv1ᚐPodᚄ(ctx context.Context, sel ast.SelectionSet, v []*v1.Pod) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNPod2ᚖk8sᚗioᚋapiᚋcoreᚋv1ᚐPod(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalOPod2ᚖk8sᚗioᚋapiᚋcoreᚋv1ᚐPod(ctx context.Context, sel ast.SelectionSet, v *v1.Pod) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec._Pod(ctx, sel, v)
}
func (ec *executionContext) marshalOPodCondition2ᚕk8sᚗioᚋapiᚋcoreᚋv1ᚐPodConditionᚄ(ctx context.Context, sel ast.SelectionSet, v []v1.PodCondition) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNPodCondition2k8sᚗioᚋapiᚋcoreᚋv1ᚐPodCondition(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalOPodHTTPChaos2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosᚄ(ctx context.Context, sel ast.SelectionSet, v []*v1alpha1.PodHttpChaos) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNPodHTTPChaos2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaos(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalOPodHttpChaosPatchActions2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosPatchActions(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.PodHttpChaosPatchActions) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec._PodHttpChaosPatchActions(ctx, sel, v)
}
func (ec *executionContext) marshalOPodHttpChaosPatchBodyAction2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosPatchBodyAction(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.PodHttpChaosPatchBodyAction) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec._PodHttpChaosPatchBodyAction(ctx, sel, v)
}
func (ec *executionContext) marshalOPodHttpChaosReplaceActions2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodHttpChaosReplaceActions(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.PodHttpChaosReplaceActions) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec._PodHttpChaosReplaceActions(ctx, sel, v)
}
func (ec *executionContext) marshalOPodIOChaos2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodIOChaosᚄ(ctx context.Context, sel ast.SelectionSet, v []*v1alpha1.PodIOChaos) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNPodIOChaos2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodIOChaos(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalOPodIP2ᚕk8sᚗioᚋapiᚋcoreᚋv1ᚐPodIPᚄ(ctx context.Context, sel ast.SelectionSet, v []v1.PodIP) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNPodIP2k8sᚗioᚋapiᚋcoreᚋv1ᚐPodIP(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalOPodNetworkChaos2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodNetworkChaosᚄ(ctx context.Context, sel ast.SelectionSet, v []*v1alpha1.PodNetworkChaos) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNPodNetworkChaos2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐPodNetworkChaos(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalOProcess2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋpkgᚋctrlserverᚋgraphᚋmodelᚐProcessᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Process) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNProcess2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋpkgᚋctrlserverᚋgraphᚋmodelᚐProcess(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalORawIPSet2ᚕgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐRawIPSetᚄ(ctx context.Context, sel ast.SelectionSet, v []v1alpha1.RawIPSet) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNRawIPSet2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐRawIPSet(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalORawIptables2ᚕgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐRawIptablesᚄ(ctx context.Context, sel ast.SelectionSet, v []v1alpha1.RawIptables) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNRawIptables2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐRawIptables(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalORawTrafficControl2ᚕgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐRawTrafficControlᚄ(ctx context.Context, sel ast.SelectionSet, v []v1alpha1.RawTrafficControl) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNRawTrafficControl2githubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐRawTrafficControl(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalORecord2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐRecordᚄ(ctx context.Context, sel ast.SelectionSet, v []*v1alpha1.Record) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNRecord2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐRecord(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalOReorderSpec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐReorderSpec(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.ReorderSpec) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec._ReorderSpec(ctx, sel, v)
}
func (ec *executionContext) marshalOStressChaos2ᚕᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐStressChaosᚄ(ctx context.Context, sel ast.SelectionSet, v []*v1alpha1.StressChaos) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNStressChaos2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐStressChaos(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) unmarshalOString2string(ctx context.Context, v interface{}) (string, error) {
res, err := graphql.UnmarshalString(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOString2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
return graphql.MarshalString(v)
}
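// unmarshalOString2ᚕstringᚄ below follows gqlgen's list-coercion rule: a nil
// input stays nil, a JSON array is unmarshaled element by element, and a
// single scalar value is wrapped into a one-element list.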
func (ec *executionContext) unmarshalOString2ᚕstringᚄ(ctx context.Context, v interface{}) ([]string, error) {
if v == nil {
return nil, nil
}
var vSlice []interface{}
	if tmp1, ok := v.([]interface{}); ok {
		vSlice = tmp1
	} else {
		vSlice = []interface{}{v}
	}
var err error
res := make([]string, len(vSlice))
for i := range vSlice {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
res[i], err = ec.unmarshalNString2string(ctx, vSlice[i])
if err != nil {
return nil, err
}
}
return res, nil
}
func (ec *executionContext) marshalOString2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
for i := range v {
ret[i] = ec.marshalNString2string(ctx, sel, v[i])
}
return ret
}
func (ec *executionContext) unmarshalOString2ᚕᚕstringᚄ(ctx context.Context, v interface{}) ([][]string, error) {
if v == nil {
return nil, nil
}
var vSlice []interface{}
	if tmp1, ok := v.([]interface{}); ok {
		vSlice = tmp1
	} else {
		vSlice = []interface{}{v}
	}
var err error
res := make([][]string, len(vSlice))
for i := range vSlice {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
res[i], err = ec.unmarshalNString2ᚕstringᚄ(ctx, vSlice[i])
if err != nil {
return nil, err
}
}
return res, nil
}
func (ec *executionContext) marshalOString2ᚕᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v [][]string) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
for i := range v {
ret[i] = ec.marshalNString2ᚕstringᚄ(ctx, sel, v[i])
}
return ret
}
func (ec *executionContext) unmarshalOString2ᚖstring(ctx context.Context, v interface{}) (*string, error) {
if v == nil {
return nil, nil
}
res, err := graphql.UnmarshalString(v)
return &res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOString2ᚖstring(ctx context.Context, sel ast.SelectionSet, v *string) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return graphql.MarshalString(*v)
}
func (ec *executionContext) unmarshalOTime2ᚖtimeᚐTime(ctx context.Context, v interface{}) (*time.Time, error) {
if v == nil {
return nil, nil
}
res, err := graphql.UnmarshalTime(v)
return &res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOTime2ᚖtimeᚐTime(ctx context.Context, sel ast.SelectionSet, v *time.Time) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return graphql.MarshalTime(*v)
}
func (ec *executionContext) marshalOTimespec2ᚖgithubᚗcomᚋchaosᚑmeshᚋchaosᚑmeshᚋapiᚋv1alpha1ᚐTimespec(ctx context.Context, sel ast.SelectionSet, v *v1alpha1.Timespec) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec._Timespec(ctx, sel, v)
}
func (ec *executionContext) marshalO__EnumValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.EnumValue) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__EnumValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValue(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalO__Field2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐFieldᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Field) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__Field2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐField(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalO__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.InputValue) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalO__Schema2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx context.Context, sel ast.SelectionSet, v *introspection.Schema) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec.___Schema(ctx, sel, v)
}
func (ec *executionContext) marshalO__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Type) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v *introspection.Type) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec.___Type(ctx, sel, v)
}
// endregion ***************************** type.gotpl *****************************
| 1 | 25,818 | Can you revert these changes? | chaos-mesh-chaos-mesh | go |
@@ -18,10 +18,13 @@
'use strict';
-import React, {
+import React from 'react';
+
+import {
Component,
Navigator,
- StatusBarIOS,
+ Platform,
+ StatusBar,
Text,
TouchableOpacity,
View, | 1 | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 2016 Realm Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////
'use strict';
import React, {
Component,
Navigator,
StatusBarIOS,
Text,
TouchableOpacity,
View,
} from 'react-native';
import TodoItem from './todo-item';
import TodoListView from './todo-listview';
import realm from './realm';
import styles from './styles';
export default class TodoApp extends Component {
constructor(props) {
super(props);
let todoLists = realm.objects('TodoList');
if (todoLists.length < 1) {
realm.write(() => {
realm.create('TodoList', {name: 'Todo List'});
});
}
// This is a Results object, which will live-update.
this.todoLists = todoLists;
// Bind all the methods that we will be passing as props.
this.renderScene = this.renderScene.bind(this);
this._addNewTodoList = this._addNewTodoList.bind(this);
this._onPressTodoList = this._onPressTodoList.bind(this);
this.state = {};
}
get currentListView() {
let refs = this.refs.nav.refs;
return refs.listItemView || refs.listView;
}
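  // StatusBarIOS only exists on iOS; the guard below keeps Android from
  // crashing when this component mounts.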
componentWillMount() {
if (StatusBarIOS) {
StatusBarIOS.setStyle('light-content');
}
}
render() {
let objects = realm.objects('Todo');
let extraItems = [
{name: 'Complete', items: objects.filtered('done = true')},
{name: 'Incomplete', items: objects.filtered('done = false')},
];
let route = {
title: 'My Todo Lists',
component: TodoListView,
passProps: {
ref: 'listView',
items: this.todoLists,
extraItems: extraItems,
onPressItem: this._onPressTodoList,
},
backButtonTitle: 'Lists',
rightButtonTitle: 'Add',
onRightButtonPress: this._addNewTodoList,
};
let navigationBar = (
<Navigator.NavigationBar routeMapper={RouteMapper} style={styles.navBar} />
);
return (
<Navigator
ref="nav"
initialRoute={route}
navigationBar={navigationBar}
renderScene={this.renderScene}
sceneStyle={styles.navScene}
style={styles.navigator}
/>
);
}
renderScene(route) {
return <route.component {...route.passProps} />
}
_addNewTodoItem(list) {
let items = list.items;
if (!this._shouldAddNewItem(items)) {
return;
}
realm.write(() => {
items.push({text: ''});
});
this._setEditingRow(items.length - 1);
}
_addNewTodoList() {
let items = this.todoLists;
if (!this._shouldAddNewItem(items)) {
return;
}
realm.write(() => {
realm.create('TodoList', {name: ''});
});
this._setEditingRow(items.length - 1);
}
_onPressTodoList(list) {
let items = list.items;
let route = {
title: list.name,
component: TodoListView,
passProps: {
ref: 'listItemView',
items: items,
rowClass: TodoItem,
},
};
// Check if the items are mutable (i.e. List rather than Results).
if (items.push) {
Object.assign(route, {
rightButtonTitle: 'Add',
onRightButtonPress: () => this._addNewTodoItem(list),
});
}
this.refs.nav.push(route);
}
_shouldAddNewItem(items) {
let editingRow = this.currentListView.state.editingRow;
let editingItem = editingRow != null && items[editingRow];
// Don't allow adding a new item if the one being edited is empty.
return !editingItem || !!editingItem.text || !!editingItem.name;
}
_setEditingRow(rowIndex) {
let listView = this.currentListView;
// Update the state on the currently displayed TodoList to edit this new item.
listView.setState({editingRow: rowIndex});
listView.updateDataSource();
}
}
const RouteMapper = {
LeftButton(route, navigator, index, navState) {
if (index == 0) {
return null;
}
let prevRoute = navState.routeStack[index - 1];
return (
<TouchableOpacity onPress={() => navigator.pop()}>
<View style={[styles.navBarView, styles.navBarLeftButton]}>
<Text style={styles.navBarLeftArrow}>‹</Text>
<Text style={styles.navBarText}>
{prevRoute.backButtonTitle || prevRoute.title || 'Back'}
</Text>
</View>
</TouchableOpacity>
);
},
RightButton(route) {
if (!route.rightButtonTitle) {
return null;
}
return (
<TouchableOpacity onPress={route.onRightButtonPress}>
<View style={[styles.navBarView, styles.navBarRightButton]}>
<Text style={styles.navBarText}>
{route.rightButtonTitle}
</Text>
</View>
</TouchableOpacity>
);
},
Title(route) {
return (
<View style={styles.navBarView}>
<Text style={[styles.navBarText, styles.navBarTitleText]}>
{route.title}
</Text>
</View>
);
},
};
| 1 | 15,396 | Do we need to change this? | realm-realm-js | js |
@@ -104,14 +104,12 @@ func setKernelMemory(path string, kernelMemoryLimit uint64) error {
}
func setMemoryAndSwap(path string, cgroup *configs.Cgroup) error {
- ulimited := -1
-
- // If the memory update is set to uint64(-1) we should also
-	// set swap to uint64(-1), which means unlimited memory.
- if cgroup.Resources.Memory == uint64(ulimited) {
+ // If the memory update is set to MemoryUnlimited we should also
+ // set swap to MemoryUnlimited.
+ if cgroup.Resources.Memory == configs.MemoryUnlimited {
 		// Only set swap if it's enabled in the kernel
if cgroups.PathExists(filepath.Join(path, cgroupMemorySwapLimit)) {
- cgroup.Resources.MemorySwap = uint64(ulimited)
+ cgroup.Resources.MemorySwap = uint64(configs.MemoryUnlimited)
}
}
| 1 | // +build linux
package fs
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"syscall"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
)
const (
cgroupKernelMemoryLimit = "memory.kmem.limit_in_bytes"
cgroupMemorySwapLimit = "memory.memsw.limit_in_bytes"
cgroupMemoryLimit = "memory.limit_in_bytes"
)
type MemoryGroup struct {
}
func (s *MemoryGroup) Name() string {
return "memory"
}
func (s *MemoryGroup) Apply(d *cgroupData) (err error) {
path, err := d.path("memory")
if err != nil && !cgroups.IsNotFound(err) {
return err
} else if path == "" {
return nil
}
if memoryAssigned(d.config) {
if _, err := os.Stat(path); os.IsNotExist(err) {
if err := os.MkdirAll(path, 0755); err != nil {
return err
}
			// Only enable kernel memory accounting when this cgroup
// is created by libcontainer, otherwise we might get
// error when people use `cgroupsPath` to join an existed
// cgroup whose kernel memory is not initialized.
if err := EnableKernelMemoryAccounting(path); err != nil {
return err
}
}
}
defer func() {
if err != nil {
os.RemoveAll(path)
}
}()
// We need to join memory cgroup after set memory limits, because
// kmem.limit_in_bytes can only be set when the cgroup is empty.
_, err = d.join("memory")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
return nil
}
func EnableKernelMemoryAccounting(path string) error {
// Check if kernel memory is enabled
// We have to limit the kernel memory here as it won't be accounted at all
// until a limit is set on the cgroup and limit cannot be set once the
// cgroup has children, or if there are already tasks in the cgroup.
for _, i := range []int64{1, -1} {
if err := setKernelMemory(path, uint64(i)); err != nil {
return err
}
}
return nil
}
func setKernelMemory(path string, kernelMemoryLimit uint64) error {
if path == "" {
return fmt.Errorf("no such directory for %s", cgroupKernelMemoryLimit)
}
if !cgroups.PathExists(filepath.Join(path, cgroupKernelMemoryLimit)) {
// kernel memory is not enabled on the system so we should do nothing
return nil
}
if err := ioutil.WriteFile(filepath.Join(path, cgroupKernelMemoryLimit), []byte(strconv.FormatUint(kernelMemoryLimit, 10)), 0700); err != nil {
// Check if the error number returned by the syscall is "EBUSY"
// The EBUSY signal is returned on attempts to write to the
// memory.kmem.limit_in_bytes file if the cgroup has children or
// once tasks have been attached to the cgroup
if pathErr, ok := err.(*os.PathError); ok {
if errNo, ok := pathErr.Err.(syscall.Errno); ok {
if errNo == syscall.EBUSY {
return fmt.Errorf("failed to set %s, because either tasks have already joined this cgroup or it has children", cgroupKernelMemoryLimit)
}
}
}
return fmt.Errorf("failed to write %v to %v: %v", kernelMemoryLimit, cgroupKernelMemoryLimit, err)
}
return nil
}
func setMemoryAndSwap(path string, cgroup *configs.Cgroup) error {
ulimited := -1
// If the memory update is set to uint64(-1) we should also
	// set swap to uint64(-1), which means unlimited memory.
if cgroup.Resources.Memory == uint64(ulimited) {
		// Only set swap if it's enabled in the kernel
if cgroups.PathExists(filepath.Join(path, cgroupMemorySwapLimit)) {
cgroup.Resources.MemorySwap = uint64(ulimited)
}
}
// When memory and swap memory are both set, we need to handle the cases
	// for updating the container.
if cgroup.Resources.Memory != 0 && cgroup.Resources.MemorySwap != 0 {
memoryUsage, err := getMemoryData(path, "")
if err != nil {
return err
}
		// When updating the memory limit, we need to pick the write order
		// for memory and swap so the new values never fail the kernel's
		// validation against the old ones.
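		// Worked example of that constraint (illustrative values): with the
		// current limit at 100M and memsw at 200M, growing to limit=300M and
		// memsw=400M must write memsw first, since limit=300M would briefly
		// exceed memsw=200M; shrinking to limit=50M and memsw=80M must write
		// limit first, since memsw may never drop below the current limit.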
if cgroup.Resources.MemorySwap == uint64(ulimited) || memoryUsage.Limit < cgroup.Resources.MemorySwap {
if err := writeFile(path, cgroupMemorySwapLimit, strconv.FormatUint(cgroup.Resources.MemorySwap, 10)); err != nil {
return err
}
if err := writeFile(path, cgroupMemoryLimit, strconv.FormatUint(cgroup.Resources.Memory, 10)); err != nil {
return err
}
} else {
if err := writeFile(path, cgroupMemoryLimit, strconv.FormatUint(cgroup.Resources.Memory, 10)); err != nil {
return err
}
if err := writeFile(path, cgroupMemorySwapLimit, strconv.FormatUint(cgroup.Resources.MemorySwap, 10)); err != nil {
return err
}
}
} else {
if cgroup.Resources.Memory != 0 {
if err := writeFile(path, cgroupMemoryLimit, strconv.FormatUint(cgroup.Resources.Memory, 10)); err != nil {
return err
}
}
if cgroup.Resources.MemorySwap != 0 {
if err := writeFile(path, cgroupMemorySwapLimit, strconv.FormatUint(cgroup.Resources.MemorySwap, 10)); err != nil {
return err
}
}
}
return nil
}
func (s *MemoryGroup) Set(path string, cgroup *configs.Cgroup) error {
if err := setMemoryAndSwap(path, cgroup); err != nil {
return err
}
if cgroup.Resources.KernelMemory != 0 {
if err := setKernelMemory(path, cgroup.Resources.KernelMemory); err != nil {
return err
}
}
if cgroup.Resources.MemoryReservation != 0 {
if err := writeFile(path, "memory.soft_limit_in_bytes", strconv.FormatUint(cgroup.Resources.MemoryReservation, 10)); err != nil {
return err
}
}
if cgroup.Resources.KernelMemoryTCP != 0 {
if err := writeFile(path, "memory.kmem.tcp.limit_in_bytes", strconv.FormatUint(cgroup.Resources.KernelMemoryTCP, 10)); err != nil {
return err
}
}
if cgroup.Resources.OomKillDisable {
if err := writeFile(path, "memory.oom_control", "1"); err != nil {
return err
}
}
if cgroup.Resources.MemorySwappiness == nil || int64(*cgroup.Resources.MemorySwappiness) == -1 {
return nil
} else if *cgroup.Resources.MemorySwappiness <= 100 {
if err := writeFile(path, "memory.swappiness", strconv.FormatUint(*cgroup.Resources.MemorySwappiness, 10)); err != nil {
return err
}
} else {
return fmt.Errorf("invalid value:%d. valid memory swappiness range is 0-100", *cgroup.Resources.MemorySwappiness)
}
return nil
}
func (s *MemoryGroup) Remove(d *cgroupData) error {
return removePath(d.path("memory"))
}
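// GetStats fills stats from memory.stat plus the usage/max-usage/failcnt/limit
// counter files for plain, swap (memsw), kernel (kmem) and kernel TCP
// (kmem.tcp) memory.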
func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error {
// Set stats from memory.stat.
statsFile, err := os.Open(filepath.Join(path, "memory.stat"))
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
defer statsFile.Close()
sc := bufio.NewScanner(statsFile)
for sc.Scan() {
t, v, err := getCgroupParamKeyValue(sc.Text())
if err != nil {
return fmt.Errorf("failed to parse memory.stat (%q) - %v", sc.Text(), err)
}
stats.MemoryStats.Stats[t] = v
}
stats.MemoryStats.Cache = stats.MemoryStats.Stats["cache"]
memoryUsage, err := getMemoryData(path, "")
if err != nil {
return err
}
stats.MemoryStats.Usage = memoryUsage
swapUsage, err := getMemoryData(path, "memsw")
if err != nil {
return err
}
stats.MemoryStats.SwapUsage = swapUsage
kernelUsage, err := getMemoryData(path, "kmem")
if err != nil {
return err
}
stats.MemoryStats.KernelUsage = kernelUsage
kernelTCPUsage, err := getMemoryData(path, "kmem.tcp")
if err != nil {
return err
}
stats.MemoryStats.KernelTCPUsage = kernelTCPUsage
return nil
}
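// memoryAssigned reports whether any memory-related resource is set on the
// cgroup config, i.e. whether this controller has anything to apply at all.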
func memoryAssigned(cgroup *configs.Cgroup) bool {
return cgroup.Resources.Memory != 0 ||
cgroup.Resources.MemoryReservation != 0 ||
cgroup.Resources.MemorySwap > 0 ||
cgroup.Resources.KernelMemory > 0 ||
cgroup.Resources.KernelMemoryTCP > 0 ||
cgroup.Resources.OomKillDisable ||
(cgroup.Resources.MemorySwappiness != nil && int64(*cgroup.Resources.MemorySwappiness) != -1)
}
func getMemoryData(path, name string) (cgroups.MemoryData, error) {
memoryData := cgroups.MemoryData{}
moduleName := "memory"
if name != "" {
moduleName = strings.Join([]string{"memory", name}, ".")
}
usage := strings.Join([]string{moduleName, "usage_in_bytes"}, ".")
maxUsage := strings.Join([]string{moduleName, "max_usage_in_bytes"}, ".")
failcnt := strings.Join([]string{moduleName, "failcnt"}, ".")
limit := strings.Join([]string{moduleName, "limit_in_bytes"}, ".")
value, err := getCgroupParamUint(path, usage)
if err != nil {
if moduleName != "memory" && os.IsNotExist(err) {
return cgroups.MemoryData{}, nil
}
return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", usage, err)
}
memoryData.Usage = value
value, err = getCgroupParamUint(path, maxUsage)
if err != nil {
if moduleName != "memory" && os.IsNotExist(err) {
return cgroups.MemoryData{}, nil
}
return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", maxUsage, err)
}
memoryData.MaxUsage = value
value, err = getCgroupParamUint(path, failcnt)
if err != nil {
if moduleName != "memory" && os.IsNotExist(err) {
return cgroups.MemoryData{}, nil
}
return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", failcnt, err)
}
memoryData.Failcnt = value
value, err = getCgroupParamUint(path, limit)
if err != nil {
if moduleName != "memory" && os.IsNotExist(err) {
return cgroups.MemoryData{}, nil
}
return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", limit, err)
}
memoryData.Limit = value
return memoryData, nil
}
 | 1 | 14,635 | With this commit, `MemoryUnlimited` is defined as a `uint64`, so I think you can drop the redundant cast from this line (and from the later lines which also have `uint64(configs.MemoryUnlimited)`). | opencontainers-runc | go |
@@ -7,6 +7,8 @@ function DashboardContentTitle (props) {
}
function PanelTopBar (props) {
+ const notOverFileLimit = props.maxNumberOfFiles !== props.totalFileCount
+
return (
<div class="uppy-DashboardContent-bar">
<button class="uppy-DashboardContent-back" | 1 | const { h } = require('preact')
function DashboardContentTitle (props) {
if (props.newFiles.length) {
return props.i18n('xFilesSelected', { smart_count: props.newFiles.length })
}
}
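// PanelTopBar renders the Dashboard header: a cancel button, the
// "x files selected" title, and the "add more files" toggle.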
function PanelTopBar (props) {
return (
<div class="uppy-DashboardContent-bar">
<button class="uppy-DashboardContent-back"
type="button"
onclick={props.cancelAll}>{props.i18n('cancel')}</button>
<div class="uppy-DashboardContent-title" role="heading" aria-level="h1">
<DashboardContentTitle {...props} />
</div>
<button class="uppy-DashboardContent-addMore"
type="button"
aria-label={props.i18n('addMoreFiles')}
title={props.i18n('addMoreFiles')}
onclick={() => props.toggleAddFilesPanel(true)}>
<svg class="UppyIcon" width="15" height="15" viewBox="0 0 13 13" version="1.1" xmlns="http://www.w3.org/2000/svg">
<path d="M7,6 L13,6 L13,7 L7,7 L7,13 L6,13 L6,7 L0,7 L0,6 L6,6 L6,0 L7,0 L7,6 Z" />
</svg>
</button>
</div>
)
}
module.exports = PanelTopBar
| 1 | 11,130 | `props.totalFileCount < props.maxNumberOfFiles` makes the intent a bit more clear I think. And a check to see if maxNumberOfFiles even exists? | transloadit-uppy | js |
@@ -12,7 +12,8 @@ import (
// Flags
var (
- epochNum uint64
+ epochNum uint64
+ nextEpoch bool
)
// NodeCmd represents the node command | 1 | // Copyright (c) 2019 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package node
import (
"github.com/spf13/cobra"
)
// Flags
var (
epochNum uint64
)
// NodeCmd represents the node command
var NodeCmd = &cobra.Command{
Use: "node",
Short: "Deal with nodes of IoTeX blockchain",
Args: cobra.ExactArgs(1),
}
func init() {
NodeCmd.AddCommand(nodeDelegateCmd)
NodeCmd.AddCommand(nodeRewardCmd)
nodeDelegateCmd.Flags().Uint64VarP(&epochNum, "epoch-num", "e", 0, "specify specific epoch")
}
| 1 | 16,899 | `epochNum` is a global variable (from `gochecknoglobals`) | iotexproject-iotex-core | go |
@@ -1083,6 +1083,8 @@ angular.module('ui.grid')
} else {
// otherwise, manually search the oldRows to see if we can find this row
newRow = self.getRow(newEntity, oldRows);
+ } else {
+ newRow.entity = newEntity;
}
// if we didn't find the row, it must be new, so create it | 1 | (function(){
angular.module('ui.grid')
.factory('Grid', ['$q', '$compile', '$parse', 'gridUtil', 'uiGridConstants', 'GridOptions', 'GridColumn', 'GridRow', 'GridApi', 'rowSorter', 'rowSearcher', 'GridRenderContainer', '$timeout','ScrollEvent',
function($q, $compile, $parse, gridUtil, uiGridConstants, GridOptions, GridColumn, GridRow, GridApi, rowSorter, rowSearcher, GridRenderContainer, $timeout, ScrollEvent) {
/**
* @ngdoc object
* @name ui.grid.core.api:PublicApi
* @description Public Api for the core grid features
*
*/
/**
* @ngdoc function
* @name ui.grid.class:Grid
* @description Grid is the main viewModel. Any properties or methods needed to maintain state are defined in
* this prototype. One instance of Grid is created per Grid directive instance.
* @param {object} options Object map of options to pass into the grid. An 'id' property is expected.
*/
var Grid = function Grid(options) {
var self = this;
// Get the id out of the options, then remove it
if (options !== undefined && typeof(options.id) !== 'undefined' && options.id) {
if (!/^[_a-zA-Z0-9-]+$/.test(options.id)) {
throw new Error("Grid id '" + options.id + '" is invalid. It must follow CSS selector syntax rules.');
}
}
else {
throw new Error('No ID provided. An ID must be given when creating a grid.');
}
self.id = options.id;
delete options.id;
// Get default options
self.options = GridOptions.initialize( options );
/**
* @ngdoc object
* @name appScope
* @propertyOf ui.grid.class:Grid
* @description reference to the application scope (the parent scope of the ui-grid element). Assigned in ui-grid controller
* <br/>
* use gridOptions.appScopeProvider to override the default assignment of $scope.$parent with any reference
*/
self.appScope = self.options.appScopeProvider;
self.headerHeight = self.options.headerRowHeight;
/**
* @ngdoc object
* @name footerHeight
* @propertyOf ui.grid.class:Grid
* @description returns the total footer height gridFooter + columnFooter
*/
self.footerHeight = self.calcFooterHeight();
/**
* @ngdoc object
* @name columnFooterHeight
* @propertyOf ui.grid.class:Grid
* @description returns the total column footer height
*/
self.columnFooterHeight = self.calcColumnFooterHeight();
self.rtl = false;
self.gridHeight = 0;
self.gridWidth = 0;
self.columnBuilders = [];
self.rowBuilders = [];
self.rowsProcessors = [];
self.columnsProcessors = [];
self.styleComputations = [];
self.viewportAdjusters = [];
self.rowHeaderColumns = [];
self.dataChangeCallbacks = {};
self.verticalScrollSyncCallBackFns = {};
self.horizontalScrollSyncCallBackFns = {};
// self.visibleRowCache = [];
// Set of 'render' containers for self grid, which can render sets of rows
self.renderContainers = {};
// Create a
self.renderContainers.body = new GridRenderContainer('body', self);
self.cellValueGetterCache = {};
// Cached function to use with custom row templates
self.getRowTemplateFn = null;
//representation of the rows on the grid.
//these are wrapped references to the actual data rows (options.data)
self.rows = [];
//represents the columns on the grid
self.columns = [];
/**
* @ngdoc boolean
* @name isScrollingVertically
* @propertyOf ui.grid.class:Grid
     * @description set to true when Grid is scrolling vertically. Set to false via a debounced method
*/
self.isScrollingVertically = false;
/**
* @ngdoc boolean
* @name isScrollingHorizontally
* @propertyOf ui.grid.class:Grid
     * @description set to true when Grid is scrolling horizontally. Set to false via a debounced method
*/
self.isScrollingHorizontally = false;
/**
* @ngdoc property
* @name scrollDirection
* @propertyOf ui.grid.class:Grid
* @description set one of the uiGridConstants.scrollDirection values (UP, DOWN, LEFT, RIGHT, NONE), which tells
     * us which direction we are scrolling. Set to NONE via a debounced method
*/
self.scrollDirection = uiGridConstants.scrollDirection.NONE;
function vertical (scrollEvent) {
self.isScrollingVertically = false;
self.api.core.raise.scrollEnd(scrollEvent);
self.scrollDirection = uiGridConstants.scrollDirection.NONE;
}
var debouncedVertical = gridUtil.debounce(vertical, self.options.scrollDebounce);
var debouncedVerticalMinDelay = gridUtil.debounce(vertical, 0);
function horizontal (scrollEvent) {
self.isScrollingHorizontally = false;
self.api.core.raise.scrollEnd(scrollEvent);
self.scrollDirection = uiGridConstants.scrollDirection.NONE;
}
var debouncedHorizontal = gridUtil.debounce(horizontal, self.options.scrollDebounce);
var debouncedHorizontalMinDelay = gridUtil.debounce(horizontal, 0);
/**
* @ngdoc function
* @name flagScrollingVertically
* @methodOf ui.grid.class:Grid
* @description sets isScrollingVertically to true and sets it to false in a debounced function
*/
self.flagScrollingVertically = function(scrollEvent) {
if (!self.isScrollingVertically && !self.isScrollingHorizontally) {
self.api.core.raise.scrollBegin(scrollEvent);
}
self.isScrollingVertically = true;
if (self.options.scrollDebounce === 0 || !scrollEvent.withDelay) {
debouncedVerticalMinDelay(scrollEvent);
}
else {
debouncedVertical(scrollEvent);
}
};
/**
* @ngdoc function
* @name flagScrollingHorizontally
* @methodOf ui.grid.class:Grid
* @description sets isScrollingHorizontally to true and sets it to false in a debounced function
*/
self.flagScrollingHorizontally = function(scrollEvent) {
if (!self.isScrollingVertically && !self.isScrollingHorizontally) {
self.api.core.raise.scrollBegin(scrollEvent);
}
self.isScrollingHorizontally = true;
if (self.options.scrollDebounce === 0 || !scrollEvent.withDelay) {
debouncedHorizontalMinDelay(scrollEvent);
}
else {
debouncedHorizontal(scrollEvent);
}
};
self.scrollbarHeight = 0;
self.scrollbarWidth = 0;
if (self.options.enableHorizontalScrollbar === uiGridConstants.scrollbars.ALWAYS) {
self.scrollbarHeight = gridUtil.getScrollbarWidth();
}
if (self.options.enableVerticalScrollbar === uiGridConstants.scrollbars.ALWAYS) {
self.scrollbarWidth = gridUtil.getScrollbarWidth();
}
self.api = new GridApi(self);
/**
* @ngdoc function
* @name refresh
* @methodOf ui.grid.core.api:PublicApi
* @description Refresh the rendered grid on screen.
* The refresh method re-runs both the columnProcessors and the
* rowProcessors, as well as calling refreshCanvas to update all
* the grid sizing. In general you should prefer to use queueGridRefresh
* instead, which is basically a debounced version of refresh.
*
* If you only want to resize the grid, not regenerate all the rows
* and columns, you should consider directly calling refreshCanvas instead.
*
*/
self.api.registerMethod( 'core', 'refresh', this.refresh );
/**
* @ngdoc function
* @name queueGridRefresh
* @methodOf ui.grid.core.api:PublicApi
* @description Request a refresh of the rendered grid on screen, if multiple
* calls to queueGridRefresh are made within a digest cycle only one will execute.
* The refresh method re-runs both the columnProcessors and the
* rowProcessors, as well as calling refreshCanvas to update all
* the grid sizing. In general you should prefer to use queueGridRefresh
* instead, which is basically a debounced version of refresh.
*
*/
self.api.registerMethod( 'core', 'queueGridRefresh', this.queueGridRefresh );
/**
* @ngdoc function
* @name refreshRows
* @methodOf ui.grid.core.api:PublicApi
* @description Runs only the rowProcessors, columns remain as they were.
* It then calls redrawInPlace and refreshCanvas, which adjust the grid sizing.
     * @returns {promise} promise that is resolved when rendering completes
*
*/
self.api.registerMethod( 'core', 'refreshRows', this.refreshRows );
/**
* @ngdoc function
* @name queueRefresh
* @methodOf ui.grid.core.api:PublicApi
* @description Requests execution of refreshCanvas, if multiple requests are made
* during a digest cycle only one will run. RefreshCanvas updates the grid sizing.
     * @returns {promise} promise that is resolved when rendering completes
*
*/
    self.api.registerMethod( 'core', 'queueRefresh', this.queueRefresh );
/**
* @ngdoc function
* @name handleWindowResize
* @methodOf ui.grid.core.api:PublicApi
* @description Trigger a grid resize, normally this would be picked
* up by a watch on window size, but in some circumstances it is necessary
* to call this manually
     * @returns {promise} promise that is resolved when rendering completes
*
*/
self.api.registerMethod( 'core', 'handleWindowResize', this.handleWindowResize );
/**
* @ngdoc function
* @name addRowHeaderColumn
* @methodOf ui.grid.core.api:PublicApi
* @description adds a row header column to the grid
* @param {object} column def
*
*/
self.api.registerMethod( 'core', 'addRowHeaderColumn', this.addRowHeaderColumn );
/**
* @ngdoc function
* @name scrollToIfNecessary
* @methodOf ui.grid.core.api:PublicApi
* @description Scrolls the grid to make a certain row and column combo visible,
* in the case that it is not completely visible on the screen already.
* @param {GridRow} gridRow row to make visible
* @param {GridCol} gridCol column to make visible
* @returns {promise} a promise that is resolved when scrolling is complete
*
*/
self.api.registerMethod( 'core', 'scrollToIfNecessary', function(gridRow, gridCol) { return self.scrollToIfNecessary(gridRow, gridCol);} );
/**
* @ngdoc function
* @name scrollTo
* @methodOf ui.grid.core.api:PublicApi
* @description Scroll the grid such that the specified
* row and column is in view
* @param {object} rowEntity gridOptions.data[] array instance to make visible
* @param {object} colDef to make visible
* @returns {promise} a promise that is resolved after any scrolling is finished
*/
self.api.registerMethod( 'core', 'scrollTo', function (rowEntity, colDef) { return self.scrollTo(rowEntity, colDef);} );
/**
* @ngdoc function
* @name registerRowsProcessor
* @methodOf ui.grid.core.api:PublicApi
* @description
* Register a "rows processor" function. When the rows are updated,
* the grid calls each registered "rows processor", which has a chance
* to alter the set of rows (sorting, etc) as long as the count is not
* modified.
*
* @param {function(renderedRowsToProcess, columns )} processorFunction rows processor function, which
* is run in the context of the grid (i.e. this for the function will be the grid), and must
* return the updated rows list, which is passed to the next processor in the chain
* @param {number} priority the priority of this processor. In general we try to do them in 100s to leave room
* for other people to inject rows processors at intermediate priorities. Lower priority rowsProcessors run earlier.
*
* At present allRowsVisible is running at 50, sort manipulations running at 60-65, filter is running at 100,
* sort is at 200, grouping and treeview at 400-410, selectable rows at 500, pagination at 900 (pagination will generally want to be last)
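       *
       * @example
       * <pre>
       *   // illustrative sketch: a pass-through rows processor at priority 70
       *   gridApi.core.registerRowsProcessor( function(renderableRows) {
       *     // filter or decorate renderableRows here, then return them
       *     return renderableRows;
       *   }, 70 );
       * </pre>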
*/
self.api.registerMethod( 'core', 'registerRowsProcessor', this.registerRowsProcessor );
/**
* @ngdoc function
* @name registerColumnsProcessor
* @methodOf ui.grid.core.api:PublicApi
* @description
* Register a "columns processor" function. When the columns are updated,
* the grid calls each registered "columns processor", which has a chance
* to alter the set of columns as long as the count is not
* modified.
*
* @param {function(renderedColumnsToProcess, rows )} processorFunction columns processor function, which
* is run in the context of the grid (i.e. this for the function will be the grid), and must
* return the updated columns list, which is passed to the next processor in the chain
* @param {number} priority the priority of this processor. In general we try to do them in 100s to leave room
* for other people to inject columns processors at intermediate priorities. Lower priority columnsProcessors run earlier.
*
* At present allRowsVisible is running at 50, filter is running at 100, sort is at 200, grouping at 400, selectable rows at 500, pagination at 900 (pagination will generally want to be last)
*/
self.api.registerMethod( 'core', 'registerColumnsProcessor', this.registerColumnsProcessor );
/**
* @ngdoc function
* @name sortHandleNulls
* @methodOf ui.grid.core.api:PublicApi
* @description A null handling method that can be used when building custom sort
* functions
* @example
* <pre>
* mySortFn = function(a, b) {
* var nulls = $scope.gridApi.core.sortHandleNulls(a, b);
* if ( nulls !== null ){
* return nulls;
* } else {
* // your code for sorting here
* };
* </pre>
* @param {object} a sort value a
* @param {object} b sort value b
* @returns {number} null if there were no nulls/undefineds, otherwise returns
* a sort value that should be passed back from the sort function
*
*/
self.api.registerMethod( 'core', 'sortHandleNulls', rowSorter.handleNulls );
/**
* @ngdoc function
* @name sortChanged
* @methodOf ui.grid.core.api:PublicApi
* @description The sort criteria on one or more columns has
* changed. Provides as parameters the grid and the output of
* getColumnSorting, which is an array of gridColumns
* that have sorting on them, sorted in priority order.
*
* @param {Grid} grid the grid
* @param {array} sortColumns an array of columns with
* sorts on them, in priority order
*
* @example
* <pre>
* gridApi.core.on.sortChanged( grid, sortColumns );
* </pre>
*/
self.api.registerEvent( 'core', 'sortChanged' );
/**
* @ngdoc function
* @name columnVisibilityChanged
* @methodOf ui.grid.core.api:PublicApi
* @description The visibility of a column has changed,
* the column itself is passed out as a parameter of the event.
*
* @param {GridCol} column the column that changed
*
* @example
* <pre>
* gridApi.core.on.columnVisibilityChanged( $scope, function (column) {
* // do something
* } );
* </pre>
*/
self.api.registerEvent( 'core', 'columnVisibilityChanged' );
/**
* @ngdoc method
* @name notifyDataChange
* @methodOf ui.grid.core.api:PublicApi
* @description Notify the grid that a data or config change has occurred,
* where that change isn't something the grid was otherwise noticing. This
* might be particularly relevant where you've changed values within the data
* and you'd like cell classes to be re-evaluated, or changed config within
* the columnDef and you'd like headerCellClasses to be re-evaluated.
* @param {string} type one of the
* uiGridConstants.dataChange values (ALL, ROW, EDIT, COLUMN), which tells
* us which refreshes to fire.
*
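     * @example
     * <pre>
     *   // illustrative: tell the grid that cell values were changed in place
     *   gridApi.core.notifyDataChange( uiGridConstants.dataChange.EDIT );
     * </pre>
     *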
*/
self.api.registerMethod( 'core', 'notifyDataChange', this.notifyDataChange );
/**
* @ngdoc method
* @name clearAllFilters
* @methodOf ui.grid.core.api:PublicApi
* @description Clears all filters and optionally refreshes the visible rows.
     * @param {boolean} refreshRows Defaults to true.
     * @param {boolean} clearConditions Defaults to false.
     * @param {boolean} clearFlags Defaults to false.
* @returns {promise} If `refreshRows` is true, returns a promise of the rows refreshing.
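     * @example
     * <pre>
     *   // illustrative: clear every filter and refresh the visible rows
     *   gridApi.core.clearAllFilters(true);
     * </pre>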
*/
self.api.registerMethod('core', 'clearAllFilters', this.clearAllFilters);
self.registerDataChangeCallback( self.columnRefreshCallback, [uiGridConstants.dataChange.COLUMN]);
self.registerDataChangeCallback( self.processRowsCallback, [uiGridConstants.dataChange.EDIT]);
self.registerStyleComputation({
priority: 10,
func: self.getFooterStyles
});
};
Grid.prototype.calcFooterHeight = function () {
if (!this.hasFooter()) {
return 0;
}
var height = 0;
if (this.options.showGridFooter) {
height += this.options.gridFooterHeight;
}
height += this.calcColumnFooterHeight();
return height;
};
Grid.prototype.calcColumnFooterHeight = function () {
var height = 0;
if (this.options.showColumnFooter) {
height += this.options.columnFooterHeight;
}
return height;
};
Grid.prototype.getFooterStyles = function () {
var style = '.grid' + this.id + ' .ui-grid-footer-aggregates-row { height: ' + this.options.columnFooterHeight + 'px; }';
style += ' .grid' + this.id + ' .ui-grid-footer-info { height: ' + this.options.gridFooterHeight + 'px; }';
return style;
};
Grid.prototype.hasFooter = function () {
return this.options.showGridFooter || this.options.showColumnFooter;
};
/**
* @ngdoc function
* @name isRTL
* @methodOf ui.grid.class:Grid
* @description Returns true if grid is RightToLeft
*/
Grid.prototype.isRTL = function () {
return this.rtl;
};
/**
* @ngdoc function
* @name registerColumnBuilder
* @methodOf ui.grid.class:Grid
* @description When the build creates columns from column definitions, the columnbuilders will be called to add
* additional properties to the column.
* @param {function(colDef, col, gridOptions)} columnBuilder function to be called
*/
Grid.prototype.registerColumnBuilder = function registerColumnBuilder(columnBuilder) {
this.columnBuilders.push(columnBuilder);
};
/**
* @ngdoc function
* @name buildColumnDefsFromData
* @methodOf ui.grid.class:Grid
* @description Populates columnDefs from the provided data
* @param {function(colDef, col, gridOptions)} rowBuilder function to be called
*/
Grid.prototype.buildColumnDefsFromData = function (dataRows){
this.options.columnDefs = gridUtil.getColumnsFromData(dataRows, this.options.excludeProperties);
};
/**
* @ngdoc function
* @name registerRowBuilder
* @methodOf ui.grid.class:Grid
* @description When the build creates rows from gridOptions.data, the rowBuilders will be called to add
* additional properties to the row.
* @param {function(row, gridOptions)} rowBuilder function to be called
*/
Grid.prototype.registerRowBuilder = function registerRowBuilder(rowBuilder) {
this.rowBuilders.push(rowBuilder);
};
/**
* @ngdoc function
* @name registerDataChangeCallback
* @methodOf ui.grid.class:Grid
* @description When a data change occurs, the data change callbacks of the specified type
* will be called. The rules are:
*
* - when the data watch fires, that is considered a ROW change (the data watch only notices
* added or removed rows)
* - when the api is called to inform us of a change, the declared type of that change is used
* - when a cell edit completes, the EDIT callbacks are triggered
* - when the columnDef watch fires, the COLUMN callbacks are triggered
* - when the options watch fires, the OPTIONS callbacks are triggered
*
* For a given event:
* - ALL calls ROW, EDIT, COLUMN, OPTIONS and ALL callbacks
* - ROW calls ROW and ALL callbacks
* - EDIT calls EDIT and ALL callbacks
* - COLUMN calls COLUMN and ALL callbacks
* - OPTIONS calls OPTIONS and ALL callbacks
*
* @param {function(grid)} callback function to be called
* @param {array} types the types of data change you want to be informed of. Values from
* the uiGridConstants.dataChange values ( ALL, EDIT, ROW, COLUMN, OPTIONS ). Optional and defaults to
* ALL
* @returns {function} deregister function - a function that can be called to deregister this callback
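   * @example
   * <pre>
   *   // illustrative sketch: react to ROW changes, then stop listening
   *   var deregister = grid.registerDataChangeCallback( function(grid) {
   *     console.log('row set changed on grid ' + grid.id);
   *   }, [uiGridConstants.dataChange.ROW] );
   *   deregister();
   * </pre>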
*/
Grid.prototype.registerDataChangeCallback = function registerDataChangeCallback(callback, types, _this) {
var uid = gridUtil.nextUid();
if ( !types ){
types = [uiGridConstants.dataChange.ALL];
}
if ( !Array.isArray(types)){
gridUtil.logError("Expected types to be an array or null in registerDataChangeCallback, value passed was: " + types );
}
this.dataChangeCallbacks[uid] = { callback: callback, types: types, _this:_this };
var self = this;
var deregisterFunction = function() {
delete self.dataChangeCallbacks[uid];
};
return deregisterFunction;
};
/**
* @ngdoc function
* @name callDataChangeCallbacks
* @methodOf ui.grid.class:Grid
* @description Calls the callbacks based on the type of data change that
* has occurred. Always calls the ALL callbacks, calls the ROW, EDIT, COLUMN and OPTIONS callbacks if the
* event type is matching, or if the type is ALL.
* @param {number} type the type of event that occurred - one of the
* uiGridConstants.dataChange values (ALL, ROW, EDIT, COLUMN, OPTIONS)
*/
Grid.prototype.callDataChangeCallbacks = function callDataChangeCallbacks(type, options) {
angular.forEach( this.dataChangeCallbacks, function( callback, uid ){
if ( callback.types.indexOf( uiGridConstants.dataChange.ALL ) !== -1 ||
callback.types.indexOf( type ) !== -1 ||
type === uiGridConstants.dataChange.ALL ) {
if (callback._this) {
          callback.callback.call(callback._this, this);
}
else {
callback.callback( this );
}
}
}, this);
};
/**
* @ngdoc function
* @name notifyDataChange
* @methodOf ui.grid.class:Grid
* @description Notifies us that a data change has occurred, used in the public
* api for users to tell us when they've changed data or some other event that
* our watches cannot pick up
* @param {string} type the type of event that occurred - one of the
* uiGridConstants.dataChange values (ALL, ROW, EDIT, COLUMN)
*/
Grid.prototype.notifyDataChange = function notifyDataChange(type) {
var constants = uiGridConstants.dataChange;
if ( type === constants.ALL ||
type === constants.COLUMN ||
type === constants.EDIT ||
type === constants.ROW ||
type === constants.OPTIONS ){
this.callDataChangeCallbacks( type );
} else {
gridUtil.logError("Notified of a data change, but the type was not recognised, so no action taken, type was: " + type);
}
};
/**
* @ngdoc function
* @name columnRefreshCallback
* @methodOf ui.grid.class:Grid
* @description refreshes the grid when a column refresh
* is notified, which triggers handling of the visible flag.
* This is called on uiGridConstants.dataChange.COLUMN, and is
* registered as a dataChangeCallback in grid.js
   * @param {Grid} grid the grid whose data changed
*/
Grid.prototype.columnRefreshCallback = function columnRefreshCallback( grid ){
grid.buildColumns();
grid.queueGridRefresh();
};
/**
* @ngdoc function
* @name processRowsCallback
* @methodOf ui.grid.class:Grid
* @description calls the row processors, specifically
* intended to reset the sorting when an edit is called,
* registered as a dataChangeCallback on uiGridConstants.dataChange.EDIT
   * @param {Grid} grid the grid whose data changed
*/
Grid.prototype.processRowsCallback = function processRowsCallback( grid ){
grid.queueGridRefresh();
};
/**
* @ngdoc function
* @name getColumn
* @methodOf ui.grid.class:Grid
* @description returns a grid column for the column name
* @param {string} name column name
*/
Grid.prototype.getColumn = function getColumn(name) {
var columns = this.columns.filter(function (column) {
return column.colDef.name === name;
});
return columns.length > 0 ? columns[0] : null;
};
/**
* @ngdoc function
* @name getColDef
* @methodOf ui.grid.class:Grid
* @description returns a grid colDef for the column name
* @param {string} name column.field
*/
Grid.prototype.getColDef = function getColDef(name) {
var colDefs = this.options.columnDefs.filter(function (colDef) {
return colDef.name === name;
});
return colDefs.length > 0 ? colDefs[0] : null;
};
/**
* @ngdoc function
* @name assignTypes
* @methodOf ui.grid.class:Grid
* @description uses the first row of data to assign colDef.type for any types not defined.
*/
/**
* @ngdoc property
* @name type
* @propertyOf ui.grid.class:GridOptions.columnDef
* @description the type of the column, used in sorting. If not provided then the
* grid will guess the type. Add this only if the grid guessing is not to your
* satisfaction. One of:
* - 'string'
* - 'boolean'
* - 'number'
* - 'date'
* - 'object'
* - 'numberStr'
   * Note that if you choose date, your dates should be JavaScript Date objects.
*
*/
Grid.prototype.assignTypes = function(){
var self = this;
self.options.columnDefs.forEach(function (colDef, index) {
//Assign colDef type if not specified
if (!colDef.type) {
var col = new GridColumn(colDef, index, self);
var firstRow = self.rows.length > 0 ? self.rows[0] : null;
if (firstRow) {
colDef.type = gridUtil.guessType(self.getCellValue(firstRow, col));
}
else {
gridUtil.logWarn('Unable to assign type from data, so defaulting to string');
colDef.type = 'string';
}
}
});
};
/**
* @ngdoc function
* @name isRowHeaderColumn
* @methodOf ui.grid.class:Grid
* @description returns true if the column is a row Header
* @param {object} column column
*/
Grid.prototype.isRowHeaderColumn = function isRowHeaderColumn(column) {
return this.rowHeaderColumns.indexOf(column) !== -1;
};
/**
* @ngdoc function
* @name addRowHeaderColumn
* @methodOf ui.grid.class:Grid
* @description adds a row header column to the grid
* @param {object} column def
*/
Grid.prototype.addRowHeaderColumn = function addRowHeaderColumn(colDef) {
var self = this;
var rowHeaderCol = new GridColumn(colDef, gridUtil.nextUid(), self);
rowHeaderCol.isRowHeader = true;
if (self.isRTL()) {
self.createRightContainer();
rowHeaderCol.renderContainer = 'right';
}
else {
self.createLeftContainer();
rowHeaderCol.renderContainer = 'left';
}
// relies on the default column builder being first in array, as it is instantiated
// as part of grid creation
self.columnBuilders[0](colDef,rowHeaderCol,self.options)
.then(function(){
rowHeaderCol.enableFiltering = false;
rowHeaderCol.enableSorting = false;
rowHeaderCol.enableHiding = false;
self.rowHeaderColumns.push(rowHeaderCol);
self.buildColumns()
.then( function() {
self.preCompileCellTemplates();
self.queueGridRefresh();
});
});
};
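// Illustrative sketch (the colDef below is hypothetical, similar to what a
// feature such as selection might register):
//
//   grid.addRowHeaderColumn({
//     name: 'myRowHeader',
//     width: 30,
//     cellTemplate: '<div class="ui-grid-cell-contents"></div>'
//   });
//
// The column is pinned to the left container (right when RTL) and has
// filtering, sorting and hiding disabled.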
/**
* @ngdoc function
* @name getOnlyDataColumns
* @methodOf ui.grid.class:Grid
* @description returns all columns except for rowHeader columns
*/
Grid.prototype.getOnlyDataColumns = function getOnlyDataColumns() {
var self = this;
var cols = [];
self.columns.forEach(function (col) {
if (self.rowHeaderColumns.indexOf(col) === -1) {
cols.push(col);
}
});
return cols;
};
/**
* @ngdoc function
* @name buildColumns
* @methodOf ui.grid.class:Grid
* @description creates GridColumn objects from the columnDefinition. Calls each registered
* columnBuilder to further process the column
* @param {object} options An object containing options to use when building columns
*
* * **orderByColumnDefs**: defaults to **false**. When true, `buildColumns` will reorder existing columns according to the order within the column definitions.
*
* @returns {Promise} a promise to load any needed column resources
*/
Grid.prototype.buildColumns = function buildColumns(opts) {
var options = {
orderByColumnDefs: false
};
angular.extend(options, opts);
// gridUtil.logDebug('buildColumns');
var self = this;
var builderPromises = [];
var headerOffset = self.rowHeaderColumns.length;
var i;
// Remove any columns for which a columnDef cannot be found
// Deliberately don't use forEach, as it doesn't like splice being called in the middle
// Also don't cache columns.length, as it will change during this operation
for (i = 0; i < self.columns.length; i++){
if (!self.getColDef(self.columns[i].name)) {
self.columns.splice(i, 1);
i--;
}
}
//add row header columns to the grid columns array _after_ columns without columnDefs have been removed
self.rowHeaderColumns.forEach(function (rowHeaderColumn) {
self.columns.unshift(rowHeaderColumn);
});
// look at each column def, and update column properties to match. If the column def
// doesn't have a column, then splice in a new gridCol
self.options.columnDefs.forEach(function (colDef, index) {
self.preprocessColDef(colDef);
var col = self.getColumn(colDef.name);
if (!col) {
col = new GridColumn(colDef, gridUtil.nextUid(), self);
self.columns.splice(index + headerOffset, 0, col);
}
else {
// tell updateColumnDef that the column was pre-existing
col.updateColumnDef(colDef, false);
}
self.columnBuilders.forEach(function (builder) {
builderPromises.push(builder.call(self, colDef, col, self.options));
});
});
/*** Reorder columns if necessary ***/
if (!!options.orderByColumnDefs) {
// Create a shallow copy of the columns as a cache
var columnCache = self.columns.slice(0);
// We need to allow for the "row headers" when mapping from the column defs array to the columns array
// If we have a row header in columns[0] and don't account for it we'll overwrite it with the column in columnDefs[0]
// Go through all the column defs, use the shorter of columns length and colDefs.length because if a user has given two columns the same name then
// columns will be shorter than columnDefs. In this situation we'll avoid an error, but the user will still get an unexpected result
var len = Math.min(self.options.columnDefs.length, self.columns.length);
for (i = 0; i < len; i++) {
// If the column at this index has a different name than the column at the same index in the column defs...
if (self.columns[i + headerOffset].name !== self.options.columnDefs[i].name) {
// Replace the one in the cache with the appropriate column
columnCache[i + headerOffset] = self.getColumn(self.options.columnDefs[i].name);
}
else {
// Otherwise just copy over the one from the initial columns
columnCache[i + headerOffset] = self.columns[i + headerOffset];
}
}
// Empty out the columns array, non-destructively
self.columns.length = 0;
// And splice in the updated, ordered columns from the cache
Array.prototype.splice.apply(self.columns, [0, 0].concat(columnCache));
}
return $q.all(builderPromises).then(function(){
if (self.rows.length > 0){
self.assignTypes();
}
});
};
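// Illustrative sketch (assumes 'grid' is an existing Grid instance): after
// mutating columnDefs, rebuild the live columns and reorder them to match:
//
//   grid.options.columnDefs.push({ name: 'added' });
//   grid.buildColumns({ orderByColumnDefs: true })
//     .then(function () { grid.queueGridRefresh(); });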
/**
* @ngdoc function
* @name preCompileCellTemplates
* @methodOf ui.grid.class:Grid
* @description precompiles all cell templates
*/
Grid.prototype.preCompileCellTemplates = function() {
var self = this;
var preCompileTemplate = function( col ) {
var html = col.cellTemplate.replace(uiGridConstants.MODEL_COL_FIELD, self.getQualifiedColField(col));
html = html.replace(uiGridConstants.COL_FIELD, 'grid.getCellValue(row, col)');
var compiledElementFn = $compile(html);
col.compiledElementFn = compiledElementFn;
if (col.compiledElementFnDefer) {
col.compiledElementFnDefer.resolve(col.compiledElementFn);
}
};
this.columns.forEach(function (col) {
if ( col.cellTemplate ){
preCompileTemplate( col );
} else if ( col.cellTemplatePromise ){
col.cellTemplatePromise.then( function() {
preCompileTemplate( col );
});
}
});
};
/**
* @ngdoc function
* @name getQualifiedColField
* @methodOf ui.grid.class:Grid
* @description Returns the $parse-able accessor for a column within its $scope
* @param {GridColumn} col col object
*/
Grid.prototype.getQualifiedColField = function (col) {
return 'row.entity.' + gridUtil.preEval(col.field);
};
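// Illustrative sketch: for a column whose field is 'address.city' this returns
// an accessor string rooted at row.entity, roughly "row.entity.address['city']"
// (the exact bracketing depends on gridUtil.preEval), which $parse can compile:
//
//   var getter = $parse(grid.getQualifiedColField(cityCol));
//   var city = getter({ row: { entity: { address: { city: 'Oslo' } } } });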
/**
* @ngdoc function
* @name createLeftContainer
* @methodOf ui.grid.class:Grid
* @description creates the left render container if it doesn't already exist
*/
Grid.prototype.createLeftContainer = function() {
if (!this.hasLeftContainer()) {
this.renderContainers.left = new GridRenderContainer('left', this, { disableColumnOffset: true });
}
};
/**
* @ngdoc function
* @name createRightContainer
* @methodOf ui.grid.class:Grid
* @description creates the right render container if it doesn't already exist
*/
Grid.prototype.createRightContainer = function() {
if (!this.hasRightContainer()) {
this.renderContainers.right = new GridRenderContainer('right', this, { disableColumnOffset: true });
}
};
/**
* @ngdoc function
* @name hasLeftContainer
* @methodOf ui.grid.class:Grid
* @description returns true if leftContainer exists
*/
Grid.prototype.hasLeftContainer = function() {
return this.renderContainers.left !== undefined;
};
/**
* @ngdoc function
* @name hasRightContainer
* @methodOf ui.grid.class:Grid
* @description returns true if rightContainer exists
*/
Grid.prototype.hasRightContainer = function() {
return this.renderContainers.right !== undefined;
};
/**
* undocumented function
* @name preprocessColDef
* @methodOf ui.grid.class:Grid
* @description defaults the name property from field to maintain backwards compatibility with 2.x
* validates that name or field is present
*/
Grid.prototype.preprocessColDef = function preprocessColDef(colDef) {
var self = this;
if (!colDef.field && !colDef.name) {
throw new Error('colDef.name or colDef.field property is required');
}
//maintain backwards compatibility with 2.x
//field was required in 2.x. now name is required
if (colDef.name === undefined && colDef.field !== undefined) {
// See if the column name already exists:
var newName = colDef.field,
counter = 2;
while (self.getColumn(newName)) {
newName = colDef.field + counter.toString();
counter++;
}
colDef.name = newName;
}
};
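// Illustrative sketch of the name defaulting (colDefs here are hypothetical):
//
//   grid.preprocessColDef({ field: 'amount' }); // colDef.name becomes 'amount'
//   // if a column named 'amount' already exists, a suffix is appended:
//   grid.preprocessColDef({ field: 'amount' }); // colDef.name becomes 'amount2'
//   grid.preprocessColDef({});                  // throws: name or field required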
// Return a list of items that exist in the `n` array but not the `o` array. Uses optional property accessors passed as third & fourth parameters
Grid.prototype.newInN = function newInN(o, n, oAccessor, nAccessor) {
var self = this;
var t = [];
for (var i = 0; i < n.length; i++) {
var nV = nAccessor ? n[i][nAccessor] : n[i];
var found = false;
for (var j = 0; j < o.length; j++) {
var oV = oAccessor ? o[j][oAccessor] : o[j];
if (self.options.rowEquality(nV, oV)) {
found = true;
break;
}
}
if (!found) {
t.push(nV);
}
}
return t;
};
/**
* @ngdoc function
* @name getRow
* @methodOf ui.grid.class:Grid
* @description returns the GridRow that contains the rowEntity
* @param {object} rowEntity the gridOptions.data array element instance
* @param {array} [lookInRows] the rows to look in - if not provided then
* looks in grid.rows
*/
Grid.prototype.getRow = function getRow(rowEntity, lookInRows) {
var self = this;
lookInRows = typeof(lookInRows) === 'undefined' ? self.rows : lookInRows;
var rows = lookInRows.filter(function (row) {
return self.options.rowEquality(row.entity, rowEntity);
});
return rows.length > 0 ? rows[0] : null;
};
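// Illustrative sketch (assumes the entity came from gridOptions.data):
//
//   var entity = $scope.gridOptions.data[0];
//   var gridRow = grid.getRow(entity); // null if no row matches via rowEquality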
/**
* @ngdoc function
* @name modifyRows
* @methodOf ui.grid.class:Grid
* @description creates or removes GridRow objects from the newRawData array. Calls each registered
* rowBuilder to further process the row
*
* This method aims to achieve three things:
* 1. the resulting rows array is in the same order as the newRawData, we'll call
* rowsProcessors immediately after to sort the data anyway
* 2. if we have row hashing available, we try to use the rowHash to find the row
* 3. no memory leaks - rows that are no longer in newRawData need to be garbage collected
*
* The basic logic flow makes use of the newRawData, oldRows and oldHash, and creates
* the newRows and newHash
*
* ```
* newRawData.forEach newEntity
* if (hashing enabled)
* check oldHash for newEntity
* else
* look for old row directly in oldRows
* if !oldRowFound // must be a new row
* create newRow
* append to the newRows and add to newHash
* run the processors
* ```
*
* Rows are identified using the hashKey if configured. If not configured, then rows
* are identified using the gridOptions.rowEquality function
*/
Grid.prototype.modifyRows = function modifyRows(newRawData) {
var self = this;
var oldRows = self.rows.slice(0);
var oldRowHash = self.rowHashMap || self.createRowHashMap();
self.rowHashMap = self.createRowHashMap();
self.rows.length = 0;
newRawData.forEach( function( newEntity, i ) {
var newRow;
if ( self.options.enableRowHashing ){
// if hashing is enabled, then this row will be in the hash if we already know about it
newRow = oldRowHash.get( newEntity );
} else {
// otherwise, manually search the oldRows to see if we can find this row
newRow = self.getRow(newEntity, oldRows);
}
// if we didn't find the row, it must be new, so create it
if ( !newRow ){
newRow = self.processRowBuilders(new GridRow(newEntity, i, self));
}
self.rows.push( newRow );
self.rowHashMap.put( newEntity, newRow );
});
self.assignTypes();
var p1 = $q.when(self.processRowsProcessors(self.rows))
.then(function (renderableRows) {
return self.setVisibleRows(renderableRows);
});
var p2 = $q.when(self.processColumnsProcessors(self.columns))
.then(function (renderableColumns) {
return self.setVisibleColumns(renderableColumns);
});
return $q.all([p1, p2]);
};
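// Illustrative sketch (the data source is hypothetical): swapping in a new raw
// array re-uses matching GridRow objects, so per-row state survives the update:
//
//   grid.options.data = fetchedRows; // hypothetical new array from a server
//   grid.modifyRows(grid.options.data)
//     .then(function () { grid.redrawInPlace(); });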
/**
* Private Undocumented Method
* @name addRows
* @methodOf ui.grid.class:Grid
* @description adds the newRawData array of rows to the grid and calls all registered
* rowBuilders. this keyword will reference the grid
*/
Grid.prototype.addRows = function addRows(newRawData) {
var self = this;
var existingRowCount = self.rows.length;
for (var i = 0; i < newRawData.length; i++) {
var newRow = self.processRowBuilders(new GridRow(newRawData[i], i + existingRowCount, self));
if (self.options.enableRowHashing) {
var found = self.rowHashMap.get(newRow.entity);
if (found) {
found.row = newRow;
}
}
self.rows.push(newRow);
}
};
/**
* @ngdoc function
* @name processRowBuilders
* @methodOf ui.grid.class:Grid
* @description processes all RowBuilders for the gridRow
* @param {GridRow} gridRow reference to gridRow
* @returns {GridRow} the gridRow with all additional behavior added
*/
Grid.prototype.processRowBuilders = function processRowBuilders(gridRow) {
var self = this;
self.rowBuilders.forEach(function (builder) {
builder.call(self, gridRow, self.options);
});
return gridRow;
};
/**
* @ngdoc function
* @name registerStyleComputation
* @methodOf ui.grid.class:Grid
* @description registered a styleComputation function
*
* If the function returns a value it will be appended into the grid's `<style>` block
* @param {function($scope)} styleComputation function
*/
Grid.prototype.registerStyleComputation = function registerStyleComputation(styleComputationInfo) {
this.styleComputations.push(styleComputationInfo);
};
// NOTE (c0bra): We already have rowBuilders. I think these do exactly the same thing...
// Grid.prototype.registerRowFilter = function(filter) {
// // TODO(c0bra): validate filter?
// this.rowFilters.push(filter);
// };
// Grid.prototype.removeRowFilter = function(filter) {
// var idx = this.rowFilters.indexOf(filter);
// if (typeof(idx) !== 'undefined' && idx !== undefined) {
// this.rowFilters.slice(idx, 1);
// }
// };
// Grid.prototype.processRowFilters = function(rows) {
// var self = this;
// self.rowFilters.forEach(function (filter) {
// filter.call(self, rows);
// });
// };
/**
* @ngdoc function
* @name registerRowsProcessor
* @methodOf ui.grid.class:Grid
* @description
*
* Register a "rows processor" function. When the rows are updated,
* the grid calls each registered "rows processor", which has a chance
* to alter the set of rows (sorting, etc) as long as the count is not
* modified.
*
* @param {function(renderedRowsToProcess, columns )} processorFunction rows processor function, which
* is run in the context of the grid (i.e. this for the function will be the grid), and must
* return the updated rows list, which is passed to the next processor in the chain
* @param {number} priority the priority of this processor. In general we try to do them in 100s to leave room
* for other people to inject rows processors at intermediate priorities. Lower priority rowsProcessors run earlier.
*
* At present the 'all rows visible' processor runs at 50, filter at 100, sort at 200, grouping at 400, selectable rows at 500, and pagination at 900 (pagination will generally want to be last)
*
*/
Grid.prototype.registerRowsProcessor = function registerRowsProcessor(processor, priority) {
if (!angular.isFunction(processor)) {
throw 'Attempt to register non-function rows processor: ' + processor;
}
this.rowsProcessors.push({processor: processor, priority: priority});
this.rowsProcessors.sort(function sortByPriority( a, b ){
return a.priority - b.priority;
});
};
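// Illustrative sketch: a minimal rows processor registered at priority 150,
// i.e. between the built-in filter (100) and sort (200). The 'hidden' entity
// flag is hypothetical:
//
//   grid.registerRowsProcessor(function (renderableRows) {
//     renderableRows.forEach(function (row) {
//       if (row.entity.hidden) { row.visible = false; }
//     });
//     return renderableRows; // processors must return the (same-length) array
//   }, 150);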
/**
* @ngdoc function
* @name removeRowsProcessor
* @methodOf ui.grid.class:Grid
* @param {function(renderableRows)} processor the rows processor function that was previously registered
* @description Remove a registered rows processor
*/
Grid.prototype.removeRowsProcessor = function removeRowsProcessor(processor) {
var idx = -1;
this.rowsProcessors.forEach(function(rowsProcessor, index){
if ( rowsProcessor.processor === processor ){
idx = index;
}
});
if ( idx !== -1 ) {
this.rowsProcessors.splice(idx, 1);
}
};
/**
* Private Undocumented Method
* @name processRowsProcessors
* @methodOf ui.grid.class:Grid
* @param {Array[GridRow]} renderableRows The array of "renderable" rows
* @description Run all the registered rows processors on the array of renderable rows
*/
Grid.prototype.processRowsProcessors = function processRowsProcessors(renderableRows) {
var self = this;
// Create a shallow copy of the rows so that we can safely sort them without altering the original grid.rows sort order
var myRenderableRows = renderableRows.slice(0);
// Return myRenderableRows with no processing if we have no rows processors
if (self.rowsProcessors.length === 0) {
return $q.when(myRenderableRows);
}
// Counter for iterating through rows processors
var i = 0;
// Promise for when we're done with all the processors
var finished = $q.defer();
// This function will call the processor in self.rowsProcessors at index 'i', and then
// when done will call the next processor in the list, using the output from the processor
// at i as the argument for 'renderedRowsToProcess' on the next iteration.
//
// If we're at the end of the list of processors, we resolve our 'finished' callback with
// the result.
function startProcessor(i, renderedRowsToProcess) {
// Get the processor at 'i'
var processor = self.rowsProcessors[i].processor;
// Call the processor, passing in the rows to process and the current columns
// (note: it's wrapped in $q.when() in case the processor does not return a promise)
return $q.when( processor.call(self, renderedRowsToProcess, self.columns) )
.then(function handleProcessedRows(processedRows) {
// Check for errors
if (!processedRows) {
throw "Processor at index " + i + " did not return a set of renderable rows";
}
if (!angular.isArray(processedRows)) {
throw "Processor at index " + i + " did not return an array";
}
// Processor is done, increment the counter
i++;
// If we're not done with the processors, call the next one
if (i <= self.rowsProcessors.length - 1) {
return startProcessor(i, processedRows);
}
// We're done! Resolve the 'finished' promise
else {
finished.resolve(processedRows);
}
});
}
// Start on the first processor
startProcessor(0, myRenderableRows);
return finished.promise;
};
Grid.prototype.setVisibleRows = function setVisibleRows(rows) {
var self = this;
// Reset all the render container row caches
for (var i in self.renderContainers) {
var container = self.renderContainers[i];
container.canvasHeightShouldUpdate = true;
if ( typeof(container.visibleRowCache) === 'undefined' ){
container.visibleRowCache = [];
} else {
container.visibleRowCache.length = 0;
}
}
// rows.forEach(function (row) {
for (var ri = 0; ri < rows.length; ri++) {
var row = rows[ri];
var targetContainer = (typeof(row.renderContainer) !== 'undefined' && row.renderContainer) ? row.renderContainer : 'body';
// If the row is visible
if (row.visible) {
self.renderContainers[targetContainer].visibleRowCache.push(row);
}
}
self.api.core.raise.rowsRendered(this.api);
};
/**
* @ngdoc function
* @name registerColumnsProcessor
* @methodOf ui.grid.class:Grid
* @param {function(renderedColumnsToProcess, rows)} columnProcessor column processor function, which
* is run in the context of the grid (i.e. this for the function will be the grid), and
* which must return an updated renderedColumnsToProcess which can be passed to the next processor
* in the chain
* @param {number} priority the priority of this processor. In general we try to do them in 100s to leave room
* for other people to inject columns processors at intermediate priorities. Lower priority columnsProcessors run earlier.
*
* At present the 'all rows visible' processor runs at 50, filter at 100, sort at 200, grouping at 400, selectable rows at 500, and pagination at 900 (pagination will generally want to be last)
* @description
Register a "columns processor" function. When the columns are updated,
the grid calls each registered "columns processor", which has a chance
to alter the set of columns, as long as the count is not modified.
*/
Grid.prototype.registerColumnsProcessor = function registerColumnsProcessor(processor, priority) {
if (!angular.isFunction(processor)) {
throw 'Attempt to register non-function columns processor: ' + processor;
}
this.columnsProcessors.push({processor: processor, priority: priority});
this.columnsProcessors.sort(function sortByPriority( a, b ){
return a.priority - b.priority;
});
};
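// Illustrative sketch: a columns processor mirroring the rows variant; the
// 'internalOnly' colDef flag is hypothetical:
//
//   grid.registerColumnsProcessor(function (renderableColumns) {
//     renderableColumns.forEach(function (col) {
//       if (col.colDef.internalOnly) { col.visible = false; }
//     });
//     return renderableColumns;
//   }, 150);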
Grid.prototype.removeColumnsProcessor = function removeColumnsProcessor(processor) {
var idx = -1;
this.columnsProcessors.forEach(function(columnsProcessor, index){
if ( columnsProcessor.processor === processor ){
idx = index;
}
});
if ( idx !== -1 ) {
this.columnsProcessors.splice(idx, 1);
}
};
Grid.prototype.processColumnsProcessors = function processColumnsProcessors(renderableColumns) {
var self = this;
// Create a shallow copy of the columns so that we can safely sort them without altering the original grid.columns sort order
var myRenderableColumns = renderableColumns.slice(0);
// Return myRenderableColumns with no processing if we have no columns processors
if (self.columnsProcessors.length === 0) {
return $q.when(myRenderableColumns);
}
// Counter for iterating through columns processors
var i = 0;
// Promise for when we're done with all the processors
var finished = $q.defer();
// This function will call the processor in self.columnsProcessors at index 'i', and then
// when done will call the next processor in the list, using the output from the processor
// at i as the argument for 'renderedColumnsToProcess' on the next iteration.
//
// If we're at the end of the list of processors, we resolve our 'finished' callback with
// the result.
function startProcessor(i, renderedColumnsToProcess) {
// Get the processor at 'i'
var processor = self.columnsProcessors[i].processor;
// Call the processor, passing in the rows to process and the current columns
// (note: it's wrapped in $q.when() in case the processor does not return a promise)
return $q.when( processor.call(self, renderedColumnsToProcess, self.rows) )
.then(function handleProcessedColumns(processedColumns) {
// Check for errors
if (!processedColumns) {
throw "Processor at index " + i + " did not return a set of renderable rows";
}
if (!angular.isArray(processedColumns)) {
throw "Processor at index " + i + " did not return an array";
}
// Processor is done, increment the counter
i++;
// If we're not done with the processors, call the next one
if (i <= self.columnsProcessors.length - 1) {
return startProcessor(i, processedColumns);
}
// We're done! Resolve the 'finished' promise
else {
finished.resolve(processedColumns);
}
});
}
// Start on the first processor
startProcessor(0, myRenderableColumns);
return finished.promise;
};
Grid.prototype.setVisibleColumns = function setVisibleColumns(columns) {
// gridUtil.logDebug('setVisibleColumns');
var self = this;
// Reset all the render container row caches
for (var i in self.renderContainers) {
var container = self.renderContainers[i];
container.visibleColumnCache.length = 0;
}
for (var ci = 0; ci < columns.length; ci++) {
var column = columns[ci];
// If the column is visible
if (column.visible) {
// If the column has a container specified
if (typeof(column.renderContainer) !== 'undefined' && column.renderContainer) {
self.renderContainers[column.renderContainer].visibleColumnCache.push(column);
}
// If not, put it into the body container
else {
self.renderContainers.body.visibleColumnCache.push(column);
}
}
}
};
/**
* @ngdoc function
* @name handleWindowResize
* @methodOf ui.grid.class:Grid
* @description Triggered when the browser window resizes; automatically resizes the grid
*/
Grid.prototype.handleWindowResize = function handleWindowResize($event) {
var self = this;
self.gridWidth = gridUtil.elementWidth(self.element);
self.gridHeight = gridUtil.elementHeight(self.element);
self.queueRefresh();
};
/**
* @ngdoc function
* @name queueRefresh
* @methodOf ui.grid.class:Grid
* @description queues a grid refreshCanvas, a way of debouncing all the refreshes we might otherwise issue
*/
Grid.prototype.queueRefresh = function queueRefresh() {
var self = this;
if (self.refreshCanceller) {
$timeout.cancel(self.refreshCanceller);
}
self.refreshCanceller = $timeout(function () {
self.refreshCanvas(true);
});
self.refreshCanceller.then(function () {
self.refreshCanceller = null;
});
return self.refreshCanceller;
};
/**
* @ngdoc function
* @name queueGridRefresh
* @methodOf ui.grid.class:Grid
* @description queues a grid refresh, a way of debouncing all the refreshes we might otherwise issue
*/
Grid.prototype.queueGridRefresh = function queueGridRefresh() {
var self = this;
if (self.gridRefreshCanceller) {
$timeout.cancel(self.gridRefreshCanceller);
}
self.gridRefreshCanceller = $timeout(function () {
self.refresh(true);
});
self.gridRefreshCanceller.then(function () {
self.gridRefreshCanceller = null;
});
return self.gridRefreshCanceller;
};
/**
* @ngdoc function
* @name updateCanvasHeight
* @methodOf ui.grid.class:Grid
* @description flags all render containers to update their canvas height
*/
Grid.prototype.updateCanvasHeight = function updateCanvasHeight() {
var self = this;
for (var containerId in self.renderContainers) {
if (self.renderContainers.hasOwnProperty(containerId)) {
var container = self.renderContainers[containerId];
container.canvasHeightShouldUpdate = true;
}
}
};
/**
* @ngdoc function
* @name buildStyles
* @methodOf ui.grid.class:Grid
* @description calls each styleComputation function
*/
// TODO: this used to take $scope, but couldn't see that it was used
Grid.prototype.buildStyles = function buildStyles() {
// gridUtil.logDebug('buildStyles');
var self = this;
self.customStyles = '';
self.styleComputations
.sort(function(a, b) {
if (a.priority === null && b.priority === null) { return 0; }
if (a.priority === null) { return 1; }
if (b.priority === null) { return -1; }
return a.priority - b.priority;
})
.forEach(function (compInfo) {
// this used to provide $scope as a second parameter, but I couldn't find any
// style builders that used it, so removed it as part of moving to grid from controller
var ret = compInfo.func.call(self);
if (angular.isString(ret)) {
self.customStyles += '\n' + ret;
}
});
};
Grid.prototype.minColumnsToRender = function minColumnsToRender() {
var self = this;
var viewport = this.getViewportWidth();
var min = 0;
var totalWidth = 0;
self.columns.forEach(function(col, i) {
if (totalWidth < viewport) {
totalWidth += col.drawnWidth;
min++;
}
else {
var currWidth = 0;
for (var j = i; j >= i - min; j--) {
currWidth += self.columns[j].drawnWidth;
}
if (currWidth < viewport) {
min++;
}
}
});
return min;
};
Grid.prototype.getBodyHeight = function getBodyHeight() {
// Start with the viewportHeight
var bodyHeight = this.getViewportHeight();
// Add the horizontal scrollbar height if there is one
//if (typeof(this.horizontalScrollbarHeight) !== 'undefined' && this.horizontalScrollbarHeight !== undefined && this.horizontalScrollbarHeight > 0) {
// bodyHeight = bodyHeight + this.horizontalScrollbarHeight;
//}
return bodyHeight;
};
// NOTE: viewport drawable height is the height of the grid minus the header row height (including any border)
// TODO(c0bra): account for footer height
Grid.prototype.getViewportHeight = function getViewportHeight() {
var self = this;
var viewPortHeight = this.gridHeight - this.headerHeight - this.footerHeight;
// Account for native horizontal scrollbar, if present
//if (typeof(this.horizontalScrollbarHeight) !== 'undefined' && this.horizontalScrollbarHeight !== undefined && this.horizontalScrollbarHeight > 0) {
// viewPortHeight = viewPortHeight - this.horizontalScrollbarHeight;
//}
var adjustment = self.getViewportAdjustment();
viewPortHeight = viewPortHeight + adjustment.height;
//gridUtil.logDebug('viewPortHeight', viewPortHeight);
return viewPortHeight;
};
Grid.prototype.getViewportWidth = function getViewportWidth() {
var self = this;
var viewPortWidth = this.gridWidth;
//if (typeof(this.verticalScrollbarWidth) !== 'undefined' && this.verticalScrollbarWidth !== undefined && this.verticalScrollbarWidth > 0) {
// viewPortWidth = viewPortWidth - this.verticalScrollbarWidth;
//}
var adjustment = self.getViewportAdjustment();
viewPortWidth = viewPortWidth + adjustment.width;
//gridUtil.logDebug('getviewPortWidth', viewPortWidth);
return viewPortWidth;
};
Grid.prototype.getHeaderViewportWidth = function getHeaderViewportWidth() {
var viewPortWidth = this.getViewportWidth();
//if (typeof(this.verticalScrollbarWidth) !== 'undefined' && this.verticalScrollbarWidth !== undefined && this.verticalScrollbarWidth > 0) {
// viewPortWidth = viewPortWidth + this.verticalScrollbarWidth;
//}
return viewPortWidth;
};
Grid.prototype.addVerticalScrollSync = function (containerId, callBackFn) {
this.verticalScrollSyncCallBackFns[containerId] = callBackFn;
};
Grid.prototype.addHorizontalScrollSync = function (containerId, callBackFn) {
this.horizontalScrollSyncCallBackFns[containerId] = callBackFn;
};
/**
* Scroll needed containers by calling their ScrollSyncs
* @param sourceContainerId the containerId that has already set its top/left.
* can be empty string which means all containers need to set top/left
* @param scrollEvent
*/
Grid.prototype.scrollContainers = function (sourceContainerId, scrollEvent) {
if (scrollEvent.y) {
//default for no container Id (ex. mousewheel means that all containers must set scrollTop/Left)
var verts = ['body','left', 'right'];
this.flagScrollingVertically(scrollEvent);
if (sourceContainerId === 'body') {
verts = ['left', 'right'];
}
else if (sourceContainerId === 'left') {
verts = ['body', 'right'];
}
else if (sourceContainerId === 'right') {
verts = ['body', 'left'];
}
for (var i = 0; i < verts.length; i++) {
var id = verts[i];
if (this.verticalScrollSyncCallBackFns[id]) {
this.verticalScrollSyncCallBackFns[id](scrollEvent);
}
}
}
if (scrollEvent.x) {
//default for no container Id (ex. mousewheel means that all containers must set scrollTop/Left)
var horizs = ['body','bodyheader', 'bodyfooter'];
this.flagScrollingHorizontally(scrollEvent);
if (sourceContainerId === 'body') {
horizs = ['bodyheader', 'bodyfooter'];
}
for (var j = 0; j < horizs.length; j++) {
var idh = horizs[j];
if (this.horizontalScrollSyncCallBackFns[idh]) {
this.horizontalScrollSyncCallBackFns[idh](scrollEvent);
}
}
}
};
Grid.prototype.registerViewportAdjuster = function registerViewportAdjuster(func) {
this.viewportAdjusters.push(func);
};
Grid.prototype.removeViewportAdjuster = function removeViewportAdjuster(func) {
var idx = this.viewportAdjusters.indexOf(func);
if (idx > -1) {
this.viewportAdjusters.splice(idx, 1);
}
};
Grid.prototype.getViewportAdjustment = function getViewportAdjustment() {
var self = this;
var adjustment = { height: 0, width: 0 };
self.viewportAdjusters.forEach(function (func) {
adjustment = func.call(self, adjustment);
});
return adjustment;
};
Grid.prototype.getVisibleRowCount = function getVisibleRowCount() {
// var count = 0;
// this.rows.forEach(function (row) {
// if (row.visible) {
// count++;
// }
// });
// return this.visibleRowCache.length;
return this.renderContainers.body.visibleRowCache.length;
};
Grid.prototype.getVisibleRows = function getVisibleRows() {
return this.renderContainers.body.visibleRowCache;
};
Grid.prototype.getVisibleColumnCount = function getVisibleColumnCount() {
// var count = 0;
// this.rows.forEach(function (row) {
// if (row.visible) {
// count++;
// }
// });
// return this.visibleRowCache.length;
return this.renderContainers.body.visibleColumnCache.length;
};
Grid.prototype.searchRows = function searchRows(renderableRows) {
return rowSearcher.search(this, renderableRows, this.columns);
};
Grid.prototype.sortByColumn = function sortByColumn(renderableRows) {
return rowSorter.sort(this, renderableRows, this.columns);
};
/**
* @ngdoc function
* @name getCellValue
* @methodOf ui.grid.class:Grid
* @description Gets the value of a cell for a particular row and column
* @param {GridRow} row Row to access
* @param {GridColumn} col Column to access
*/
Grid.prototype.getCellValue = function getCellValue(row, col){
if ( typeof(row.entity[ '$$' + col.uid ]) !== 'undefined' ) {
return row.entity[ '$$' + col.uid].rendered;
} else if (this.options.flatEntityAccess && typeof(col.field) !== 'undefined' ){
return row.entity[col.field];
} else {
if (!col.cellValueGetterCache) {
col.cellValueGetterCache = $parse(row.getEntityQualifiedColField(col));
}
return col.cellValueGetterCache(row);
}
};
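// Illustrative sketch of the three lookup paths, in the order tried above:
// a pre-rendered '$$<uid>' value if present, a direct property read when
// flatEntityAccess is enabled, otherwise a cached $parse getter:
//
//   grid.options.flatEntityAccess = true; // only safe for non-nested fields
//   var value = grid.getCellValue(someRow, nameCol); // ~ someRow.entity['name']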
Grid.prototype.getNextColumnSortPriority = function getNextColumnSortPriority() {
var self = this,
p = 0;
self.columns.forEach(function (col) {
if (col.sort && col.sort.priority && col.sort.priority > p) {
p = col.sort.priority;
}
});
return p + 1;
};
/**
* @ngdoc function
* @name resetColumnSorting
* @methodOf ui.grid.class:Grid
* @description Resets the sorting on all columns (other than those with suppressRemoveSort set)
* @param {GridColumn} [excludeCol] Optional GridColumn to exclude from having its sorting reset
*/
Grid.prototype.resetColumnSorting = function resetColumnSorting(excludeCol) {
var self = this;
self.columns.forEach(function (col) {
if (col !== excludeCol && !col.suppressRemoveSort) {
col.sort = {};
}
});
};
/**
* @ngdoc function
* @name getColumnSorting
* @methodOf ui.grid.class:Grid
* @description Return the columns that the grid is currently being sorted by
* @returns {Array[GridColumn]} An array of GridColumn objects
*/
Grid.prototype.getColumnSorting = function getColumnSorting() {
var self = this;
var sortedCols = [], myCols;
// Iterate through all the columns, sorted by priority
// Make local copy of column list, because sorting is in-place and we do not want to
// change the original sequence of columns
myCols = self.columns.slice(0);
myCols.sort(rowSorter.prioritySort).forEach(function (col) {
if (col.sort && typeof(col.sort.direction) !== 'undefined' && col.sort.direction && (col.sort.direction === uiGridConstants.ASC || col.sort.direction === uiGridConstants.DESC)) {
sortedCols.push(col);
}
});
return sortedCols;
};
/**
* @ngdoc function
* @name sortColumn
* @methodOf ui.grid.class:Grid
* @description Set the sorting on a given column, optionally resetting any existing sorting on the Grid.
* Emits the sortChanged event whenever the sort criteria are changed.
* @param {GridColumn} column Column to set the sorting on
* @param {uiGridConstants.ASC|uiGridConstants.DESC} [direction] Direction to sort by, either descending or ascending.
* If not provided, the column will iterate through the sort directions: ascending, descending, unsorted.
* @param {boolean} [add] Add this column to the sorting. If not provided or set to `false`, the Grid will reset any existing sorting and sort
* by this column only
* @returns {Promise} A resolved promise that supplies the column.
*/
Grid.prototype.sortColumn = function sortColumn(column, directionOrAdd, add) {
var self = this,
direction = null;
if (typeof(column) === 'undefined' || !column) {
throw new Error('No column parameter provided');
}
// Second argument can either be a direction or whether to add this column to the existing sort.
// If it's a boolean, it's an add, otherwise, it's a direction
if (typeof(directionOrAdd) === 'boolean') {
add = directionOrAdd;
}
else {
direction = directionOrAdd;
}
if (!add) {
self.resetColumnSorting(column);
column.sort.priority = 0;
// Get the actual priority since there may be columns which have suppressRemoveSort set
column.sort.priority = self.getNextColumnSortPriority();
}
else if (!column.sort.priority){
column.sort.priority = self.getNextColumnSortPriority();
}
if (!direction) {
// Figure out the sort direction
if (column.sort.direction && column.sort.direction === uiGridConstants.ASC) {
column.sort.direction = uiGridConstants.DESC;
}
else if (column.sort.direction && column.sort.direction === uiGridConstants.DESC) {
if ( column.colDef && column.suppressRemoveSort ){
column.sort.direction = uiGridConstants.ASC;
} else {
column.sort = {};
}
}
else {
column.sort.direction = uiGridConstants.ASC;
}
}
else {
column.sort.direction = direction;
}
self.api.core.raise.sortChanged( self, self.getColumnSorting() );
return $q.when(column);
};
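// Illustrative sketch (assumes a column named 'age' exists): sort descending
// while keeping any existing sort columns by passing add === true:
//
//   grid.sortColumn(grid.getColumn('age'), uiGridConstants.DESC, true)
//     .then(function () { return grid.refresh(); });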
/**
* communicate to outside world that we are done with initial rendering
*/
Grid.prototype.renderingComplete = function(){
if (angular.isFunction(this.options.onRegisterApi)) {
this.options.onRegisterApi(this.api);
}
this.api.core.raise.renderingComplete( this.api );
};
Grid.prototype.createRowHashMap = function createRowHashMap() {
var self = this;
var hashMap = new RowHashMap();
hashMap.grid = self;
return hashMap;
};
/**
* @ngdoc function
* @name refresh
* @methodOf ui.grid.class:Grid
* @description Refresh the rendered grid on screen.
* @param {boolean} [rowsAltered] Optional flag for refreshing when the number of rows has changed
*/
Grid.prototype.refresh = function refresh(rowsAltered) {
var self = this;
var p1 = self.processRowsProcessors(self.rows).then(function (renderableRows) {
self.setVisibleRows(renderableRows);
});
var p2 = self.processColumnsProcessors(self.columns).then(function (renderableColumns) {
self.setVisibleColumns(renderableColumns);
});
return $q.all([p1, p2]).then(function () {
self.redrawInPlace(rowsAltered);
self.refreshCanvas(true);
});
};
/**
* @ngdoc function
* @name refreshRows
* @methodOf ui.grid.class:Grid
* @description Refresh the rendered rows on screen. Note: not fully functional at present
* @returns {promise} promise that is resolved when the render completes
*
*/
Grid.prototype.refreshRows = function refreshRows() {
var self = this;
return self.processRowsProcessors(self.rows)
.then(function (renderableRows) {
self.setVisibleRows(renderableRows);
self.redrawInPlace();
self.refreshCanvas( true );
});
};
/**
* @ngdoc function
* @name refreshCanvas
* @methodOf ui.grid.class:Grid
* @description Builds all styles and recalculates much of the grid sizing
* @param {boolean} [buildStyles] optional parameter. When true, the grid styles are rebuilt before sizes are recalculated
* @returns {promise} promise that is resolved when the canvas
* has been refreshed
*
*/
Grid.prototype.refreshCanvas = function(buildStyles) {
var self = this;
if (buildStyles) {
self.buildStyles();
}
var p = $q.defer();
// Get all the header heights
var containerHeadersToRecalc = [];
for (var containerId in self.renderContainers) {
if (self.renderContainers.hasOwnProperty(containerId)) {
var container = self.renderContainers[containerId];
// Skip containers that have no canvasWidth set yet
if (container.canvasWidth === null || isNaN(container.canvasWidth)) {
continue;
}
if (container.header || container.headerCanvas) {
container.explicitHeaderHeight = container.explicitHeaderHeight || null;
container.explicitHeaderCanvasHeight = container.explicitHeaderCanvasHeight || null;
containerHeadersToRecalc.push(container);
}
}
}
/*
*
* Here we loop through the headers, measuring each element as well as any header "canvas" it has within it.
*
* If any header is less than the largest header height, it will be resized to that so that we don't have headers
* with different heights, which looks like a rendering problem
*
* We'll do the same thing with the header canvases, and give the header CELLS an explicit height if their canvas
* is smaller than the largest canvas height. That way header cells without extra controls like filtering don't
* appear shorter than other cells.
*
*/
if (containerHeadersToRecalc.length > 0) {
// Build the styles without the explicit header heights
if (buildStyles) {
self.buildStyles();
}
// Use a timeout because the heights can't be measured until after the grid element has been rendered and filled out
$timeout(function() {
// var oldHeaderHeight = self.grid.headerHeight;
// self.grid.headerHeight = gridUtil.outerElementHeight(self.header);
var rebuildStyles = false;
// Get all the header heights
var maxHeaderHeight = 0;
var maxHeaderCanvasHeight = 0;
var i, container;
var getHeight = function(oldVal, newVal){
if ( oldVal !== newVal){
rebuildStyles = true;
}
return newVal;
};
for (i = 0; i < containerHeadersToRecalc.length; i++) {
container = containerHeadersToRecalc[i];
// Skip containers that have no canvasWidth set yet
if (container.canvasWidth === null || isNaN(container.canvasWidth)) {
continue;
}
if (container.header) {
var headerHeight = container.headerHeight = getHeight(container.headerHeight, parseInt(gridUtil.outerElementHeight(container.header), 10));
// Get the "inner" header height, that is the height minus the top and bottom borders, if present. We'll use it to make sure all the headers have a consistent height
var topBorder = gridUtil.getBorderSize(container.header, 'top');
var bottomBorder = gridUtil.getBorderSize(container.header, 'bottom');
var innerHeaderHeight = parseInt(headerHeight - topBorder - bottomBorder, 10);
innerHeaderHeight = innerHeaderHeight < 0 ? 0 : innerHeaderHeight;
container.innerHeaderHeight = innerHeaderHeight;
// If the header doesn't have an explicit height set, save the largest header height for use later
// Explicit header heights are based off of the max we are calculating here. We never want to base the max on something we're setting explicitly
if (!container.explicitHeaderHeight && innerHeaderHeight > maxHeaderHeight) {
maxHeaderHeight = innerHeaderHeight;
}
}
if (container.headerCanvas) {
var headerCanvasHeight = container.headerCanvasHeight = getHeight(container.headerCanvasHeight, parseInt(gridUtil.outerElementHeight(container.headerCanvas), 10));
// If the header doesn't have an explicit canvas height, save the largest header canvas height for use later
// Explicit header heights are based off of the max we are calculating here. We never want to base the max on something we're setting explicitly
if (!container.explicitHeaderCanvasHeight && headerCanvasHeight > maxHeaderCanvasHeight) {
maxHeaderCanvasHeight = headerCanvasHeight;
}
}
}
// Go through all the headers
for (i = 0; i < containerHeadersToRecalc.length; i++) {
container = containerHeadersToRecalc[i];
/* If:
1. We have a max header height
2. This container has a header height defined
3. And either this container has an explicit header height set, OR its header height is less than the max
then:
Give this container's header an explicit height so it will line up with the tallest header
*/
if (
maxHeaderHeight > 0 && typeof(container.headerHeight) !== 'undefined' && container.headerHeight !== null &&
(container.explicitHeaderHeight || container.headerHeight < maxHeaderHeight)
) {
container.explicitHeaderHeight = getHeight(container.explicitHeaderHeight, maxHeaderHeight);
}
// Do the same as above except for the header canvas
if (
maxHeaderCanvasHeight > 0 && typeof(container.headerCanvasHeight) !== 'undefined' && container.headerCanvasHeight !== null &&
(container.explicitHeaderCanvasHeight || container.headerCanvasHeight < maxHeaderCanvasHeight)
) {
container.explicitHeaderCanvasHeight = getHeight(container.explicitHeaderCanvasHeight, maxHeaderCanvasHeight);
}
}
// Rebuild styles if the header height has changed
// The header height is used in body/viewport calculations and those are then used in other styles so we need it to be available
if (buildStyles && rebuildStyles) {
self.buildStyles();
}
p.resolve();
});
}
else {
// Timeout still needs to be here to trigger digest after styles have been rebuilt
$timeout(function() {
p.resolve();
});
}
return p.promise;
};
/**
* @ngdoc function
* @name redrawInPlace
* @methodOf ui.grid.class:Grid
* @description Redraw the rows and columns based on our current scroll position
* @param {boolean} [rowsAdded] Optional to indicate rows are added and the scroll percentage must be recalculated
*
*/
Grid.prototype.redrawInPlace = function redrawInPlace(rowsAdded) {
// gridUtil.logDebug('redrawInPlace');
var self = this;
for (var i in self.renderContainers) {
var container = self.renderContainers[i];
// gridUtil.logDebug('redrawing container', i);
if (rowsAdded) {
container.adjustRows(container.prevScrollTop, null);
container.adjustColumns(container.prevScrollLeft, null);
}
else {
container.adjustRows(null, container.prevScrolltopPercentage);
container.adjustColumns(null, container.prevScrollleftPercentage);
}
}
};
/**
* @ngdoc function
* @name hasLeftContainerColumns
* @methodOf ui.grid.class:Grid
* @description returns true if leftContainer has columns
*/
Grid.prototype.hasLeftContainerColumns = function () {
return this.hasLeftContainer() && this.renderContainers.left.renderedColumns.length > 0;
};
/**
* @ngdoc function
* @name hasRightContainerColumns
* @methodOf ui.grid.class:Grid
* @description returns true if rightContainer has columns
*/
Grid.prototype.hasRightContainerColumns = function () {
return this.hasRightContainer() && this.renderContainers.right.renderedColumns.length > 0;
};
/**
* @ngdoc method
* @methodOf ui.grid.class:Grid
* @name scrollToIfNecessary
* @description Scrolls the grid to make a certain row and column combo visible,
* in the case that it is not completely visible on the screen already.
* @param {GridRow} gridRow row to make visible
* @param {GridCol} gridCol column to make visible
* @returns {promise} a promise that is resolved when scrolling is complete
*/
Grid.prototype.scrollToIfNecessary = function (gridRow, gridCol) {
var self = this;
var scrollEvent = new ScrollEvent(self, 'uiGrid.scrollToIfNecessary');
// Alias the visible row and column caches
var visRowCache = self.renderContainers.body.visibleRowCache;
var visColCache = self.renderContainers.body.visibleColumnCache;
/*-- Get the top, left, right, and bottom "scrolled" edges of the grid --*/
// The top boundary is the current Y scroll position PLUS the header height, because the header can obscure rows when the grid is scrolled downwards
var topBound = self.renderContainers.body.prevScrollTop + self.headerHeight;
// Don't let the top boundary be less than 0
topBound = (topBound < 0) ? 0 : topBound;
// The left boundary is the current X scroll position
var leftBound = self.renderContainers.body.prevScrollLeft;
// The bottom boundary is the current Y scroll position, plus the height of the grid, but minus the header height.
// Basically this is the viewport height added on to the scroll position
var bottomBound = self.renderContainers.body.prevScrollTop + self.gridHeight - self.renderContainers.body.headerHeight - self.footerHeight - self.scrollbarWidth;
// If there's a horizontal scrollbar, remove its height from the bottom boundary, otherwise we'll be letting it obscure rows
//if (self.horizontalScrollbarHeight) {
// bottomBound = bottomBound - self.horizontalScrollbarHeight;
//}
// The right position is the current X scroll position minus the grid width
var rightBound = self.renderContainers.body.prevScrollLeft + Math.ceil(self.gridWidth);
// If there's a vertical scrollbar, subtract it from the right boundary or we'll allow it to obscure cells
//if (self.verticalScrollbarWidth) {
// rightBound = rightBound - self.verticalScrollbarWidth;
//}
// We were given a row to scroll to
if (gridRow !== null) {
// This is the index of the row we want to scroll to, within the list of rows that can be visible
var seekRowIndex = visRowCache.indexOf(gridRow);
// Total vertical scroll length of the grid
var scrollLength = (self.renderContainers.body.getCanvasHeight() - self.renderContainers.body.getViewportHeight());
// Add the height of the native horizontal scrollbar to the scroll length, if it's there. Otherwise it will mask over the final row
//if (self.horizontalScrollbarHeight && self.horizontalScrollbarHeight > 0) {
// scrollLength = scrollLength + self.horizontalScrollbarHeight;
//}
// This is the minimum number of pixels we need to scroll vertically in order to see this row.
var pixelsToSeeRow = ((seekRowIndex + 1) * self.options.rowHeight);
// Don't let the pixels required to see the row be less than zero
pixelsToSeeRow = (pixelsToSeeRow < 0) ? 0 : pixelsToSeeRow;
var scrollPixels, percentage;
// If the scroll position we need to see the row is LESS than the top boundary, i.e. obscured above the top of the grid...
if (pixelsToSeeRow < topBound) {
// Get the difference between the top boundary and the required scroll position and subtract it from the current scroll position
// to get the full position we need
scrollPixels = self.renderContainers.body.prevScrollTop - (topBound - pixelsToSeeRow);
// Turn the scroll position into a percentage and make it an argument for a scroll event
percentage = scrollPixels / scrollLength;
scrollEvent.y = { percentage: percentage };
}
// Otherwise if the scroll position we need to see the row is MORE than the bottom boundary, i.e. obscured below the bottom of the grid...
else if (pixelsToSeeRow > bottomBound) {
// Get the difference between the bottom boundary and the required scroll position and add it to the current scroll position
// to get the full position we need
scrollPixels = pixelsToSeeRow - bottomBound + self.renderContainers.body.prevScrollTop;
// Turn the scroll position into a percentage and make it an argument for a scroll event
percentage = scrollPixels / scrollLength;
scrollEvent.y = { percentage: percentage };
}
}
// We were given a column to scroll to
if (gridCol !== null) {
// This is the index of the column we want to scroll to, within the list of columns that can be visible
var seekColumnIndex = visColCache.indexOf(gridCol);
// Total horizontal scroll length of the grid
var horizScrollLength = (self.renderContainers.body.getCanvasWidth() - self.renderContainers.body.getViewportWidth());
// Add the width of the native vertical scrollbar to the scroll length, if it's there. Otherwise it will mask over the final column
// if (self.verticalScrollbarWidth && self.verticalScrollbarWidth > 0) {
// horizScrollLength = horizScrollLength + self.verticalScrollbarWidth;
// }
// This is the minimum number of pixels we need to scroll horizontally in order to see this column
var columnLeftEdge = 0;
for (var i = 0; i < seekColumnIndex; i++) {
var col = visColCache[i];
columnLeftEdge += col.drawnWidth;
}
columnLeftEdge = (columnLeftEdge < 0) ? 0 : columnLeftEdge;
var columnRightEdge = columnLeftEdge + gridCol.drawnWidth;
// Don't let the pixels required to see the column be less than zero
columnRightEdge = (columnRightEdge < 0) ? 0 : columnRightEdge;
var horizScrollPixels, horizPercentage;
// If the scroll position we need to see the column is LESS than the left boundary, i.e. obscured beyond the left edge of the grid...
if (columnLeftEdge < leftBound) {
// Get the difference between the left boundary and the required scroll position and subtract it from the current scroll position
// to get the full position we need
horizScrollPixels = self.renderContainers.body.prevScrollLeft - (leftBound - columnLeftEdge);
// Turn the scroll position into a percentage and make it an argument for a scroll event
horizPercentage = horizScrollPixels / horizScrollLength;
horizPercentage = (horizPercentage > 1) ? 1 : horizPercentage;
scrollEvent.x = { percentage: horizPercentage };
}
// Otherwise if the scroll position we need to see the column is MORE than the right boundary, i.e. obscured beyond the right edge of the grid...
else if (columnRightEdge > rightBound) {
// Get the difference between the right boundary and the required scroll position and add it to the current scroll position
// to get the full position we need
horizScrollPixels = columnRightEdge - rightBound + self.renderContainers.body.prevScrollLeft;
// Turn the scroll position into a percentage and make it an argument for a scroll event
horizPercentage = horizScrollPixels / horizScrollLength;
horizPercentage = (horizPercentage > 1) ? 1 : horizPercentage;
scrollEvent.x = { percentage: horizPercentage };
}
}
var deferred = $q.defer();
// If we need to scroll on either the x or y axes, fire a scroll event
if (scrollEvent.y || scrollEvent.x) {
scrollEvent.withDelay = false;
self.scrollContainers('',scrollEvent);
var dereg = self.api.core.on.scrollEnd(null,function() {
deferred.resolve(scrollEvent);
dereg();
});
}
else {
deferred.resolve();
}
return deferred.promise;
};
/**
* @ngdoc method
* @methodOf ui.grid.class:Grid
* @name scrollTo
* @description Scroll the grid such that the specified
* row and column is in view
* @param {object} rowEntity gridOptions.data[] array instance to make visible
* @param {object} colDef to make visible
* @returns {promise} a promise that is resolved after any scrolling is finished
*/
Grid.prototype.scrollTo = function (rowEntity, colDef) {
var gridRow = null, gridCol = null;
if (rowEntity !== null && typeof(rowEntity) !== 'undefined' ) {
gridRow = this.getRow(rowEntity);
}
if (colDef !== null && typeof(colDef) !== 'undefined' ) {
gridCol = this.getColumn(colDef.name ? colDef.name : colDef.field);
}
return this.scrollToIfNecessary(gridRow, gridCol);
};
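// Illustrative sketch (the data and column name are hypothetical); either
// argument may be null/undefined to scroll on one axis only:
//
//   grid.scrollTo($scope.gridOptions.data[49], { name: 'email' });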
/**
* @ngdoc function
* @name clearAllFilters
* @methodOf ui.grid.class:Grid
* @description Clears all filters and optionally refreshes the visible rows.
* @param {boolean} [refreshRows] Defaults to true.
* @param {boolean} [clearConditions] Defaults to false.
* @param {boolean} [clearFlags] Defaults to false.
* @returns {promise} If `refreshRows` is true, returns a promise of the rows refreshing.
*/
Grid.prototype.clearAllFilters = function clearAllFilters(refreshRows, clearConditions, clearFlags) {
// Default `refreshRows` to true because it will be the most commonly desired behaviour.
if (refreshRows === undefined) {
refreshRows = true;
}
if (clearConditions === undefined) {
clearConditions = false;
}
if (clearFlags === undefined) {
clearFlags = false;
}
this.columns.forEach(function(column) {
column.filters.forEach(function(filter) {
filter.term = undefined;
if (clearConditions) {
filter.condition = undefined;
}
if (clearFlags) {
filter.flags = undefined;
}
});
});
if (refreshRows) {
return this.refreshRows();
}
};
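// Illustrative sketch: clear filter terms but keep conditions and flags, then
// act once the rows have been re-filtered:
//
//   grid.clearAllFilters(true, false, false)
//     .then(function () { gridUtil.logDebug('filters cleared'); });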
// Blatantly stolen from Angular as it isn't exposed (yet? 2.0?)
function RowHashMap() {}
RowHashMap.prototype = {
/**
* Store key value pair
* @param key key to store can be any type
* @param value value to store can be any type
*/
put: function(key, value) {
this[this.grid.options.rowIdentity(key)] = value;
},
/**
* @param key
* @returns {Object} the value for the key
*/
get: function(key) {
return this[this.grid.options.rowIdentity(key)];
},
/**
* Remove the key/value pair
* @param key
*/
remove: function(key) {
var value = this[key = this.grid.options.rowIdentity(key)];
delete this[key];
return value;
}
};
return Grid;
}]);
})();
| 1 | 10,831 | This needs to be fixed before we can accept the PR | angular-ui-ui-grid | js |
@@ -14,6 +14,11 @@ constexpr double bootstrap_minimum_termination_time_sec = 30.0;
constexpr unsigned bootstrap_max_new_connections = 10;
constexpr unsigned bulk_push_cost_limit = 200;
+size_t constexpr nano::frontier_req::size;
+size_t constexpr nano::bulk_pull_blocks::size;
+size_t constexpr nano::bulk_pull_account::size;
+size_t constexpr nano::frontier_req_client::size_frontier;
+
nano::socket::socket (std::shared_ptr<nano::node> node_a) :
socket_m (node_a->io_ctx),
cutoff (std::numeric_limits<uint64_t>::max ()), | 1 | #include <nano/node/bootstrap.hpp>
#include <nano/node/common.hpp>
#include <nano/node/node.hpp>
#include <boost/log/trivial.hpp>
constexpr double bootstrap_connection_scale_target_blocks = 50000.0;
constexpr double bootstrap_connection_warmup_time_sec = 5.0;
constexpr double bootstrap_minimum_blocks_per_sec = 10.0;
constexpr double bootstrap_minimum_frontier_blocks_per_sec = 1000.0;
constexpr unsigned bootstrap_frontier_retry_limit = 16;
constexpr double bootstrap_minimum_termination_time_sec = 30.0;
constexpr unsigned bootstrap_max_new_connections = 10;
constexpr unsigned bulk_push_cost_limit = 200;
nano::socket::socket (std::shared_ptr<nano::node> node_a) :
socket_m (node_a->io_ctx),
cutoff (std::numeric_limits<uint64_t>::max ()),
node (node_a)
{
}
void nano::socket::async_connect (nano::tcp_endpoint const & endpoint_a, std::function<void(boost::system::error_code const &)> callback_a)
{
checkup ();
auto this_l (shared_from_this ());
start ();
socket_m.async_connect (endpoint_a, [this_l, callback_a](boost::system::error_code const & ec) {
this_l->stop ();
callback_a (ec);
});
}
void nano::socket::async_read (std::shared_ptr<std::vector<uint8_t>> buffer_a, size_t size_a, std::function<void(boost::system::error_code const &, size_t)> callback_a)
{
assert (size_a <= buffer_a->size ());
auto this_l (shared_from_this ());
start ();
boost::asio::async_read (socket_m, boost::asio::buffer (buffer_a->data (), size_a), [this_l, callback_a](boost::system::error_code const & ec, size_t size_a) {
this_l->node->stats.add (nano::stat::type::traffic_bootstrap, nano::stat::dir::in, size_a);
this_l->stop ();
callback_a (ec, size_a);
});
}
void nano::socket::async_write (std::shared_ptr<std::vector<uint8_t>> buffer_a, std::function<void(boost::system::error_code const &, size_t)> callback_a)
{
auto this_l (shared_from_this ());
start ();
boost::asio::async_write (socket_m, boost::asio::buffer (buffer_a->data (), buffer_a->size ()), [this_l, callback_a, buffer_a](boost::system::error_code const & ec, size_t size_a) {
this_l->node->stats.add (nano::stat::type::traffic_bootstrap, nano::stat::dir::out, size_a);
this_l->stop ();
callback_a (ec, size_a);
});
}
void nano::socket::start (std::chrono::steady_clock::time_point timeout_a)
{
cutoff = timeout_a.time_since_epoch ().count ();
}
void nano::socket::stop ()
{
cutoff = std::numeric_limits<uint64_t>::max ();
}
void nano::socket::close ()
{
if (socket_m.is_open ())
{
try
{
socket_m.shutdown (boost::asio::ip::tcp::socket::shutdown_both);
}
catch (...)
{
/* Ignore spurious exceptions; shutdown is best effort. */
}
socket_m.close ();
}
}
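// Watchdog for the timeout armed by start (): re-checks every 10 seconds via the
// node alarm and closes the socket once 'cutoff' has passed without a matching
// stop (), logging the disconnect when bulk_pull logging is enabled.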
void nano::socket::checkup ()
{
std::weak_ptr<nano::socket> this_w (shared_from_this ());
node->alarm.add (std::chrono::steady_clock::now () + std::chrono::seconds (10), [this_w]() {
if (auto this_l = this_w.lock ())
{
if (this_l->cutoff != std::numeric_limits<uint64_t>::max () && this_l->cutoff < std::chrono::steady_clock::now ().time_since_epoch ().count ())
{
if (this_l->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (this_l->node->log) << boost::str (boost::format ("Disconnecting from %1% due to timeout") % this_l->remote_endpoint ());
}
this_l->close ();
}
else
{
this_l->checkup ();
}
}
});
}
nano::tcp_endpoint nano::socket::remote_endpoint ()
{
nano::tcp_endpoint endpoint;
if (socket_m.is_open ())
{
boost::system::error_code remote_endpoint_error;
endpoint = socket_m.remote_endpoint (remote_endpoint_error);
}
return endpoint;
}
nano::bootstrap_client::bootstrap_client (std::shared_ptr<nano::node> node_a, std::shared_ptr<nano::bootstrap_attempt> attempt_a, nano::tcp_endpoint const & endpoint_a) :
node (node_a),
attempt (attempt_a),
socket (std::make_shared<nano::socket> (node_a)),
receive_buffer (std::make_shared<std::vector<uint8_t>> ()),
endpoint (endpoint_a),
start_time (std::chrono::steady_clock::now ()),
block_count (0),
pending_stop (false),
hard_stop (false)
{
++attempt->connections;
receive_buffer->resize (256);
}
nano::bootstrap_client::~bootstrap_client ()
{
--attempt->connections;
}
double nano::bootstrap_client::block_rate () const
{
auto elapsed = elapsed_seconds ();
return elapsed > 0.0 ? (double)block_count.load () / elapsed : 0.0;
}
double nano::bootstrap_client::elapsed_seconds () const
{
return std::chrono::duration_cast<std::chrono::duration<double>> (std::chrono::steady_clock::now () - start_time).count ();
}
void nano::bootstrap_client::stop (bool force)
{
pending_stop = true;
if (force)
{
hard_stop = true;
}
}
void nano::bootstrap_client::run ()
{
auto this_l (shared_from_this ());
socket->async_connect (endpoint, [this_l](boost::system::error_code const & ec) {
if (!ec)
{
if (this_l->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (this_l->node->log) << boost::str (boost::format ("Connection established to %1%") % this_l->endpoint);
}
this_l->attempt->pool_connection (this_l->shared_from_this ());
}
else
{
if (this_l->node->config.logging.network_logging ())
{
switch (ec.value ())
{
default:
BOOST_LOG (this_l->node->log) << boost::str (boost::format ("Error initiating bootstrap connection to %1%: %2%") % this_l->endpoint % ec.message ());
break;
case boost::system::errc::connection_refused:
case boost::system::errc::operation_canceled:
case boost::system::errc::timed_out:
case 995: //Windows The I/O operation has been aborted because of either a thread exit or an application request
case 10061: //Windows No connection could be made because the target machine actively refused it
break;
}
}
}
});
}
void nano::frontier_req_client::run ()
{
std::unique_ptr<nano::frontier_req> request (new nano::frontier_req);
request->start.clear ();
request->age = std::numeric_limits<decltype (request->age)>::max ();
request->count = std::numeric_limits<decltype (request->count)>::max ();
auto send_buffer (std::make_shared<std::vector<uint8_t>> ());
{
nano::vectorstream stream (*send_buffer);
request->serialize (stream);
}
auto this_l (shared_from_this ());
connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
if (!ec)
{
this_l->receive_frontier ();
}
else
{
if (this_l->connection->node->config.logging.network_logging ())
{
BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Error while sending bootstrap request %1%") % ec.message ());
}
}
});
}
std::shared_ptr<nano::bootstrap_client> nano::bootstrap_client::shared ()
{
return shared_from_this ();
}
nano::frontier_req_client::frontier_req_client (std::shared_ptr<nano::bootstrap_client> connection_a) :
connection (connection_a),
current (0),
count (0),
bulk_push_cost (0)
{
auto transaction (connection->node->store.tx_begin_read ());
next (transaction);
}
nano::frontier_req_client::~frontier_req_client ()
{
}
void nano::frontier_req_client::receive_frontier ()
{
auto this_l (shared_from_this ());
size_t size_l (sizeof (nano::uint256_union) + sizeof (nano::uint256_union));
connection->socket->async_read (connection->receive_buffer, size_l, [this_l, size_l](boost::system::error_code const & ec, size_t size_a) {
// An issue with asio is that sometimes, instead of reporting a bad file descriptor during disconnect,
// we simply get a size of 0.
if (size_a == size_l)
{
this_l->received_frontier (ec, size_a);
}
else
{
if (this_l->connection->node->config.logging.network_message_logging ())
{
BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Invalid size: expected %1%, got %2%") % size_l % size_a);
}
}
});
}
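/*
 * Cost model note: pushing a whole account chain (end == 0) is counted as
 * twice the cost of pushing a bounded range, and new bulk push targets are
 * only collected while the running cost stays below bulk_push_cost_limit.
 */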
void nano::frontier_req_client::unsynced (nano::block_hash const & head, nano::block_hash const & end)
{
if (bulk_push_cost < bulk_push_cost_limit)
{
connection->attempt->add_bulk_push_target (head, end);
if (end.is_zero ())
{
bulk_push_cost += 2;
}
else
{
bulk_push_cost += 1;
}
}
}
void nano::frontier_req_client::received_frontier (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
assert (size_a == sizeof (nano::uint256_union) + sizeof (nano::uint256_union));
nano::account account;
nano::bufferstream account_stream (connection->receive_buffer->data (), sizeof (nano::uint256_union));
auto error1 (nano::read (account_stream, account));
assert (!error1);
nano::block_hash latest;
nano::bufferstream latest_stream (connection->receive_buffer->data () + sizeof (nano::uint256_union), sizeof (nano::uint256_union));
auto error2 (nano::read (latest_stream, latest));
assert (!error2);
if (count == 0)
{
start_time = std::chrono::steady_clock::now ();
}
++count;
std::chrono::duration<double> time_span = std::chrono::duration_cast<std::chrono::duration<double>> (std::chrono::steady_clock::now () - start_time);
double elapsed_sec = time_span.count ();
double blocks_per_sec = (double)count / elapsed_sec;
if (elapsed_sec > bootstrap_connection_warmup_time_sec && blocks_per_sec < bootstrap_minimum_frontier_blocks_per_sec)
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Aborting frontier req because it was too slow"));
promise.set_value (true);
return;
}
if (connection->attempt->should_log ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Received %1% frontiers from %2%") % std::to_string (count) % connection->socket->remote_endpoint ());
}
auto transaction (connection->node->store.tx_begin_read ());
if (!account.is_zero ())
{
while (!current.is_zero () && current < account)
{
// We know about an account they don't.
unsynced (frontier, 0);
next (transaction);
}
if (!current.is_zero ())
{
if (account == current)
{
if (latest == frontier)
{
// In sync
}
else
{
if (connection->node->store.block_exists (transaction, latest))
{
// We know about a block they don't.
unsynced (frontier, latest);
}
else
{
connection->attempt->add_pull (nano::pull_info (account, latest, frontier));
// Either we're behind or there's a fork we differ on
// Either way, bulk pushing will probably not be effective
bulk_push_cost += 5;
}
}
next (transaction);
}
else
{
assert (account < current);
connection->attempt->add_pull (nano::pull_info (account, latest, nano::block_hash (0)));
}
}
else
{
connection->attempt->add_pull (nano::pull_info (account, latest, nano::block_hash (0)));
}
receive_frontier ();
}
else
{
while (!current.is_zero ())
{
// We know about an account they don't.
unsynced (frontier, 0);
next (transaction);
}
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << "Bulk push cost: " << bulk_push_cost;
}
{
try
{
promise.set_value (false);
}
catch (std::future_error &)
{
}
connection->attempt->pool_connection (connection);
}
}
}
else
{
if (connection->node->config.logging.network_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Error while receiving frontier %1%") % ec.message ());
}
}
}
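/*
 * Prefetch note: next () below batches up to 128 (account, head) pairs per
 * read transaction, so a single transaction serves up to 128 frontier
 * comparisons before the deque has to be refilled.
 */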
void nano::frontier_req_client::next (nano::transaction const & transaction_a)
{
// Fill the accounts deque to avoid opening read transactions too frequently
if (accounts.empty ())
{
size_t max_size (128);
for (auto i (connection->node->store.latest_begin (transaction_a, current.number () + 1)), n (connection->node->store.latest_end ()); i != n && accounts.size () != max_size; ++i)
{
nano::account_info info (i->second);
accounts.push_back (std::make_pair (nano::account (i->first), info.head));
}
/* If the loop breaks before reaching max_size, then latest_end () was reached.
Add an empty record to finish the frontier_req_server */
if (accounts.size () != max_size)
{
accounts.push_back (std::make_pair (nano::account (0), nano::block_hash (0)));
}
}
// Retrieve the next account from the deque
auto account_pair (accounts.front ());
accounts.pop_front ();
current = account_pair.first;
frontier = account_pair.second;
}
nano::bulk_pull_client::bulk_pull_client (std::shared_ptr<nano::bootstrap_client> connection_a, nano::pull_info const & pull_a) :
connection (connection_a),
pull (pull_a),
total_blocks (0)
{
std::lock_guard<std::mutex> mutex (connection->attempt->mutex);
connection->attempt->condition.notify_all ();
}
nano::bulk_pull_client::~bulk_pull_client ()
{
// If the received end block is not the expected end block
if (expected != pull.end)
{
pull.head = expected;
if (connection->attempt->lazy_mode)
{
pull.account = expected;
}
connection->attempt->requeue_pull (pull);
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Bulk pull end block is not expected %1% for account %2%") % pull.end.to_string () % pull.account.to_account ());
}
}
{
std::lock_guard<std::mutex> mutex (connection->attempt->mutex);
--connection->attempt->pulling;
}
connection->attempt->condition.notify_all ();
}
void nano::bulk_pull_client::request ()
{
expected = pull.head;
nano::bulk_pull req;
req.start = pull.account;
req.end = pull.end;
req.count = pull.count;
req.set_count_present (pull.count != 0);
auto buffer (std::make_shared<std::vector<uint8_t>> ());
{
nano::vectorstream stream (*buffer);
req.serialize (stream);
}
if (connection->node->config.logging.bulk_pull_logging ())
{
std::unique_lock<std::mutex> lock (connection->attempt->mutex);
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Requesting account %1% from %2%. %3% accounts in queue") % req.start.to_account () % connection->endpoint % connection->attempt->pulls.size ());
}
else if (connection->node->config.logging.network_logging () && connection->attempt->should_log ())
{
std::unique_lock<std::mutex> lock (connection->attempt->mutex);
BOOST_LOG (connection->node->log) << boost::str (boost::format ("%1% accounts in pull queue") % connection->attempt->pulls.size ());
}
auto this_l (shared_from_this ());
connection->socket->async_write (buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
if (!ec)
{
this_l->receive_block ();
}
else
{
if (this_l->connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Error sending bulk pull request to %2%: %1%") % ec.message () % this_l->connection->endpoint);
}
}
});
}
void nano::bulk_pull_client::receive_block ()
{
auto this_l (shared_from_this ());
connection->socket->async_read (connection->receive_buffer, 1, [this_l](boost::system::error_code const & ec, size_t size_a) {
if (!ec)
{
this_l->received_type ();
}
else
{
if (this_l->connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Error receiving block type: %1%") % ec.message ());
}
}
});
}
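/*
 * Wire format note: each block on a bulk pull stream is preceded by a single
 * type byte; the payload length is implied by that type (e.g.
 * nano::send_block::size), and a not_a_block type byte terminates the stream.
 */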
void nano::bulk_pull_client::received_type ()
{
auto this_l (shared_from_this ());
nano::block_type type (static_cast<nano::block_type> (connection->receive_buffer->data ()[0]));
switch (type)
{
case nano::block_type::send:
{
connection->socket->async_read (connection->receive_buffer, nano::send_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
this_l->received_block (ec, size_a, type);
});
break;
}
case nano::block_type::receive:
{
connection->socket->async_read (connection->receive_buffer, nano::receive_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
this_l->received_block (ec, size_a, type);
});
break;
}
case nano::block_type::open:
{
connection->socket->async_read (connection->receive_buffer, nano::open_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
this_l->received_block (ec, size_a, type);
});
break;
}
case nano::block_type::change:
{
connection->socket->async_read (connection->receive_buffer, nano::change_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
this_l->received_block (ec, size_a, type);
});
break;
}
case nano::block_type::state:
{
connection->socket->async_read (connection->receive_buffer, nano::state_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
this_l->received_block (ec, size_a, type);
});
break;
}
case nano::block_type::not_a_block:
{
// Avoid re-using slow peers, or peers that sent the wrong blocks.
if (!connection->pending_stop && expected == pull.end)
{
connection->attempt->pool_connection (connection);
}
break;
}
default:
{
if (connection->node->config.logging.network_packet_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Unknown type received as block type: %1%") % static_cast<int> (type));
}
break;
}
}
}
void nano::bulk_pull_client::received_block (boost::system::error_code const & ec, size_t size_a, nano::block_type type_a)
{
if (!ec)
{
nano::bufferstream stream (connection->receive_buffer->data (), size_a);
std::shared_ptr<nano::block> block (nano::deserialize_block (stream, type_a));
if (block != nullptr && !nano::work_validate (*block))
{
auto hash (block->hash ());
if (connection->node->config.logging.bulk_pull_logging ())
{
std::string block_l;
block->serialize_json (block_l);
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Pulled block %1% %2%") % hash.to_string () % block_l);
}
bool block_expected (false);
if (hash == expected)
{
expected = block->previous ();
block_expected = true;
}
if (connection->block_count++ == 0)
{
connection->start_time = std::chrono::steady_clock::now ();
}
connection->attempt->total_blocks++;
total_blocks++;
bool stop_pull (connection->attempt->process_block (block, total_blocks, block_expected));
if (!stop_pull && !connection->hard_stop.load ())
{
receive_block ();
}
else if (stop_pull && block_expected)
{
expected = pull.end;
connection->attempt->pool_connection (connection);
}
if (stop_pull)
{
connection->attempt->lazy_stopped++;
}
}
else
{
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << "Error deserializing block received from pull request";
}
}
}
else
{
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Error bulk receiving block: %1%") % ec.message ());
}
}
}
nano::bulk_push_client::bulk_push_client (std::shared_ptr<nano::bootstrap_client> const & connection_a) :
connection (connection_a)
{
}
nano::bulk_push_client::~bulk_push_client ()
{
}
void nano::bulk_push_client::start ()
{
nano::bulk_push message;
auto buffer (std::make_shared<std::vector<uint8_t>> ());
{
nano::vectorstream stream (*buffer);
message.serialize (stream);
}
auto this_l (shared_from_this ());
connection->socket->async_write (buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
auto transaction (this_l->connection->node->store.tx_begin_read ());
if (!ec)
{
this_l->push (transaction);
}
else
{
if (this_l->connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Unable to send bulk_push request: %1%") % ec.message ());
}
}
});
}
void nano::bulk_push_client::push (nano::transaction const & transaction_a)
{
std::shared_ptr<nano::block> block;
bool finished (false);
while (block == nullptr && !finished)
{
if (current_target.first.is_zero () || current_target.first == current_target.second)
{
std::lock_guard<std::mutex> guard (connection->attempt->mutex);
if (!connection->attempt->bulk_push_targets.empty ())
{
current_target = connection->attempt->bulk_push_targets.back ();
connection->attempt->bulk_push_targets.pop_back ();
}
else
{
finished = true;
}
}
if (!finished)
{
block = connection->node->store.block_get (transaction_a, current_target.first);
if (block == nullptr)
{
current_target.first = nano::block_hash (0);
}
else
{
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << "Bulk pushing range " << current_target.first.to_string () << " down to " << current_target.second.to_string ();
}
}
}
}
if (finished)
{
send_finished ();
}
else
{
current_target.first = block->previous ();
push_block (*block);
}
}
void nano::bulk_push_client::send_finished ()
{
auto buffer (std::make_shared<std::vector<uint8_t>> ());
buffer->push_back (static_cast<uint8_t> (nano::block_type::not_a_block));
connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_push, nano::stat::dir::out);
if (connection->node->config.logging.network_logging ())
{
BOOST_LOG (connection->node->log) << "Bulk push finished";
}
auto this_l (shared_from_this ());
connection->socket->async_write (buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
try
{
this_l->promise.set_value (false);
}
catch (std::future_error &)
{
}
});
}
void nano::bulk_push_client::push_block (nano::block const & block_a)
{
auto buffer (std::make_shared<std::vector<uint8_t>> ());
{
nano::vectorstream stream (*buffer);
nano::serialize_block (stream, block_a);
}
auto this_l (shared_from_this ());
connection->socket->async_write (buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
if (!ec)
{
auto transaction (this_l->connection->node->store.tx_begin_read ());
this_l->push (transaction);
}
else
{
if (this_l->connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Error sending block during bulk push: %1%") % ec.message ());
}
}
});
}
nano::pull_info::pull_info () :
account (0),
end (0),
count (0),
attempts (0)
{
}
nano::pull_info::pull_info (nano::account const & account_a, nano::block_hash const & head_a, nano::block_hash const & end_a, count_t count_a) :
account (account_a),
head (head_a),
end (end_a),
count (count_a),
attempts (0)
{
}
nano::bootstrap_attempt::bootstrap_attempt (std::shared_ptr<nano::node> node_a) :
next_log (std::chrono::steady_clock::now ()),
connections (0),
pulling (0),
node (node_a),
account_count (0),
total_blocks (0),
stopped (false),
lazy_mode (false),
lazy_stopped (0)
{
BOOST_LOG (node->log) << "Starting bootstrap attempt";
node->bootstrap_initiator.notify_listeners (true);
}
nano::bootstrap_attempt::~bootstrap_attempt ()
{
BOOST_LOG (node->log) << "Exiting bootstrap attempt";
node->bootstrap_initiator.notify_listeners (false);
}
bool nano::bootstrap_attempt::should_log ()
{
std::lock_guard<std::mutex> lock (mutex);
auto result (false);
auto now (std::chrono::steady_clock::now ());
if (next_log < now)
{
result = true;
next_log = now + std::chrono::seconds (15);
}
return result;
}
bool nano::bootstrap_attempt::request_frontier (std::unique_lock<std::mutex> & lock_a)
{
auto result (true);
auto connection_l (connection (lock_a));
connection_frontier_request = connection_l;
if (connection_l)
{
std::future<bool> future;
{
auto client (std::make_shared<nano::frontier_req_client> (connection_l));
client->run ();
frontiers = client;
future = client->promise.get_future ();
}
lock_a.unlock ();
result = consume_future (future); // This is out of scope of `client' so when the last reference via boost::asio::io_context is lost and the client is destroyed, the future throws an exception.
lock_a.lock ();
if (result)
{
pulls.clear ();
}
if (node->config.logging.network_logging ())
{
if (!result)
{
BOOST_LOG (node->log) << boost::str (boost::format ("Completed frontier request, %1% out of sync accounts according to %2%") % pulls.size () % connection_l->endpoint);
}
else
{
BOOST_LOG (node->log) << "frontier_req failed, reattempting";
}
}
}
return result;
}
void nano::bootstrap_attempt::request_pull (std::unique_lock<std::mutex> & lock_a)
{
auto connection_l (connection (lock_a));
if (connection_l)
{
auto pull (pulls.front ());
pulls.pop_front ();
if (lazy_mode)
{
// Check if pull is obsolete (head was processed)
std::unique_lock<std::mutex> lock (lazy_mutex);
while (!pulls.empty () && !pull.head.is_zero () && lazy_blocks.find (pull.head) != lazy_blocks.end ())
{
pull = pulls.front ();
pulls.pop_front ();
}
}
++pulling;
// The bulk_pull_client destructor attempts to requeue_pull, which can cause a deadlock if this is the last reference
// Dispatch request in an external thread in case it needs to be destroyed
node->background ([connection_l, pull]() {
auto client (std::make_shared<nano::bulk_pull_client> (connection_l, pull));
client->request ();
});
}
}
void nano::bootstrap_attempt::request_push (std::unique_lock<std::mutex> & lock_a)
{
bool error (false);
if (auto connection_shared = connection_frontier_request.lock ())
{
std::future<bool> future;
{
auto client (std::make_shared<nano::bulk_push_client> (connection_shared));
client->start ();
push = client;
future = client->promise.get_future ();
}
lock_a.unlock ();
error = consume_future (future); // This is out of scope of `client' so when the last reference via boost::asio::io_context is lost and the client is destroyed, the future throws an exception.
lock_a.lock ();
}
if (node->config.logging.network_logging ())
{
BOOST_LOG (node->log) << "Exiting bulk push client";
if (error)
{
BOOST_LOG (node->log) << "Bulk push client failed";
}
}
}
bool nano::bootstrap_attempt::still_pulling ()
{
assert (!mutex.try_lock ());
auto running (!stopped);
auto more_pulls (!pulls.empty ());
auto still_pulling (pulling > 0);
return running && (more_pulls || still_pulling);
}
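/*
 * Shuffle note: the loop at the start of run () below is a Fisher-Yates
 * shuffle over the pull queue, using the node's random pool, presumably so
 * that different peers are asked for different accounts first.
 */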
void nano::bootstrap_attempt::run ()
{
populate_connections ();
std::unique_lock<std::mutex> lock (mutex);
auto frontier_failure (true);
while (!stopped && frontier_failure)
{
frontier_failure = request_frontier (lock);
}
// Shuffle pulls.
for (int i = pulls.size () - 1; i > 0; i--)
{
auto k = nano::random_pool.GenerateWord32 (0, i);
std::swap (pulls[i], pulls[k]);
}
while (still_pulling ())
{
while (still_pulling ())
{
if (!pulls.empty ())
{
if (!node->block_processor.full ())
{
request_pull (lock);
}
else
{
condition.wait_for (lock, std::chrono::seconds (15));
}
}
else
{
condition.wait (lock);
}
}
// Flushing may resolve forks which can add more pulls
BOOST_LOG (node->log) << "Flushing unchecked blocks";
lock.unlock ();
node->block_processor.flush ();
lock.lock ();
BOOST_LOG (node->log) << "Finished flushing unchecked blocks";
}
if (!stopped)
{
BOOST_LOG (node->log) << "Completed pulls";
request_push (lock);
// Start lazy bootstrap if some lazy keys were inserted
if (!lazy_keys.empty () && !node->flags.disable_lazy_bootstrap)
{
lock.unlock ();
lazy_mode = true;
lazy_run ();
lock.lock ();
}
}
stopped = true;
condition.notify_all ();
idle.clear ();
}
std::shared_ptr<nano::bootstrap_client> nano::bootstrap_attempt::connection (std::unique_lock<std::mutex> & lock_a)
{
while (!stopped && idle.empty ())
{
condition.wait (lock_a);
}
std::shared_ptr<nano::bootstrap_client> result;
if (!idle.empty ())
{
result = idle.back ();
idle.pop_back ();
}
return result;
}
bool nano::bootstrap_attempt::consume_future (std::future<bool> & future_a)
{
bool result;
try
{
result = future_a.get ();
}
catch (std::future_error &)
{
result = true;
}
return result;
}
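/*
 * Ordering note: using operator> as the comparison inverts the usual
 * priority_queue ordering, so top () yields the client with the lowest block
 * rate. populate_connections () below relies on this to drop the slowest
 * peers first.
 */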
struct block_rate_cmp
{
bool operator() (const std::shared_ptr<nano::bootstrap_client> & lhs, const std::shared_ptr<nano::bootstrap_client> & rhs) const
{
return lhs->block_rate () > rhs->block_rate ();
}
};
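/*
 * Worked example (illustrative config values, not defaults from the source):
 * with bootstrap_connections = 4, bootstrap_connections_max = 64 and 25000
 * remaining pulls, step = 25000 / 50000 = 0.5 and the target becomes
 * 4 + (64 - 4) * 0.5 = 34 connections.
 */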
unsigned nano::bootstrap_attempt::target_connections (size_t pulls_remaining)
{
if (node->config.bootstrap_connections >= node->config.bootstrap_connections_max)
{
return std::max (1U, node->config.bootstrap_connections_max);
}
// Only scale up to bootstrap_connections_max for large pulls.
double step = std::min (1.0, std::max (0.0, (double)pulls_remaining / bootstrap_connection_scale_target_blocks));
double target = (double)node->config.bootstrap_connections + (double)(node->config.bootstrap_connections_max - node->config.bootstrap_connections) * step;
return std::max (1U, (unsigned)(target + 0.5f));
}
void nano::bootstrap_attempt::populate_connections ()
{
double rate_sum = 0.0;
size_t num_pulls = 0;
std::priority_queue<std::shared_ptr<nano::bootstrap_client>, std::vector<std::shared_ptr<nano::bootstrap_client>>, block_rate_cmp> sorted_connections;
{
std::unique_lock<std::mutex> lock (mutex);
num_pulls = pulls.size ();
for (auto & c : clients)
{
if (auto client = c.lock ())
{
double elapsed_sec = client->elapsed_seconds ();
auto blocks_per_sec = client->block_rate ();
rate_sum += blocks_per_sec;
if (client->elapsed_seconds () > bootstrap_connection_warmup_time_sec && client->block_count > 0)
{
sorted_connections.push (client);
}
// Force-stop the slowest peers, since they can take the whole bootstrap hostage by dribbling out blocks on the last remaining pull.
// This is ~1.5kilobits/sec.
if (elapsed_sec > bootstrap_minimum_termination_time_sec && blocks_per_sec < bootstrap_minimum_blocks_per_sec)
{
if (node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (node->log) << boost::str (boost::format ("Stopping slow peer %1% (elapsed sec %2%s > %3%s and %4% blocks per second < %5%)") % client->endpoint.address ().to_string () % elapsed_sec % bootstrap_minimum_termination_time_sec % blocks_per_sec % bootstrap_minimum_blocks_per_sec);
}
client->stop (true);
}
}
}
}
auto target = target_connections (num_pulls);
// We only want to drop slow peers when more than 2/3 are active. 2/3 because 1/2 is too aggressive, and 100% rarely happens.
// Probably needs more tuning.
if (sorted_connections.size () >= (target * 2) / 3 && target >= 4)
{
// 4 -> 1, 8 -> 2, 16 -> 4, arbitrary, but seems to work well.
auto drop = (int)roundf (sqrtf ((float)target - 2.0f));
if (node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (node->log) << boost::str (boost::format ("Dropping %1% bulk pull peers, target connections %2%") % drop % target);
}
for (int i = 0; i < drop; i++)
{
auto client = sorted_connections.top ();
if (node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (node->log) << boost::str (boost::format ("Dropping peer with block rate %1%, block count %2% (%3%) ") % client->block_rate () % client->block_count % client->endpoint.address ().to_string ());
}
client->stop (false);
sorted_connections.pop ();
}
}
if (node->config.logging.bulk_pull_logging ())
{
std::unique_lock<std::mutex> lock (mutex);
BOOST_LOG (node->log) << boost::str (boost::format ("Bulk pull connections: %1%, rate: %2% blocks/sec, remaining account pulls: %3%, total blocks: %4%") % connections.load () % (int)rate_sum % pulls.size () % (int)total_blocks.load ());
}
if (connections < target)
{
auto delta = std::min ((target - connections) * 2, bootstrap_max_new_connections);
// TODO - tune this better
// Not many peers respond, so we need to attempt more connections than we actually need.
for (int i = 0; i < delta; i++)
{
auto peer (node->peers.bootstrap_peer ());
if (peer != nano::endpoint (boost::asio::ip::address_v6::any (), 0))
{
auto client (std::make_shared<nano::bootstrap_client> (node, shared_from_this (), nano::tcp_endpoint (peer.address (), peer.port ())));
client->run ();
std::lock_guard<std::mutex> lock (mutex);
clients.push_back (client);
}
else if (connections == 0)
{
BOOST_LOG (node->log) << boost::str (boost::format ("Bootstrap stopped because there are no peers"));
stopped = true;
condition.notify_all ();
}
}
}
if (!stopped)
{
std::weak_ptr<nano::bootstrap_attempt> this_w (shared_from_this ());
node->alarm.add (std::chrono::steady_clock::now () + std::chrono::seconds (1), [this_w]() {
if (auto this_l = this_w.lock ())
{
this_l->populate_connections ();
}
});
}
}
void nano::bootstrap_attempt::add_connection (nano::endpoint const & endpoint_a)
{
auto client (std::make_shared<nano::bootstrap_client> (node, shared_from_this (), nano::tcp_endpoint (endpoint_a.address (), endpoint_a.port ())));
client->run ();
}
void nano::bootstrap_attempt::pool_connection (std::shared_ptr<nano::bootstrap_client> client_a)
{
{
std::lock_guard<std::mutex> lock (mutex);
if (!stopped && !client_a->pending_stop)
{
idle.push_front (client_a);
}
}
condition.notify_all ();
}
void nano::bootstrap_attempt::stop ()
{
std::lock_guard<std::mutex> lock (mutex);
stopped = true;
condition.notify_all ();
for (auto i : clients)
{
if (auto client = i.lock ())
{
client->socket->close ();
}
}
if (auto i = frontiers.lock ())
{
try
{
i->promise.set_value (true);
}
catch (std::future_error &)
{
}
}
if (auto i = push.lock ())
{
try
{
i->promise.set_value (true);
}
catch (std::future_error &)
{
}
}
}
void nano::bootstrap_attempt::add_pull (nano::pull_info const & pull)
{
{
std::lock_guard<std::mutex> lock (mutex);
pulls.push_back (pull);
}
condition.notify_all ();
}
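/*
 * Retry note: a failed pull is retried at the front of the queue until its
 * attempt count reaches bootstrap_frontier_retry_limit; in lazy mode it is
 * then pushed to the back for further retries instead of being dropped.
 */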
void nano::bootstrap_attempt::requeue_pull (nano::pull_info const & pull_a)
{
auto pull (pull_a);
if (++pull.attempts < bootstrap_frontier_retry_limit)
{
std::lock_guard<std::mutex> lock (mutex);
pulls.push_front (pull);
condition.notify_all ();
}
else if (lazy_mode)
{
{
// Retry lazy pulls (not pulls derived from weak state block link assumptions)
std::lock_guard<std::mutex> lock (mutex);
pull.attempts++;
pulls.push_back (pull);
}
condition.notify_all ();
}
else
{
if (node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (node->log) << boost::str (boost::format ("Failed to pull account %1% down to %2% after %3% attempts") % pull.account.to_account () % pull.end.to_string () % pull.attempts);
}
}
}
void nano::bootstrap_attempt::add_bulk_push_target (nano::block_hash const & head, nano::block_hash const & end)
{
std::lock_guard<std::mutex> lock (mutex);
bulk_push_targets.push_back (std::make_pair (head, end));
}
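/*
 * Lazy bootstrap note: rather than walking frontiers, lazy bootstrap starts
 * from individual block hashes, pulls each chain backwards and discovers new
 * dependencies from the source and link fields of received blocks (see
 * process_block below), pulling those in turn.
 */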
void nano::bootstrap_attempt::lazy_start (nano::block_hash const & hash_a)
{
std::unique_lock<std::mutex> lock (lazy_mutex);
// Add start blocks, limited to 1024 (32k when legacy bootstrap is disabled)
size_t max_keys (node->flags.disable_legacy_bootstrap ? 32 * 1024 : 1024);
if (lazy_keys.size () < max_keys && lazy_keys.find (hash_a) == lazy_keys.end () && lazy_blocks.find (hash_a) == lazy_blocks.end ())
{
lazy_keys.insert (hash_a);
lazy_pulls.push_back (hash_a);
}
}
void nano::bootstrap_attempt::lazy_add (nano::block_hash const & hash_a)
{
// Add only unknown blocks
assert (!lazy_mutex.try_lock ());
if (lazy_blocks.find (hash_a) == lazy_blocks.end ())
{
lazy_pulls.push_back (hash_a);
}
}
void nano::bootstrap_attempt::lazy_pull_flush ()
{
std::unique_lock<std::mutex> lock (lazy_mutex);
for (auto & pull_start : lazy_pulls)
{
// Recheck if block was already processed
if (lazy_blocks.find (pull_start) == lazy_blocks.end ())
{
add_pull (nano::pull_info (pull_start, pull_start, nano::block_hash (0), lazy_max_pull_blocks));
}
}
lazy_pulls.clear ();
}
bool nano::bootstrap_attempt::lazy_finished ()
{
bool result (true);
auto transaction (node->store.tx_begin_read ());
std::unique_lock<std::mutex> lock (lazy_mutex);
for (auto it (lazy_keys.begin ()), end (lazy_keys.end ()); it != end && !stopped;)
{
if (node->store.block_exists (transaction, *it))
{
// This may not be safe enough
it = lazy_keys.erase (it);
}
else
{
result = false;
break;
// No need to increment `it` as we break above.
}
}
// Finish lazy bootstrap without lazy pulls (in combination with still_pulling ())
if (!result && lazy_pulls.empty ())
{
result = true;
}
return result;
}
void nano::bootstrap_attempt::lazy_run ()
{
populate_connections ();
auto start_time (std::chrono::steady_clock::now ());
auto max_time (std::chrono::minutes (node->flags.disable_legacy_bootstrap ? 48 * 60 : 30));
std::unique_lock<std::mutex> lock (mutex);
while ((still_pulling () || !lazy_finished ()) && lazy_stopped < lazy_max_stopped && std::chrono::steady_clock::now () - start_time < max_time)
{
unsigned iterations (0);
while (still_pulling () && lazy_stopped < lazy_max_stopped && std::chrono::steady_clock::now () - start_time < max_time)
{
if (!pulls.empty ())
{
if (!node->block_processor.full ())
{
request_pull (lock);
}
else
{
condition.wait_for (lock, std::chrono::seconds (15));
}
}
else
{
condition.wait (lock);
}
++iterations;
// Flushing lazy pulls
if (iterations % 100 == 0)
{
lock.unlock ();
lazy_pull_flush ();
lock.lock ();
}
}
// Flushing may resolve forks which can add more pulls
// Flushing lazy pulls
lock.unlock ();
node->block_processor.flush ();
lazy_pull_flush ();
lock.lock ();
}
if (!stopped)
{
BOOST_LOG (node->log) << "Completed lazy pulls";
// Fallback to legacy bootstrap
std::unique_lock<std::mutex> lazy_lock (lazy_mutex);
if (!lazy_keys.empty () && !node->flags.disable_legacy_bootstrap)
{
pulls.clear ();
lock.unlock ();
lazy_blocks.clear ();
lazy_keys.clear ();
lazy_pulls.clear ();
lazy_state_unknown.clear ();
lazy_balances.clear ();
lazy_stopped = 0;
lazy_mode = false;
lazy_lock.unlock ();
run ();
lock.lock ();
}
}
stopped = true;
condition.notify_all ();
idle.clear ();
}
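/*
 * Subtype inference note: for a state block, whether the link must be pulled
 * is inferred from balances. If the previous balance is <= the block's
 * balance, funds were received and link names a source block worth fetching;
 * if the balance decreased, the block is a send and link is a destination
 * account, which does not need to be pulled.
 */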
bool nano::bootstrap_attempt::process_block (std::shared_ptr<nano::block> block_a, uint64_t total_blocks, bool block_expected)
{
bool stop_pull (false);
if (lazy_mode && block_expected)
{
auto hash (block_a->hash ());
std::unique_lock<std::mutex> lock (lazy_mutex);
// Processing new blocks
if (lazy_blocks.find (hash) == lazy_blocks.end ())
{
// Search block in ledger (old)
auto transaction (node->store.tx_begin_read ());
if (!node->store.block_exists (transaction, block_a->type (), hash))
{
nano::uint128_t balance (std::numeric_limits<nano::uint128_t>::max ());
node->block_processor.add (block_a, std::chrono::steady_clock::time_point ());
// Search for new dependencies
if (!block_a->source ().is_zero () && !node->store.block_exists (transaction, block_a->source ()))
{
lazy_add (block_a->source ());
}
else if (block_a->type () == nano::block_type::send)
{
// Calculate balance for legacy send blocks
std::shared_ptr<nano::send_block> block_l (std::static_pointer_cast<nano::send_block> (block_a));
if (block_l != nullptr)
{
balance = block_l->hashables.balance.number ();
}
}
else if (block_a->type () == nano::block_type::state)
{
std::shared_ptr<nano::state_block> block_l (std::static_pointer_cast<nano::state_block> (block_a));
if (block_l != nullptr)
{
balance = block_l->hashables.balance.number ();
nano::block_hash link (block_l->hashables.link);
// If the link is neither the epoch link nor 0, and the block it references is unknown
if (!link.is_zero () && link != node->ledger.epoch_link && lazy_blocks.find (link) == lazy_blocks.end () && !node->store.block_exists (transaction, link))
{
nano::block_hash previous (block_l->hashables.previous);
// If the state block's previous is 0, then the source block is required
if (previous.is_zero ())
{
lazy_add (link);
}
// Otherwise the previous block's balance is required to determine the state block's subtype
else if (node->store.block_exists (transaction, previous))
{
nano::amount prev_balance (node->ledger.balance (transaction, previous));
if (prev_balance.number () <= balance)
{
lazy_add (link);
}
}
// Search balance of already processed previous blocks
else if (lazy_blocks.find (previous) != lazy_blocks.end ())
{
auto previous_balance (lazy_balances.find (previous));
if (previous_balance != lazy_balances.end ())
{
if (previous_balance->second <= balance)
{
lazy_add (link);
}
lazy_balances.erase (previous_balance);
}
}
// Insert in unknown state blocks if previous wasn't already processed
else
{
lazy_state_unknown.insert (std::make_pair (previous, std::make_pair (link, balance)));
}
}
}
}
lazy_blocks.insert (hash);
// Adding lazy balances
if (total_blocks == 0)
{
lazy_balances.insert (std::make_pair (hash, balance));
}
// Removing lazy balances
if (!block_a->previous ().is_zero () && lazy_balances.find (block_a->previous ()) != lazy_balances.end ())
{
lazy_balances.erase (block_a->previous ());
}
}
// Drop bulk_pull if block is already known (ledger)
else
{
// Disabled until server rewrite
// stop_pull = true;
// Force drop lazy bootstrap connection for long bulk_pull
if (total_blocks > lazy_max_pull_blocks)
{
stop_pull = true;
}
}
// Search balances of unknown state blocks
auto find_state (lazy_state_unknown.find (hash));
if (find_state != lazy_state_unknown.end ())
{
auto next_block (find_state->second);
lazy_state_unknown.erase (hash);
// Retrieve balance for previous state blocks
if (block_a->type () == nano::block_type::state)
{
std::shared_ptr<nano::state_block> block_l (std::static_pointer_cast<nano::state_block> (block_a));
if (block_l->hashables.balance.number () <= next_block.second)
{
lazy_add (next_block.first);
}
}
// Retrieve balance for previous legacy send blocks
else if (block_a->type () == nano::block_type::send)
{
std::shared_ptr<nano::send_block> block_l (std::static_pointer_cast<nano::send_block> (block_a));
if (block_l->hashables.balance.number () <= next_block.second)
{
lazy_add (next_block.first);
}
}
// Weak assumption for other legacy block types
else
{
// Disabled
}
}
}
// Drop bulk_pull if block is already known (processed set)
else
{
// Disabled until server rewrite
// stop_pull = true;
// Force drop lazy bootstrap connection for long bulk_pull
if (total_blocks > lazy_max_pull_blocks)
{
stop_pull = true;
}
}
}
else if (lazy_mode)
{
// Drop connection with unexpected block for lazy bootstrap
stop_pull = true;
}
else
{
node->block_processor.add (block_a, std::chrono::steady_clock::time_point ());
}
return stop_pull;
}
nano::bootstrap_initiator::bootstrap_initiator (nano::node & node_a) :
node (node_a),
stopped (false),
thread ([this]() {
nano::thread_role::set (nano::thread_role::name::bootstrap_initiator);
run_bootstrap ();
})
{
}
nano::bootstrap_initiator::~bootstrap_initiator ()
{
stop ();
thread.join ();
}
void nano::bootstrap_initiator::bootstrap ()
{
std::unique_lock<std::mutex> lock (mutex);
if (!stopped && attempt == nullptr)
{
node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate, nano::stat::dir::out);
attempt = std::make_shared<nano::bootstrap_attempt> (node.shared ());
condition.notify_all ();
}
}
void nano::bootstrap_initiator::bootstrap (nano::endpoint const & endpoint_a, bool add_to_peers)
{
if (add_to_peers)
{
node.peers.insert (nano::map_endpoint_to_v6 (endpoint_a), nano::protocol_version);
}
std::unique_lock<std::mutex> lock (mutex);
if (!stopped)
{
while (attempt != nullptr)
{
attempt->stop ();
condition.wait (lock);
}
node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate, nano::stat::dir::out);
attempt = std::make_shared<nano::bootstrap_attempt> (node.shared ());
attempt->add_connection (endpoint_a);
condition.notify_all ();
}
}
void nano::bootstrap_initiator::bootstrap_lazy (nano::block_hash const & hash_a, bool force)
{
{
std::unique_lock<std::mutex> lock (mutex);
if (force)
{
while (attempt != nullptr)
{
attempt->stop ();
condition.wait (lock);
}
}
node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate_lazy, nano::stat::dir::out);
if (attempt == nullptr)
{
attempt = std::make_shared<nano::bootstrap_attempt> (node.shared ());
attempt->lazy_mode = true;
}
attempt->lazy_start (hash_a);
}
condition.notify_all ();
}
void nano::bootstrap_initiator::run_bootstrap ()
{
std::unique_lock<std::mutex> lock (mutex);
while (!stopped)
{
if (attempt != nullptr)
{
lock.unlock ();
if (!attempt->lazy_mode)
{
attempt->run ();
}
else
{
attempt->lazy_run ();
}
lock.lock ();
attempt = nullptr;
condition.notify_all ();
}
else
{
condition.wait (lock);
}
}
}
void nano::bootstrap_initiator::add_observer (std::function<void(bool)> const & observer_a)
{
std::lock_guard<std::mutex> lock (mutex);
observers.push_back (observer_a);
}
bool nano::bootstrap_initiator::in_progress ()
{
return current_attempt () != nullptr;
}
std::shared_ptr<nano::bootstrap_attempt> nano::bootstrap_initiator::current_attempt ()
{
std::lock_guard<std::mutex> lock (mutex);
return attempt;
}
void nano::bootstrap_initiator::stop ()
{
{
std::unique_lock<std::mutex> lock (mutex);
stopped = true;
if (attempt != nullptr)
{
attempt->stop ();
}
}
condition.notify_all ();
}
void nano::bootstrap_initiator::notify_listeners (bool in_progress_a)
{
for (auto & i : observers)
{
i (in_progress_a);
}
}
nano::bootstrap_listener::bootstrap_listener (boost::asio::io_context & io_ctx_a, uint16_t port_a, nano::node & node_a) :
acceptor (io_ctx_a),
local (boost::asio::ip::tcp::endpoint (boost::asio::ip::address_v6::any (), port_a)),
io_ctx (io_ctx_a),
node (node_a)
{
}
void nano::bootstrap_listener::start ()
{
acceptor.open (local.protocol ());
acceptor.set_option (boost::asio::ip::tcp::acceptor::reuse_address (true));
boost::system::error_code ec;
acceptor.bind (local, ec);
if (ec)
{
BOOST_LOG (node.log) << boost::str (boost::format ("Error while binding for bootstrap on port %1%: %2%") % local.port () % ec.message ());
throw std::runtime_error (ec.message ());
}
acceptor.listen ();
accept_connection ();
}
void nano::bootstrap_listener::stop ()
{
decltype (connections) connections_l;
{
std::lock_guard<std::mutex> lock (mutex);
on = false;
connections_l.swap (connections);
}
acceptor.close ();
for (auto & i : connections_l)
{
auto connection (i.second.lock ());
if (connection)
{
connection->socket->close ();
}
}
}
void nano::bootstrap_listener::accept_connection ()
{
auto socket (std::make_shared<nano::socket> (node.shared ()));
acceptor.async_accept (socket->socket_m, [this, socket](boost::system::error_code const & ec) {
accept_action (ec, socket);
});
}
void nano::bootstrap_listener::accept_action (boost::system::error_code const & ec, std::shared_ptr<nano::socket> socket_a)
{
if (!ec)
{
accept_connection ();
auto connection (std::make_shared<nano::bootstrap_server> (socket_a, node.shared ()));
{
std::lock_guard<std::mutex> lock (mutex);
if (connections.size () < node.config.bootstrap_connections_max && acceptor.is_open ())
{
connections[connection.get ()] = connection;
connection->receive ();
}
}
}
else
{
BOOST_LOG (node.log) << boost::str (boost::format ("Error while accepting bootstrap connections: %1%") % ec.message ());
}
}
boost::asio::ip::tcp::endpoint nano::bootstrap_listener::endpoint ()
{
return boost::asio::ip::tcp::endpoint (boost::asio::ip::address_v6::loopback (), local.port ());
}
nano::bootstrap_server::~bootstrap_server ()
{
if (node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (node->log) << "Exiting bootstrap server";
}
std::lock_guard<std::mutex> lock (node->bootstrap.mutex);
node->bootstrap.connections.erase (this);
}
nano::bootstrap_server::bootstrap_server (std::shared_ptr<nano::socket> socket_a, std::shared_ptr<nano::node> node_a) :
receive_buffer (std::make_shared<std::vector<uint8_t>> ()),
socket (socket_a),
node (node_a)
{
receive_buffer->resize (128);
}
void nano::bootstrap_server::receive ()
{
auto this_l (shared_from_this ());
socket->async_read (receive_buffer, 8, [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->receive_header_action (ec, size_a);
});
}
void nano::bootstrap_server::receive_header_action (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
assert (size_a == 8);
nano::bufferstream type_stream (receive_buffer->data (), size_a);
auto error (false);
nano::message_header header (error, type_stream);
if (!error)
{
switch (header.type)
{
case nano::message_type::bulk_pull:
{
uint32_t extended_size;
node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull, nano::stat::dir::in);
if (header.bulk_pull_is_count_present ())
{
extended_size = nano::bulk_pull::extended_parameters_size;
}
else
{
extended_size = 0;
}
auto this_l (shared_from_this ());
socket->async_read (receive_buffer, sizeof (nano::uint256_union) + sizeof (nano::uint256_union) + extended_size, [this_l, header](boost::system::error_code const & ec, size_t size_a) {
this_l->receive_bulk_pull_action (ec, size_a, header);
});
break;
}
case nano::message_type::bulk_pull_account:
{
node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull_account, nano::stat::dir::in);
auto this_l (shared_from_this ());
socket->async_read (receive_buffer, sizeof (nano::uint256_union) + sizeof (nano::uint128_union) + sizeof (uint8_t), [this_l, header](boost::system::error_code const & ec, size_t size_a) {
this_l->receive_bulk_pull_account_action (ec, size_a, header);
});
break;
}
case nano::message_type::bulk_pull_blocks:
{
if (node->config.logging.network_logging ())
{
BOOST_LOG (node->log) << boost::str (boost::format ("Received deprecated \"bulk_pull_blocks\" from bootstrap connection %1%") % static_cast<uint8_t> (header.type));
}
auto this_l (shared_from_this ());
socket->async_read (receive_buffer, sizeof (nano::uint256_union) + sizeof (nano::uint256_union) + sizeof (bulk_pull_blocks_mode) + sizeof (uint32_t), [this_l, header](boost::system::error_code const & ec, size_t size_a) {
this_l->receive_bulk_pull_blocks_action (ec, size_a, header);
});
break;
}
case nano::message_type::frontier_req:
{
node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::frontier_req, nano::stat::dir::in);
auto this_l (shared_from_this ());
socket->async_read (receive_buffer, sizeof (nano::uint256_union) + sizeof (uint32_t) + sizeof (uint32_t), [this_l, header](boost::system::error_code const & ec, size_t size_a) {
this_l->receive_frontier_req_action (ec, size_a, header);
});
break;
}
case nano::message_type::bulk_push:
{
node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_push, nano::stat::dir::in);
add_request (std::unique_ptr<nano::message> (new nano::bulk_push (header)));
break;
}
default:
{
if (node->config.logging.network_logging ())
{
BOOST_LOG (node->log) << boost::str (boost::format ("Received invalid type from bootstrap connection %1%") % static_cast<uint8_t> (header.type));
}
break;
}
}
}
}
else
{
if (node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (node->log) << boost::str (boost::format ("Error while receiving type: %1%") % ec.message ());
}
}
}
void nano::bootstrap_server::receive_bulk_pull_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a)
{
if (!ec)
{
auto error (false);
nano::bufferstream stream (receive_buffer->data (), size_a);
std::unique_ptr<nano::bulk_pull> request (new nano::bulk_pull (error, stream, header_a));
if (!error)
{
if (node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (node->log) << boost::str (boost::format ("Received bulk pull for %1% down to %2%, maximum of %3%") % request->start.to_string () % request->end.to_string () % (request->count ? request->count : std::numeric_limits<double>::infinity ()));
}
add_request (std::unique_ptr<nano::message> (request.release ()));
receive ();
}
}
}
void nano::bootstrap_server::receive_bulk_pull_account_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a)
{
if (!ec)
{
auto error (false);
assert (size_a == (sizeof (nano::uint256_union) + sizeof (nano::uint128_union) + sizeof (uint8_t)));
nano::bufferstream stream (receive_buffer->data (), size_a);
std::unique_ptr<nano::bulk_pull_account> request (new nano::bulk_pull_account (error, stream, header_a));
if (!error)
{
if (node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (node->log) << boost::str (boost::format ("Received bulk pull account for %1% with a minimum amount of %2%") % request->account.to_account () % nano::amount (request->minimum_amount).format_balance (nano::Mxrb_ratio, 10, true));
}
add_request (std::unique_ptr<nano::message> (request.release ()));
receive ();
}
}
}
void nano::bootstrap_server::receive_bulk_pull_blocks_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a)
{
if (!ec)
{
auto error (false);
nano::bufferstream stream (receive_buffer->data (), sizeof (nano::uint256_union) + sizeof (nano::uint256_union) + sizeof (bulk_pull_blocks_mode) + sizeof (uint32_t));
std::unique_ptr<nano::bulk_pull_blocks> request (new nano::bulk_pull_blocks (error, stream, header_a));
if (!error)
{
if (node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (node->log) << boost::str (boost::format ("Received deprecated bulk pull blocks for %1% to %2%") % request->min_hash.to_string () % request->max_hash.to_string ());
}
add_request (std::unique_ptr<nano::message> (request.release ()));
receive ();
}
}
}
void nano::bootstrap_server::receive_frontier_req_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a)
{
if (!ec)
{
auto error (false);
nano::bufferstream stream (receive_buffer->data (), sizeof (nano::uint256_union) + sizeof (uint32_t) + sizeof (uint32_t));
std::unique_ptr<nano::frontier_req> request (new nano::frontier_req (error, stream, header_a));
if (!error)
{
if (node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (node->log) << boost::str (boost::format ("Received frontier request for %1% with age %2%") % request->start.to_string () % request->age);
}
add_request (std::unique_ptr<nano::message> (request.release ()));
receive ();
}
}
else
{
if (node->config.logging.network_logging ())
{
BOOST_LOG (node->log) << boost::str (boost::format ("Error receiving frontier request: %1%") % ec.message ());
}
}
}
void nano::bootstrap_server::add_request (std::unique_ptr<nano::message> message_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto start (requests.empty ());
requests.push (std::move (message_a));
if (start)
{
run_next ();
}
}
void nano::bootstrap_server::finish_request ()
{
std::lock_guard<std::mutex> lock (mutex);
requests.pop ();
if (!requests.empty ())
{
run_next ();
}
}
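/*
 * Queueing note: add_request () only kicks off run_next () when the queue
 * was empty, and finish_request () runs the next entry after popping, so
 * requests on a single bootstrap connection are always served one at a time.
 */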
namespace
{
class request_response_visitor : public nano::message_visitor
{
public:
request_response_visitor (std::shared_ptr<nano::bootstrap_server> connection_a) :
connection (connection_a)
{
}
virtual ~request_response_visitor () = default;
void keepalive (nano::keepalive const &) override
{
assert (false);
}
void publish (nano::publish const &) override
{
assert (false);
}
void confirm_req (nano::confirm_req const &) override
{
assert (false);
}
void confirm_ack (nano::confirm_ack const &) override
{
assert (false);
}
void bulk_pull (nano::bulk_pull const &) override
{
auto response (std::make_shared<nano::bulk_pull_server> (connection, std::unique_ptr<nano::bulk_pull> (static_cast<nano::bulk_pull *> (connection->requests.front ().release ()))));
response->send_next ();
}
void bulk_pull_account (nano::bulk_pull_account const &) override
{
auto response (std::make_shared<nano::bulk_pull_account_server> (connection, std::unique_ptr<nano::bulk_pull_account> (static_cast<nano::bulk_pull_account *> (connection->requests.front ().release ()))));
response->send_frontier ();
}
void bulk_pull_blocks (nano::bulk_pull_blocks const &) override
{
auto response (std::make_shared<nano::bulk_pull_blocks_server> (connection, std::unique_ptr<nano::bulk_pull_blocks> (static_cast<nano::bulk_pull_blocks *> (connection->requests.front ().release ()))));
response->send_next ();
}
void bulk_push (nano::bulk_push const &) override
{
auto response (std::make_shared<nano::bulk_push_server> (connection));
response->receive ();
}
void frontier_req (nano::frontier_req const &) override
{
auto response (std::make_shared<nano::frontier_req_server> (connection, std::unique_ptr<nano::frontier_req> (static_cast<nano::frontier_req *> (connection->requests.front ().release ()))));
response->send_next ();
}
void node_id_handshake (nano::node_id_handshake const &) override
{
assert (false);
}
std::shared_ptr<nano::bootstrap_server> connection;
};
}
void nano::bootstrap_server::run_next ()
{
assert (!requests.empty ());
request_response_visitor visitor (shared_from_this ());
requests.front ()->visit (visitor);
}
/**
* Handle a request for the pull of all blocks associated with an account
* The account is supplied as the "start" member, and the final block to
* send is the "end" member. The "start" member may also be a block
* hash, in which case that hash is used as the start of a chain
* to send. To determine whether "start" is interpreted as an account or
* a hash, the ledger is checked to see if the specified block exists;
* if not, then it is interpreted as an account.
*
* Additionally, if "start" is specified as a block hash the range
* is inclusive of that block hash, that is the range will be:
* [start, end). In the case that a block hash is not specified, the
* range will be exclusive of the frontier for that account with
* a range of (frontier, end)
*/
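/*
 * Illustration (hypothetical values): a request with start = account A whose
 * frontier is F and end = 0 walks F, F.previous, ... down to A's open block;
 * a request with start = block hash H instead sends the inclusive range
 * starting at H.
 */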
void nano::bulk_pull_server::set_current_end ()
{
include_start = false;
assert (request != nullptr);
auto transaction (connection->node->store.tx_begin_read ());
if (!connection->node->store.block_exists (transaction, request->end))
{
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Bulk pull end block doesn't exist: %1%, sending everything") % request->end.to_string ());
}
request->end.clear ();
}
if (connection->node->store.block_exists (transaction, request->start))
{
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Bulk pull request for block hash: %1%") % request->start.to_string ());
}
current = request->start;
include_start = true;
}
else
{
nano::account_info info;
auto no_address (connection->node->store.account_get (transaction, request->start, info));
if (no_address)
{
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Request for unknown account: %1%") % request->start.to_account ());
}
current = request->end;
}
else
{
current = info.head;
if (!request->end.is_zero ())
{
auto account (connection->node->ledger.account (transaction, request->end));
if (account != request->start)
{
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Request for block that is not on account chain: %1% not on %2%") % request->end.to_string () % request->start.to_account ());
}
current = request->end;
}
}
}
}
sent_count = 0;
if (request->is_count_present ())
{
max_count = request->count;
}
else
{
max_count = 0;
}
}
void nano::bulk_pull_server::send_next ()
{
auto block (get_next ());
if (block != nullptr)
{
{
send_buffer->clear ();
nano::vectorstream stream (*send_buffer);
nano::serialize_block (stream, *block);
}
auto this_l (shared_from_this ());
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Sending block: %1%") % block->hash ().to_string ());
}
connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->sent_action (ec, size_a);
});
}
else
{
send_finished ();
}
}
std::shared_ptr<nano::block> nano::bulk_pull_server::get_next ()
{
std::shared_ptr<nano::block> result;
bool send_current = false, set_current_to_end = false;
/*
* Determine if we should reply with a block
*
* If our cursor is on the final block, we should signal that we
* are done by returning a null result.
*
* However, if we are including the "start" member and the cursor
* is currently on it, include it anyway.
*/
if (current != request->end)
{
send_current = true;
}
else if (current == request->end && include_start == true)
{
send_current = true;
/*
* We also need to ensure that the next time we
* are invoked we return a null result
*/
set_current_to_end = true;
}
/*
* Account for how many blocks we have provided. If this
* exceeds the requested maximum, return an empty object
* to signal the end of results
*/
if (max_count != 0 && sent_count >= max_count)
{
send_current = false;
}
if (send_current)
{
auto transaction (connection->node->store.tx_begin_read ());
result = connection->node->store.block_get (transaction, current);
if (result != nullptr && set_current_to_end == false)
{
auto previous (result->previous ());
if (!previous.is_zero ())
{
current = previous;
}
else
{
current = request->end;
}
}
else
{
current = request->end;
}
sent_count++;
}
/*
* Once "get_next()" has run once, our cursor is no longer on
* the "start" member, so this flag is no longer relevant and is always false.
*/
include_start = false;
return result;
}
void nano::bulk_pull_server::sent_action (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
send_next ();
}
else
{
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Unable to bulk send block: %1%") % ec.message ());
}
}
}
void nano::bulk_pull_server::send_finished ()
{
send_buffer->clear ();
send_buffer->push_back (static_cast<uint8_t> (nano::block_type::not_a_block));
auto this_l (shared_from_this ());
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << "Bulk sending finished";
}
connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->no_block_sent (ec, size_a);
});
}
void nano::bulk_pull_server::no_block_sent (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
assert (size_a == 1);
connection->finish_request ();
}
else
{
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << "Unable to send not-a-block";
}
}
}
nano::bulk_pull_server::bulk_pull_server (std::shared_ptr<nano::bootstrap_server> const & connection_a, std::unique_ptr<nano::bulk_pull> request_a) :
connection (connection_a),
request (std::move (request_a)),
send_buffer (std::make_shared<std::vector<uint8_t>> ())
{
set_current_end ();
}
/**
* Bulk pull blocks related to an account
*/
void nano::bulk_pull_account_server::set_params ()
{
assert (request != nullptr);
/*
* Parse the flags
*/
invalid_request = false;
pending_include_address = false;
pending_address_only = false;
if (request->flags == nano::bulk_pull_account_flags::pending_address_only)
{
pending_address_only = true;
}
else if (request->flags == nano::bulk_pull_account_flags::pending_hash_amount_and_address)
{
/**
** This is the same as "pending_hash_and_amount" but with the
** sending address appended, for UI purposes mainly.
**/
pending_include_address = true;
}
else if (request->flags == nano::bulk_pull_account_flags::pending_hash_and_amount)
{
/** The defaults are set above **/
}
else
{
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Invalid bulk_pull_account flags supplied %1%") % static_cast<uint8_t> (request->flags));
}
invalid_request = true;
return;
}
/*
* Initialize the current item from the requested account
*/
current_key.account = request->account;
current_key.hash = 0;
}
void nano::bulk_pull_account_server::send_frontier ()
{
/*
* This function is really the entry point into this class,
* so handle the invalid_request case by terminating the
* request without any response
*/
if (invalid_request)
{
connection->finish_request ();
return;
}
/*
* Supply the account frontier
*/
/**
** Establish a database transaction
**/
auto stream_transaction (connection->node->store.tx_begin_read ());
/**
** Get account balance and frontier block hash
**/
auto account_frontier_hash (connection->node->ledger.latest (stream_transaction, request->account));
auto account_frontier_balance_int (connection->node->ledger.account_balance (stream_transaction, request->account));
nano::uint128_union account_frontier_balance (account_frontier_balance_int);
/**
** Write the frontier block hash and balance into a buffer
**/
send_buffer->clear ();
{
nano::vectorstream output_stream (*send_buffer);
write (output_stream, account_frontier_hash.bytes);
write (output_stream, account_frontier_balance.bytes);
}
/**
** Send the buffer to the requestor
**/
auto this_l (shared_from_this ());
connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->sent_action (ec, size_a);
});
}
void nano::bulk_pull_account_server::send_next_block ()
{
/*
* Get the next item from the queue, it is a tuple with the key (which
* contains the account and hash) and data (which contains the amount)
*/
auto block_data (get_next ());
auto block_info_key (block_data.first.get ());
auto block_info (block_data.second.get ());
if (block_info_key != nullptr)
{
/*
* If we have a new item, emit it to the socket
*/
send_buffer->clear ();
if (pending_address_only)
{
nano::vectorstream output_stream (*send_buffer);
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Sending address: %1%") % block_info->source.to_string ());
}
write (output_stream, block_info->source.bytes);
}
else
{
nano::vectorstream output_stream (*send_buffer);
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Sending block: %1%") % block_info_key->hash.to_string ());
}
write (output_stream, block_info_key->hash.bytes);
write (output_stream, block_info->amount.bytes);
if (pending_include_address)
{
/**
** Write the source address as well, if requested
**/
write (output_stream, block_info->source.bytes);
}
}
auto this_l (shared_from_this ());
connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->sent_action (ec, size_a);
});
}
else
{
/*
* Otherwise, finalize the connection
*/
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Done sending blocks"));
}
send_finished ();
}
}
std::pair<std::unique_ptr<nano::pending_key>, std::unique_ptr<nano::pending_info>> nano::bulk_pull_account_server::get_next ()
{
std::pair<std::unique_ptr<nano::pending_key>, std::unique_ptr<nano::pending_info>> result;
while (true)
{
/*
* For each iteration of this loop, establish and then
* destroy a database transaction, to avoid locking the
* database for a prolonged period.
*/
auto stream_transaction (connection->node->store.tx_begin_read ());
auto stream (connection->node->store.pending_begin (stream_transaction, current_key));
if (stream == nano::store_iterator<nano::pending_key, nano::pending_info> (nullptr))
{
break;
}
nano::pending_key key (stream->first);
nano::pending_info info (stream->second);
/*
* Get the key for the next value, to use in the next call or iteration
*/
current_key.account = key.account;
current_key.hash = key.hash.number () + 1;
/*
* Finish up if the response is for a different account
*/
if (key.account != request->account)
{
break;
}
/*
* Skip entries where the amount is less than the requested
* minimum
*/
if (info.amount < request->minimum_amount)
{
continue;
}
/*
* If the pending_address_only flag is set, de-duplicate the
* responses. Each response is the address of the sender,
* which is part of the pending table's information and not
* of its key, so we have to de-duplicate manually.
*/
if (pending_address_only)
{
if (!deduplication.insert (info.source).second)
{
/*
* If the deduplication map gets too
* large, clear it out. This may
* result in some duplicates getting
* sent to the client, but we do not
* want to commit too much memory
*/
if (deduplication.size () > 4096)
{
deduplication.clear ();
}
continue;
}
}
result.first = std::unique_ptr<nano::pending_key> (new nano::pending_key (key));
result.second = std::unique_ptr<nano::pending_info> (new nano::pending_info (info));
break;
}
return result;
}
void nano::bulk_pull_account_server::sent_action (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
send_next_block ();
}
else
{
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Unable to bulk send block: %1%") % ec.message ());
}
}
}
void nano::bulk_pull_account_server::send_finished ()
{
/*
* The "bulk_pull_account" final sequence is a final block of all
* zeros. If we are sending only account public keys (with the
* "pending_address_only" flag) then it will be 256-bits of zeros,
* otherwise it will be either 384-bits of zeros (if the
* "pending_include_address" flag is not set) or 640-bits of zeros
* (if that flag is set).
*/
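/*
 * Worked sizes (illustrative note, not in the original source):
 * 256 bits = 32 bytes for the address-only form, 384 bits = 48 bytes
 * (32-byte hash + 16-byte amount), and 640 bits = 80 bytes
 * (32 + 16 + 32 with the source address appended), matching the size
 * asserts in "complete" below.
 */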
send_buffer->clear ();
{
nano::vectorstream output_stream (*send_buffer);
nano::uint256_union account_zero (0);
nano::uint128_union balance_zero (0);
write (output_stream, account_zero.bytes);
if (!pending_address_only)
{
write (output_stream, balance_zero.bytes);
if (pending_include_address)
{
write (output_stream, account_zero.bytes);
}
}
}
auto this_l (shared_from_this ());
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << "Bulk sending for an account finished";
}
connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->complete (ec, size_a);
});
}
void nano::bulk_pull_account_server::complete (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
if (pending_address_only)
{
assert (size_a == 32);
}
else
{
if (pending_include_address)
{
assert (size_a == 80);
}
else
{
assert (size_a == 48);
}
}
connection->finish_request ();
}
else
{
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << "Unable to pending-as-zero";
}
}
}
nano::bulk_pull_account_server::bulk_pull_account_server (std::shared_ptr<nano::bootstrap_server> const & connection_a, std::unique_ptr<nano::bulk_pull_account> request_a) :
connection (connection_a),
request (std::move (request_a)),
send_buffer (std::make_shared<std::vector<uint8_t>> ()),
current_key (0, 0)
{
/*
* Setup the streaming response for the first call to "send_frontier" and "send_next_block"
*/
set_params ();
}
/**
* DEPRECATED
*/
void nano::bulk_pull_blocks_server::set_params ()
{
assert (request != nullptr);
}
void nano::bulk_pull_blocks_server::send_next ()
{
send_finished ();
}
void nano::bulk_pull_blocks_server::send_finished ()
{
send_buffer->clear ();
send_buffer->push_back (static_cast<uint8_t> (nano::block_type::not_a_block));
auto this_l (shared_from_this ());
connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->no_block_sent (ec, size_a);
});
}
void nano::bulk_pull_blocks_server::no_block_sent (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
assert (size_a == 1);
connection->finish_request ();
}
}
nano::bulk_pull_blocks_server::bulk_pull_blocks_server (std::shared_ptr<nano::bootstrap_server> const & connection_a, std::unique_ptr<nano::bulk_pull_blocks> request_a) :
connection (connection_a),
request (std::move (request_a)),
send_buffer (std::make_shared<std::vector<uint8_t>> ())
{
set_params ();
}
nano::bulk_push_server::bulk_push_server (std::shared_ptr<nano::bootstrap_server> const & connection_a) :
receive_buffer (std::make_shared<std::vector<uint8_t>> ()),
connection (connection_a)
{
receive_buffer->resize (256);
}
void nano::bulk_push_server::receive ()
{
if (connection->node->bootstrap_initiator.in_progress ())
{
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << "Aborting bulk_push because a bootstrap attempt is in progress";
}
}
else
{
auto this_l (shared_from_this ());
connection->socket->async_read (receive_buffer, 1, [this_l](boost::system::error_code const & ec, size_t size_a) {
if (!ec)
{
this_l->received_type ();
}
else
{
if (this_l->connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Error receiving block type: %1%") % ec.message ());
}
}
});
}
}
void nano::bulk_push_server::received_type ()
{
auto this_l (shared_from_this ());
nano::block_type type (static_cast<nano::block_type> (receive_buffer->data ()[0]));
switch (type)
{
case nano::block_type::send:
{
connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::send, nano::stat::dir::in);
connection->socket->async_read (receive_buffer, nano::send_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
this_l->received_block (ec, size_a, type);
});
break;
}
case nano::block_type::receive:
{
connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::receive, nano::stat::dir::in);
connection->socket->async_read (receive_buffer, nano::receive_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
this_l->received_block (ec, size_a, type);
});
break;
}
case nano::block_type::open:
{
connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::open, nano::stat::dir::in);
connection->socket->async_read (receive_buffer, nano::open_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
this_l->received_block (ec, size_a, type);
});
break;
}
case nano::block_type::change:
{
connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::change, nano::stat::dir::in);
connection->socket->async_read (receive_buffer, nano::change_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
this_l->received_block (ec, size_a, type);
});
break;
}
case nano::block_type::state:
{
connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::state_block, nano::stat::dir::in);
connection->socket->async_read (receive_buffer, nano::state_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) {
this_l->received_block (ec, size_a, type);
});
break;
}
case nano::block_type::not_a_block:
{
connection->finish_request ();
break;
}
default:
{
if (connection->node->config.logging.network_packet_logging ())
{
BOOST_LOG (connection->node->log) << "Unknown type received as block type";
}
break;
}
}
}
void nano::bulk_push_server::received_block (boost::system::error_code const & ec, size_t size_a, nano::block_type type_a)
{
if (!ec)
{
nano::bufferstream stream (receive_buffer->data (), size_a);
auto block (nano::deserialize_block (stream, type_a));
if (block != nullptr && !nano::work_validate (*block))
{
connection->node->process_active (std::move (block));
receive ();
}
else
{
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << "Error deserializing block received from pull request";
}
}
}
}
nano::frontier_req_server::frontier_req_server (std::shared_ptr<nano::bootstrap_server> const & connection_a, std::unique_ptr<nano::frontier_req> request_a) :
connection (connection_a),
current (request_a->start.number () - 1),
frontier (0),
request (std::move (request_a)),
send_buffer (std::make_shared<std::vector<uint8_t>> ()),
count (0)
{
next ();
}
void nano::frontier_req_server::send_next ()
{
if (!current.is_zero () && count <= request->count)
{
{
send_buffer->clear ();
nano::vectorstream stream (*send_buffer);
write (stream, current.bytes);
write (stream, frontier.bytes);
}
auto this_l (shared_from_this ());
if (connection->node->config.logging.bulk_pull_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Sending frontier for %1% %2%") % current.to_account () % frontier.to_string ());
}
next ();
connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->sent_action (ec, size_a);
});
}
else
{
send_finished ();
}
}
void nano::frontier_req_server::send_finished ()
{
{
send_buffer->clear ();
nano::vectorstream stream (*send_buffer);
nano::uint256_union zero (0);
write (stream, zero.bytes);
write (stream, zero.bytes);
}
auto this_l (shared_from_this ());
if (connection->node->config.logging.network_logging ())
{
BOOST_LOG (connection->node->log) << "Frontier sending finished";
}
connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) {
this_l->no_block_sent (ec, size_a);
});
}
void nano::frontier_req_server::no_block_sent (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
connection->finish_request ();
}
else
{
if (connection->node->config.logging.network_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Error sending frontier finish: %1%") % ec.message ());
}
}
}
void nano::frontier_req_server::sent_action (boost::system::error_code const & ec, size_t size_a)
{
if (!ec)
{
count++;
send_next ();
}
else
{
if (connection->node->config.logging.network_logging ())
{
BOOST_LOG (connection->node->log) << boost::str (boost::format ("Error sending frontier pair: %1%") % ec.message ());
}
}
}
void nano::frontier_req_server::next ()
{
// Fill the accounts deque to avoid opening read transactions too often
if (accounts.empty ())
{
auto now (nano::seconds_since_epoch ());
bool skip_old (request->age != std::numeric_limits<decltype (request->age)>::max ());
size_t max_size (128);
auto transaction (connection->node->store.tx_begin_read ());
for (auto i (connection->node->store.latest_begin (transaction, current.number () + 1)), n (connection->node->store.latest_end ()); i != n && accounts.size () != max_size; ++i)
{
nano::account_info info (i->second);
if (!skip_old || (now - info.modified) <= request->age)
{
accounts.push_back (std::make_pair (nano::account (i->first), info.head));
}
}
/* If the loop ends before filling max_size entries, then latest_end () was reached.
Add an empty record to finish frontier_req_server */
if (accounts.size () != max_size)
{
accounts.push_back (std::make_pair (nano::account (0), nano::block_hash (0)));
}
}
// Retrieving accounts from deque
auto account_pair (accounts.front ());
accounts.pop_front ();
current = account_pair.first;
frontier = account_pair.second;
}
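/*
 * Illustrative note (not in the original source): with max_size = 128
 * above, each refill reads at most 128 account frontiers inside one
 * short-lived read transaction, and a terminating (0, 0) pair is
 * appended once the end of the ledger is reached, which makes
 * send_next emit the finish marker.
 */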
| 1 | 14,740 | Are these forward declarations needed if we `#include <node/common.hpp>` which includes the definition ? | nanocurrency-nano-node | cpp |
@@ -76,7 +76,7 @@ func (m *Message) Bytes() []byte {
}
func (m *Message) String() string {
- return fmt.Sprintf("MESSAGE: connectid %v MessageType %s", m.ConnectID, m.MessageType)
+ return fmt.Sprintf("MESSAGE: ConnectID %v MessageType %s", m.ConnectID, m.MessageType)
}
func ReadMessageFromTunnel(r io.Reader) (*Message, error) { | 1 | /*
Copyright 2020 The KubeEdge Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package stream
import (
"bufio"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"k8s.io/klog/v2"
)
type MessageType uint64
const (
MessageTypeLogsConnect MessageType = iota
MessageTypeExecConnect
MessageTypeMetricConnect
MessageTypeData
MessageTypeRemoveConnect
)
func (m MessageType) String() string {
switch m {
case MessageTypeLogsConnect:
return "LOGS_CONNECT"
case MessageTypeExecConnect:
return "EXEC_CONNECT"
case MessageTypeMetricConnect:
return "METRIC_CONNECT"
case MessageTypeData:
return "DATA"
case MessageTypeRemoveConnect:
return "REMOVE_CONNECT"
}
return "UNKNOWN"
}
type Message struct {
// ConnectID indicates the apiserver connection id
ConnectID uint64
MessageType MessageType
Data []byte
}
func NewMessage(id uint64, messType MessageType, data []byte) *Message {
return &Message{
ConnectID: id,
MessageType: messType,
Data: data,
}
}
func (m *Message) Bytes() []byte {
// connectID + MessageType + Data
// each uvarint may need up to binary.MaxVarintLen64 (10) bytes, so a fixed
// 16-byte buffer could overflow for two of them; size the buffer accordingly
buf, offset := make([]byte, 2*binary.MaxVarintLen64), 0
offset += binary.PutUvarint(buf[offset:], m.ConnectID)
offset += binary.PutUvarint(buf[offset:], uint64(m.MessageType))
return append(buf[0:offset], m.Data...)
}
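// Illustrative round trip (not part of the original file): a Message
// serialized with Bytes can be parsed back from any io.Reader, since
// ReadMessageFromTunnel below consumes the two uvarints and treats the
// rest of the stream as Data:
//
//	msg := NewMessage(7, MessageTypeData, []byte("payload"))
//	parsed, err := ReadMessageFromTunnel(bytes.NewReader(msg.Bytes()))
//	// err == nil, parsed.ConnectID == 7, parsed.Data == []byte("payload")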
func (m *Message) String() string {
return fmt.Sprintf("MESSAGE: connectid %v MessageType %s", m.ConnectID, m.MessageType)
}
func ReadMessageFromTunnel(r io.Reader) (*Message, error) {
buf := bufio.NewReader(r)
connectID, err := binary.ReadUvarint(buf)
if err != nil {
return nil, err
}
messageType, err := binary.ReadUvarint(buf)
if err != nil {
return nil, err
}
data, err := ioutil.ReadAll(buf)
if err != nil {
return nil, err
}
klog.V(6).Infof("Receive Tunnel message Connectid %d messageType %s data:%v string:[%v]",
connectID, MessageType(messageType), data, string(data))
return &Message{
ConnectID: connectID,
MessageType: MessageType(messageType),
Data: data,
}, nil
}
| 1 | 22,494 | can you use either `connectID` or `ConnectID` i would preffer `connectID` | kubeedge-kubeedge | go |
@@ -112,6 +112,11 @@ class Shopware6ChannelFormModel
*/
public array $customField = [];
+ /**
+ * @var array|null
+ */
+ public ?array $crossSelling = [];
+
public function __construct(Shopware6Channel $channel = null)
{
if ($channel) { | 1 | <?php
/**
* Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved.
* See LICENSE.txt for license details.
*/
declare(strict_types=1);
namespace Ergonode\ExporterShopware6\Application\Model;
use Ergonode\Core\Infrastructure\Validator\Constraint as CoreAssert;
use Ergonode\ExporterShopware6\Application\Model\Type\CustomFieldAttributeModel;
use Ergonode\ExporterShopware6\Application\Model\Type\PropertyGroupAttributeModel;
use Ergonode\ExporterShopware6\Domain\Entity\Shopware6Channel;
use Ergonode\SharedKernel\Domain\Aggregate\AttributeId;
use Symfony\Component\Validator\Constraints as Assert;
class Shopware6ChannelFormModel
{
/**
* @Assert\NotBlank()
* @Assert\Length(min=2)
*/
public ?string $name = null;
/**
* @Assert\NotBlank()
* @Assert\Url()
*/
public ?string $host = null;
/**
* @Assert\NotBlank()
* @Assert\Length(min=2)
*/
public ?string $clientId = null;
/**
* @Assert\NotBlank()
* @Assert\Length(min=2)
*/
public ?string $clientKey = null;
public ?string $segment = null;
/**
* @Assert\NotBlank(),
*
* @CoreAssert\LanguageCodeExists()
* @CoreAssert\LanguageCodeActive()
*/
public ?string $defaultLanguage = null;
/**
* @var array|null
*
* @Assert\All({
*
* @CoreAssert\LanguageCodeExists(),
* @CoreAssert\LanguageCodeActive()
* })
*/
public ?array $languages = [];
/**
* @Assert\NotNull()
*/
public ?AttributeId $attributeProductName = null;
/**
* @Assert\NotNull()
*/
public ?AttributeId $attributeProductActive = null;
/**
* @Assert\NotNull()
*/
public ?AttributeId $attributeProductStock = null;
/**
* @Assert\NotNull()
*/
public ?AttributeId $attributeProductPriceGross = null;
/**
* @Assert\NotNull()
*/
public ?AttributeId $attributeProductPriceNet = null;
/**
* @Assert\NotNull()
*/
public ?AttributeId $attributeProductTax = null;
public ?AttributeId $attributeProductDescription = null;
public ?AttributeId $attributeProductGallery = null;
public ?string $categoryTree = null;
/**
* @var PropertyGroupAttributeModel[]
*
* @Assert\Valid()
*/
public array $propertyGroup = [];
/**
* @var PropertyGroupAttributeModel[]
*
* @Assert\Valid()
*/
public array $customField = [];
public function __construct(Shopware6Channel $channel = null)
{
if ($channel) {
$this->name = $channel->getName();
$this->host = $channel->getHost();
$this->clientId = $channel->getClientId();
$this->clientKey = $channel->getClientKey();
$this->segment = $channel->getSegment() ? $channel->getSegment()->getValue() : null;
$this->defaultLanguage = $channel->getDefaultLanguage()->getCode();
$this->languages = $channel->getLanguages();
$this->attributeProductName = $channel->getAttributeProductName();
$this->attributeProductActive = $channel->getAttributeProductActive();
$this->attributeProductStock = $channel->getAttributeProductStock();
$this->attributeProductPriceGross = $channel->getAttributeProductPriceGross();
$this->attributeProductPriceNet = $channel->getAttributeProductPriceNet();
$this->attributeProductTax = $channel->getAttributeProductTax();
$this->attributeProductDescription = $channel->getAttributeProductDescription();
$this->attributeProductGallery = $channel->getAttributeProductGallery();
$this->categoryTree = $channel->getCategoryTree() ? $channel->getCategoryTree()->getValue() : null;
foreach ($channel->getPropertyGroup() as $attributeId) {
$this->propertyGroup[] = new PropertyGroupAttributeModel($attributeId->getValue());
}
foreach ($channel->getCustomField() as $attributeId) {
$this->customField[] = new CustomFieldAttributeModel($attributeId->getValue());
}
}
}
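/*
 * Illustrative usage (not part of the original file): hydrating the form
 * model from an existing channel before binding it to a Symfony form:
 *
 *   $model = new Shopware6ChannelFormModel($channel);
 *   $form = $formFactory->create(Shopware6ChannelForm::class, $model);
 *
 * The form type class name above is an assumption for illustration only.
 */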
}
| 1 | 9,230 | It seems like it cannot be null? | ergonode-backend | php |
@@ -19,6 +19,7 @@ package azkaban.executor;
public class ExecutionReference {
private final int execId;
private Executor executor;
+ //Todo jamiesjc: deprecate updateTime in ExecutionReference class gradually.
private long updateTime;
private long nextCheckTime = -1;
private int numErrors = 0; | 1 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.executor;
public class ExecutionReference {
private final int execId;
private Executor executor;
private long updateTime;
private long nextCheckTime = -1;
private int numErrors = 0;
public ExecutionReference(int execId) {
this.execId = execId;
}
public ExecutionReference(int execId, Executor executor) {
if (executor == null) {
throw new IllegalArgumentException(String.format(
"Executor cannot be null for exec id: %d ExecutionReference", execId));
}
this.execId = execId;
this.executor = executor;
}
public void setUpdateTime(long updateTime) {
this.updateTime = updateTime;
}
public void setNextCheckTime(long nextCheckTime) {
this.nextCheckTime = nextCheckTime;
}
public long getUpdateTime() {
return updateTime;
}
public long getNextCheckTime() {
return nextCheckTime;
}
public int getExecId() {
return execId;
}
public String getHost() {
return executor.getHost();
}
public int getPort() {
return executor.getPort();
}
public int getNumErrors() {
return numErrors;
}
public void setNumErrors(int numErrors) {
this.numErrors = numErrors;
}
public void setExecutor(Executor executor) {
this.executor = executor;
}
public Executor getExecutor() {
return executor;
}
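// Illustrative usage (not part of the original file): track an execution
// on a remote executor and schedule its next status check.
//
//   ExecutionReference ref = new ExecutionReference(execId, executor);
//   ref.setUpdateTime(System.currentTimeMillis());
//   ref.setNextCheckTime(System.currentTimeMillis() + waitTimeMs);
//
// (waitTimeMs is a hypothetical delay value, named here for illustration.)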
} | 1 | 13,578 | should we consider adding a deprecated annotation here? | azkaban-azkaban | java |
@@ -27,8 +27,8 @@ import (
"golang.org/x/net/context"
)
-type createUserFn func(t testing.TB, ith int, config *libkbfs.ConfigLocal,
- opTimeout time.Duration) *fsUser
+type createUserFn func(t testing.TB, ith int, name libkb.NormalizedUsername,
+ config *libkbfs.ConfigLocal, opTimeout time.Duration) *fsUser
type fsEngine struct {
name string | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
// Without any build tags the tests are run on libkbfs directly.
// With the tag dokan all tests are run through a dokan filesystem.
// With the tag fuse all tests are run through a fuse filesystem.
// Note that fuse cannot be compiled on Windows and Dokan can only
// be compiled on Windows.
package test
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/libfs"
"github.com/keybase/kbfs/libkbfs"
"golang.org/x/net/context"
)
type createUserFn func(t testing.TB, ith int, config *libkbfs.ConfigLocal,
opTimeout time.Duration) *fsUser
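// Note (illustrative, not part of the original file): an implementation
// can also recover the username from the config itself, the same way
// GetUID does below:
//
//	name, _, err := config.KBPKI().GetCurrentUserInfo(context.Background())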
type fsEngine struct {
name string
t testing.TB
createUser createUserFn
// journal directory
journalDir string
}
type fsNode struct {
path string
}
type fsUser struct {
mntDir string
username libkb.NormalizedUsername
config *libkbfs.ConfigLocal
cancel func()
close func()
}
// It's important that this be called, even on error paths, as it may
// do unmounts and release locks.
func (u *fsUser) shutdown() {
u.cancel()
u.close()
}
// Perform Init for the engine
func (*fsEngine) Init() {}
// Name returns the name of the Engine.
func (e *fsEngine) Name() string {
return e.name
}
// GetUID is called by the test harness to retrieve a user instance's UID.
func (e *fsEngine) GetUID(user User) keybase1.UID {
u := user.(*fsUser)
_, uid, err := u.config.KBPKI().GetCurrentUserInfo(context.Background())
if err != nil {
e.t.Fatalf("GetUID: GetCurrentUserInfo failed with %v", err)
}
return uid
}
func buildRootPath(u *fsUser, isPublic bool) string {
var path string
if isPublic {
// TODO: Consolidate all "public" and "private"
// constants in libkbfs.
path = filepath.Join(u.mntDir, "public")
} else {
path = filepath.Join(u.mntDir, "private")
}
return path
}
func buildTlfPath(u *fsUser, tlfName string, isPublic bool) string {
return filepath.Join(buildRootPath(u, isPublic), tlfName)
}
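// Illustrative: for mntDir "/keybase" and tlfName "alice,bob", buildTlfPath
// returns "/keybase/private/alice,bob", or "/keybase/public/alice,bob" when
// isPublic is true.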
func (e *fsEngine) GetFavorites(user User, public bool) (map[string]bool, error) {
u := user.(*fsUser)
path := buildRootPath(u, public)
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
fis, err := f.Readdir(-1)
if err != nil {
return nil, fmt.Errorf("Readdir on %v failed: %q", f, err.Error())
}
favorites := make(map[string]bool)
for _, fi := range fis {
favorites[fi.Name()] = true
}
return favorites, nil
}
// GetRootDir implements the Engine interface.
func (e *fsEngine) GetRootDir(user User, tlfName string, isPublic bool, expectedCanonicalTlfName string) (dir Node, err error) {
u := user.(*fsUser)
preferredName, err := libkbfs.FavoriteNameToPreferredTLFNameFormatAs(u.username,
libkbfs.CanonicalTlfName(tlfName))
if err != nil {
return nil, err
}
expectedPreferredName, err := libkbfs.FavoriteNameToPreferredTLFNameFormatAs(u.username,
libkbfs.CanonicalTlfName(expectedCanonicalTlfName))
if err != nil {
return nil, err
}
path := buildTlfPath(u, tlfName, isPublic)
var realPath string
// TODO currently we pretend that Dokan has no symbolic links
// here and end up dereferencing them. This works but is not
// ideal. (See Lookup.)
if preferredName == expectedPreferredName || e.name == "dokan" {
realPath = path
} else {
realPath, err = filepath.EvalSymlinks(path)
if err != nil {
return nil, err
}
realName := filepath.Base(realPath)
if realName != string(expectedPreferredName) {
return nil, fmt.Errorf(
"Expected preferred TLF name %s, got %s",
expectedPreferredName, realName)
}
}
return fsNode{realPath}, nil
}
// CreateDir is called by the test harness to create a directory relative to the passed
// parent directory for the given user.
func (*fsEngine) CreateDir(u User, parentDir Node, name string) (dir Node, err error) {
p := parentDir.(fsNode)
path := filepath.Join(p.path, name)
err = os.Mkdir(path, 0755)
if err != nil {
return nil, err
}
return fsNode{path}, nil
}
// CreateFile is called by the test harness to create a file in the given directory as
// the given user.
func (*fsEngine) CreateFile(u User, parentDir Node, name string) (file Node, err error) {
p := parentDir.(fsNode)
path := filepath.Join(p.path, name)
f, err := os.Create(path)
if err != nil {
return nil, err
}
f.Close()
return fsNode{path}, nil
}
// CreateFileExcl is called by the test harness to exclusively create a file in
// the given directory as the given user. The file is created with
// O_RDWR|O_CREATE|O_EXCL.
func (*fsEngine) CreateFileExcl(u User, parentDir Node, name string) (file Node, err error) {
p := parentDir.(fsNode).path
f, err := os.OpenFile(filepath.Join(p, name), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
if err != nil {
return nil, err
}
f.Close()
return fsNode{p}, nil
}
// WriteFile is called by the test harness to write to the given file as the given user.
func (*fsEngine) WriteFile(u User, file Node, data []byte, off int64, sync bool) (err error) {
n := file.(fsNode)
f, err := os.OpenFile(n.path, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
return err
}
defer f.Close()
_, err = f.Seek(off, 0)
if err != nil {
return err
}
_, err = f.Write(data)
if err != nil {
return err
}
if !sync {
return nil
}
return f.Sync()
}
// TruncateFile is called by the test harness to truncate the given file as the given user to the given size.
func (*fsEngine) TruncateFile(u User, file Node, size uint64, sync bool) (err error) {
n := file.(fsNode)
f, err := os.OpenFile(n.path, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
return err
}
defer f.Close()
err = f.Truncate(int64(size))
if err != nil {
return err
}
if !sync {
return nil
}
return f.Sync()
}
// RemoveDir is called by the test harness as the given user to remove a subdirectory.
func (*fsEngine) RemoveDir(u User, dir Node, name string) (err error) {
n := dir.(fsNode)
return os.Remove(filepath.Join(n.path, name))
}
// RemoveEntry is called by the test harness as the given user to remove a directory entry.
func (*fsEngine) RemoveEntry(u User, dir Node, name string) (err error) {
n := dir.(fsNode)
return os.Remove(filepath.Join(n.path, name))
}
// Rename is called by the test harness as the given user to rename a node.
func (*fsEngine) Rename(u User, srcDir Node, srcName string, dstDir Node, dstName string) (err error) {
snode := srcDir.(fsNode)
dnode := dstDir.(fsNode)
return os.Rename(
filepath.Join(snode.path, srcName),
filepath.Join(dnode.path, dstName))
}
// ReadFile is called by the test harness to read from the given file as the given user.
func (e *fsEngine) ReadFile(u User, file Node, off int64, bs []byte) (int, error) {
n := file.(fsNode)
f, err := os.Open(n.path)
if err != nil {
return 0, err
}
defer f.Close()
return io.ReadFull(io.NewSectionReader(f, off, int64(len(bs))), bs)
}
// GetDirChildrenTypes is called by the test harness as the given user to return a map of child nodes
// and their type names.
func (*fsEngine) GetDirChildrenTypes(u User, parentDir Node) (children map[string]string, err error) {
n := parentDir.(fsNode)
f, err := os.Open(n.path)
if err != nil {
return nil, err
}
defer f.Close()
fis, err := f.Readdir(-1)
if err != nil {
return nil, fmt.Errorf("Readdir on %v failed: %q", f, err.Error())
}
children = map[string]string{}
for _, fi := range fis {
children[fi.Name()] = fiTypeString(fi)
}
return children, nil
}
func (*fsEngine) DisableUpdatesForTesting(user User, tlfName string, isPublic bool) (err error) {
u := user.(*fsUser)
path := buildTlfPath(u, tlfName, isPublic)
return ioutil.WriteFile(
filepath.Join(path, libfs.DisableUpdatesFileName),
[]byte("off"), 0644)
}
// MakeNaïveStaller implements the Engine interface.
func (*fsEngine) MakeNaïveStaller(u User) *libkbfs.NaïveStaller {
return libkbfs.NewNaïveStaller(u.(*fsUser).config)
}
// ReenableUpdatesForTesting is called by the test harness as the given user to resume updates
// if previously disabled for testing.
func (*fsEngine) ReenableUpdates(user User, tlfName string, isPublic bool) (err error) {
u := user.(*fsUser)
path := buildTlfPath(u, tlfName, isPublic)
return ioutil.WriteFile(
filepath.Join(path, libfs.EnableUpdatesFileName),
[]byte("on"), 0644)
}
// SyncFromServerForTesting is called by the test harness as the given
// user to actively retrieve new metadata for a folder.
func (e *fsEngine) SyncFromServerForTesting(user User, tlfName string, isPublic bool) (err error) {
u := user.(*fsUser)
path := buildTlfPath(u, tlfName, isPublic)
return ioutil.WriteFile(
filepath.Join(path, libfs.SyncFromServerFileName),
[]byte("x"), 0644)
}
// ForceQuotaReclamation implements the Engine interface.
func (*fsEngine) ForceQuotaReclamation(user User, tlfName string, isPublic bool) (err error) {
u := user.(*fsUser)
path := buildTlfPath(u, tlfName, isPublic)
return ioutil.WriteFile(
filepath.Join(path, libfs.ReclaimQuotaFileName),
[]byte("x"), 0644)
}
// AddNewAssertion implements the Engine interface.
func (e *fsEngine) AddNewAssertion(user User, oldAssertion, newAssertion string) error {
u := user.(*fsUser)
return libkbfs.AddNewAssertionForTest(u.config, oldAssertion, newAssertion)
}
// Rekey implements the Engine interface.
func (*fsEngine) Rekey(user User, tlfName string, isPublic bool) error {
u := user.(*fsUser)
path := buildTlfPath(u, tlfName, isPublic)
return ioutil.WriteFile(
filepath.Join(path, libfs.RekeyFileName),
[]byte("x"), 0644)
}
// EnableJournal is called by the test harness as the given user to
// enable journaling.
func (*fsEngine) EnableJournal(user User, tlfName string,
isPublic bool) (err error) {
u := user.(*fsUser)
path := buildTlfPath(u, tlfName, isPublic)
return ioutil.WriteFile(
filepath.Join(path, libfs.EnableJournalFileName),
[]byte("on"), 0644)
}
// PauseJournal is called by the test harness as the given user to
// pause journaling.
func (*fsEngine) PauseJournal(user User, tlfName string,
isPublic bool) (err error) {
u := user.(*fsUser)
path := buildTlfPath(u, tlfName, isPublic)
return ioutil.WriteFile(
filepath.Join(path, libfs.PauseJournalBackgroundWorkFileName),
[]byte("on"), 0644)
}
// ResumeJournal is called by the test harness as the given user to
// resume journaling.
func (*fsEngine) ResumeJournal(user User, tlfName string,
isPublic bool) (err error) {
u := user.(*fsUser)
path := buildTlfPath(u, tlfName, isPublic)
return ioutil.WriteFile(
filepath.Join(path, libfs.ResumeJournalBackgroundWorkFileName),
[]byte("on"), 0644)
}
// FlushJournal is called by the test harness as the given user to
// wait for the journal to flush, if enabled.
func (*fsEngine) FlushJournal(user User, tlfName string,
isPublic bool) (err error) {
u := user.(*fsUser)
path := buildTlfPath(u, tlfName, isPublic)
return ioutil.WriteFile(
filepath.Join(path, libfs.FlushJournalFileName),
[]byte("on"), 0644)
}
func (*fsEngine) UnflushedPaths(user User, tlfName string, isPublic bool) (
[]string, error) {
u := user.(*fsUser)
path := buildTlfPath(u, tlfName, isPublic)
buf, err := ioutil.ReadFile(filepath.Join(path, libfs.StatusFileName))
if err != nil {
return nil, err
}
var bufStatus libkbfs.FolderBranchStatus
err = json.Unmarshal(buf, &bufStatus)
if err != nil {
return nil, err
}
return bufStatus.Journal.UnflushedPaths, nil
}
// Shutdown is called by the test harness when it is done with the
// given user.
func (e *fsEngine) Shutdown(user User) error {
u := user.(*fsUser)
u.shutdown()
// Get the user name before shutting everything down.
var userName libkb.NormalizedUsername
if e.journalDir != "" {
var err error
userName, _, err =
u.config.KBPKI().GetCurrentUserInfo(context.Background())
if err != nil {
return err
}
}
if err := u.config.Shutdown(); err != nil {
return err
}
if e.journalDir != "" {
// Remove the user journal.
if err := os.RemoveAll(
filepath.Join(e.journalDir, userName.String())); err != nil {
return err
}
// Remove the overall journal dir if it's empty.
if err := os.Remove(e.journalDir); err != nil {
e.t.Logf("Journal dir %s not empty yet", e.journalDir)
}
}
return nil
}
// CreateLink is called by the test harness to create a symlink in the given directory as
// the given user.
func (*fsEngine) CreateLink(u User, parentDir Node, fromName string, toPath string) (err error) {
n := parentDir.(fsNode)
return os.Symlink(toPath, filepath.Join(n.path, fromName))
}
// Lookup is called by the test harness to return a node in the given directory by
// its name for the given user. In the case of a symlink the symPath will be set and
// the node will be nil.
func (e *fsEngine) Lookup(u User, parentDir Node, name string) (file Node, symPath string, err error) {
n := parentDir.(fsNode)
path := filepath.Join(n.path, name)
fi, err := os.Lstat(path)
if err != nil {
return nil, "", err
}
// Return if not a symlink
// TODO currently we pretend that Dokan has no symbolic links
// here and end up dereferencing them. This works but is not
// ideal. (See GetRootDir.)
if fi.Mode()&os.ModeSymlink == 0 || e.name == "dokan" {
return fsNode{path}, "", nil
}
symPath, err = os.Readlink(path)
if err != nil {
return nil, "", err
}
return fsNode{path}, symPath, err
}
// SetEx is called by the test harness as the given user to set/unset the executable bit on the
// given file.
func (*fsEngine) SetEx(u User, file Node, ex bool) (err error) {
n := file.(fsNode)
var mode os.FileMode = 0644
if ex {
mode = 0755
}
return os.Chmod(n.path, mode)
}
// SetMtime is called by the test harness as the given user to set the
// mtime on the given file.
func (*fsEngine) SetMtime(u User, file Node, mtime time.Time) (err error) {
n := file.(fsNode)
// KBFS doesn't respect the atime, but we have to give it something
atime := mtime
return os.Chtimes(n.path, atime, mtime)
}
// GetMtime implements the Engine interface.
func (*fsEngine) GetMtime(u User, file Node) (mtime time.Time, err error) {
n := file.(fsNode)
fi, err := os.Lstat(n.path)
if err != nil {
return time.Time{}, err
}
return fi.ModTime(), err
}
func fiTypeString(fi os.FileInfo) string {
m := fi.Mode()
switch {
case m&os.ModeSymlink != 0:
return "SYM"
case m.IsRegular() && m&0100 == 0100:
return "EXEC"
case m.IsRegular():
return "FILE"
case m.IsDir():
return "DIR"
}
return "OTHER"
}
func (e *fsEngine) InitTest(t testing.TB, blockSize int64,
blockChangeSize int64, bwKBps int, opTimeout time.Duration,
users []libkb.NormalizedUsername,
clock libkbfs.Clock, journal bool) map[libkb.NormalizedUsername]User {
e.t = t
res := map[libkb.NormalizedUsername]User{}
initSuccess := false
defer func() {
if !initSuccess {
for _, user := range res {
user.(*fsUser).shutdown()
}
}
}()
if int(opTimeout) > 0 {
// TODO: wrap fs calls in our own timeout-able layer?
t.Logf("Ignoring op timeout for FS test")
}
// create the first user specially
config0 := libkbfs.MakeTestConfigOrBust(t, users...)
config0.SetClock(clock)
setBlockSizes(t, config0, blockSize, blockChangeSize)
maybeSetBw(t, config0, bwKBps)
uids := make([]keybase1.UID, len(users))
cfgs := make([]*libkbfs.ConfigLocal, len(users))
cfgs[0] = config0
uids[0] = nameToUID(t, config0)
for i, name := range users[1:] {
c := libkbfs.ConfigAsUser(config0, name)
c.SetClock(clock)
cfgs[i+1] = c
uids[i+1] = nameToUID(t, c)
}
for i, name := range users {
u := e.createUser(t, i, cfgs[i], opTimeout)
u.username = name
res[name] = u
}
if journal {
jdir, err := ioutil.TempDir(os.TempDir(), "kbfs_journal")
if err != nil {
t.Fatalf("Couldn't enable journaling: %v", err)
}
e.journalDir = jdir
t.Logf("Journal directory: %s", e.journalDir)
for i, c := range cfgs {
c.EnableJournaling(
filepath.Join(jdir, users[i].String()),
libkbfs.TLFJournalBackgroundWorkEnabled)
}
}
initSuccess = true
return res
}
func nameToUID(t testing.TB, config libkbfs.Config) keybase1.UID {
_, uid, err := config.KBPKI().GetCurrentUserInfo(context.Background())
if err != nil {
t.Fatal(err)
}
return uid
}
| 1 | 14,339 | Why can't the implementor get the username from the config? | keybase-kbfs | go |
@@ -87,7 +87,7 @@ func NewParams(raCtx protocol.RunActionsCtx, execution *action.Execution, stateD
BlockNumber: new(big.Int).SetUint64(raCtx.BlockHeight),
Time: new(big.Int).SetInt64(raCtx.BlockTimeStamp),
Difficulty: new(big.Int).SetUint64(uint64(50)),
- GasLimit: raCtx.ActionGasLimit,
+ GasLimit: execution.GasLimit(),
GasPrice: execution.GasPrice(),
}
| 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package evm
import (
"context"
"math"
"math/big"
"github.com/iotexproject/iotex-core/action/protocol/rewarding"
"github.com/iotexproject/go-ethereum/common"
"github.com/iotexproject/go-ethereum/core/vm"
"github.com/iotexproject/go-ethereum/params"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/address"
"github.com/iotexproject/iotex-core/pkg/log"
)
// ErrInconsistentNonce is the error that the nonce is different from executor's nonce
var ErrInconsistentNonce = errors.New("Nonce is not identical to executor nonce")
// CanTransfer checks whether the from account has enough balance
func CanTransfer(db vm.StateDB, fromHash common.Address, balance *big.Int) bool {
return db.GetBalance(fromHash).Cmp(balance) >= 0
}
// MakeTransfer transfers account
func MakeTransfer(db vm.StateDB, fromHash, toHash common.Address, amount *big.Int) {
db.SubBalance(fromHash, amount)
db.AddBalance(toHash, amount)
}
const (
// FailureStatus is the status that contract execution failed
FailureStatus = uint64(0)
// SuccessStatus is the status that contract execution success
SuccessStatus = uint64(1)
)
// Params is the context and parameters
type Params struct {
context vm.Context
nonce uint64
executorRawAddress string
amount *big.Int
contract *common.Address
gas uint64
data []byte
}
// NewParams creates a new context for use in the EVM.
func NewParams(raCtx protocol.RunActionsCtx, execution *action.Execution, stateDB *StateDBAdapter) (*Params, error) {
// If we don't have an explicit author (i.e. not mining), extract from the header
/*
var beneficiary common.Address
if author == nil {
beneficiary, _ = chain.Engine().Author(header) // Ignore error, we're past header validation
} else {
beneficiary = *author
}
*/
executorAddr := common.BytesToAddress(raCtx.Caller.Bytes())
var contractAddrPointer *common.Address
if execution.Contract() != action.EmptyAddress {
contract, err := address.FromString(execution.Contract())
if err != nil {
return nil, errors.Wrap(err, "failed to convert encoded contract address to address")
}
contractAddr := common.BytesToAddress(contract.Bytes())
contractAddrPointer = &contractAddr
}
producer := common.BytesToAddress(raCtx.Producer.Bytes())
context := vm.Context{
CanTransfer: CanTransfer,
Transfer: MakeTransfer,
GetHash: GetHashFn(stateDB),
Origin: executorAddr,
Coinbase: producer,
BlockNumber: new(big.Int).SetUint64(raCtx.BlockHeight),
Time: new(big.Int).SetInt64(raCtx.BlockTimeStamp),
Difficulty: new(big.Int).SetUint64(uint64(50)),
GasLimit: raCtx.ActionGasLimit,
GasPrice: execution.GasPrice(),
}
return &Params{
context,
execution.Nonce(),
raCtx.Caller.String(),
execution.Amount(),
contractAddrPointer,
execution.GasLimit(),
execution.Data(),
}, nil
}
// GetHashFn returns a GetHashFunc which retrieves hashes by number
func GetHashFn(stateDB *StateDBAdapter) func(n uint64) common.Hash {
return func(n uint64) common.Hash {
hash, err := stateDB.cm.GetHashByHeight(stateDB.blockHeight - n)
// return the hash on success and an empty hash when the lookup fails
if err != nil {
return common.Hash{}
}
return common.BytesToHash(hash[:])
}
}
func securityDeposit(ps *Params, stateDB vm.StateDB, gasLimit *uint64) error {
executorNonce := stateDB.GetNonce(ps.context.Origin)
if executorNonce > ps.nonce {
log.S().Errorf("Nonce on %v: %d vs %d", ps.context.Origin, executorNonce, ps.nonce)
// TODO ignore inconsistent nonce problem until the actions are executed sequentially
// return ErrInconsistentNonce
}
if *gasLimit < ps.gas {
return action.ErrHitGasLimit
}
maxGasValue := new(big.Int).Mul(new(big.Int).SetUint64(ps.gas), ps.context.GasPrice)
if stateDB.GetBalance(ps.context.Origin).Cmp(maxGasValue) < 0 {
return action.ErrInsufficientBalanceForGas
}
*gasLimit -= ps.gas
stateDB.SubBalance(ps.context.Origin, maxGasValue)
return nil
}
// ExecuteContract processes a transfer which contains a contract
func ExecuteContract(
ctx context.Context,
sm protocol.StateManager,
execution *action.Execution,
cm protocol.ChainManager,
) (*action.Receipt, error) {
raCtx := protocol.MustGetRunActionsCtx(ctx)
stateDB := NewStateDBAdapter(cm, sm, raCtx.BlockHeight, raCtx.BlockHash, execution.Hash())
ps, err := NewParams(raCtx, execution, stateDB)
if err != nil {
return nil, err
}
retval, depositGas, remainingGas, contractAddress, err := executeInEVM(ps, stateDB, raCtx.GasLimit)
receipt := &action.Receipt{
ReturnValue: retval,
GasConsumed: ps.gas - remainingGas,
ActHash: execution.Hash(),
ContractAddress: contractAddress,
}
if err != nil {
receipt.Status = FailureStatus
} else {
receipt.Status = SuccessStatus
}
if remainingGas > 0 {
*raCtx.GasLimit += remainingGas
remainingValue := new(big.Int).Mul(new(big.Int).SetUint64(remainingGas), ps.context.GasPrice)
stateDB.AddBalance(ps.context.Origin, remainingValue)
}
if depositGas-remainingGas > 0 {
gasValue := new(big.Int).Mul(new(big.Int).SetUint64(depositGas-remainingGas), ps.context.GasPrice)
if err := rewarding.DepositGas(ctx, sm, gasValue, raCtx.Registry); err != nil {
return nil, err
}
}
if err := stateDB.commitContracts(); err != nil {
return nil, errors.Wrap(err, "failed to commit contracts to underlying db")
}
stateDB.clear()
receipt.Logs = stateDB.Logs()
log.S().Debugf("Receipt: %+v, %v", receipt, err)
return receipt, err
}
func getChainConfig() *params.ChainConfig {
var chainConfig params.ChainConfig
// chainConfig.ChainID
chainConfig.ConstantinopleBlock = new(big.Int).SetUint64(0) // Constantinople switch block (nil = no fork, 0 = already activated)
return &chainConfig
}
func executeInEVM(evmParams *Params, stateDB *StateDBAdapter, gasLimit *uint64) ([]byte, uint64, uint64, string, error) {
remainingGas := evmParams.gas
if err := securityDeposit(evmParams, stateDB, gasLimit); err != nil {
return nil, 0, 0, action.EmptyAddress, err
}
var config vm.Config
chainConfig := getChainConfig()
evm := vm.NewEVM(evmParams.context, stateDB, chainConfig, config)
intriGas, err := intrinsicGas(evmParams.data)
if err != nil {
return nil, evmParams.gas, remainingGas, action.EmptyAddress, err
}
if remainingGas < intriGas {
return nil, evmParams.gas, remainingGas, action.EmptyAddress, action.ErrOutOfGas
}
remainingGas -= intriGas
contractRawAddress := action.EmptyAddress
executor := vm.AccountRef(evmParams.context.Origin)
var ret []byte
if evmParams.contract == nil {
// create contract
var evmContractAddress common.Address
ret, evmContractAddress, remainingGas, err = evm.Create(executor, evmParams.data, remainingGas, evmParams.amount)
log.L().Warn("evm Create.", log.Hex("addrHash", evmContractAddress[:]))
if err != nil {
return nil, evmParams.gas, remainingGas, action.EmptyAddress, err
}
contractAddress, err := address.FromBytes(evmContractAddress.Bytes())
if err != nil {
return nil, evmParams.gas, remainingGas, action.EmptyAddress, err
}
contractRawAddress = contractAddress.String()
} else {
stateDB.SetNonce(evmParams.context.Origin, stateDB.GetNonce(evmParams.context.Origin)+1)
// process contract
ret, remainingGas, err = evm.Call(executor, *evmParams.contract, evmParams.data, remainingGas, evmParams.amount)
}
if err == nil {
err = stateDB.Error()
}
if err == vm.ErrInsufficientBalance {
return nil, evmParams.gas, remainingGas, action.EmptyAddress, err
}
refund := (evmParams.gas - remainingGas) / 2
if refund > stateDB.GetRefund() {
refund = stateDB.GetRefund()
}
remainingGas += refund
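// Illustrative: with 100,000 gas supplied and 40,000 remaining, 60,000 was
// consumed, so the refund is capped at min(30,000, stateDB.GetRefund())
// before being added back to remainingGas above.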
if err != nil {
// TODO (zhi) should we refund if any error
// return nil, evmParams.gas, 0, contractRawAddress, err
}
// TODO (zhi) figure out what the following function does
// stateDB.Finalise(true)
return ret, evmParams.gas, remainingGas, contractRawAddress, nil
}
// intrinsicGas returns the intrinsic gas of an execution
func intrinsicGas(data []byte) (uint64, error) {
dataSize := uint64(len(data))
if (math.MaxInt64-action.ExecutionBaseIntrinsicGas)/action.ExecutionDataGas < dataSize {
return 0, action.ErrOutOfGas
}
return dataSize*action.ExecutionDataGas + action.ExecutionBaseIntrinsicGas, nil
}
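// Illustrative: for a 32-byte payload the intrinsic gas above is
// 32*ExecutionDataGas + ExecutionBaseIntrinsicGas; the overflow guard only
// rejects data large enough to overflow that computation.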
| 1 | 15,260 | @CoderZhi Please confirm if this change makes sense | iotexproject-iotex-core | go |
@@ -127,6 +127,7 @@ public enum JsonRpcError {
PMT_FAILED_INTRINSIC_GAS_EXCEEDS_LIMIT(
-50100,
"Private Marker Transaction failed due to intrinsic gas exeeding the limit. Gas limit used from the Private Transaction."),
+ PRIVATE_SUBSCRIPTION_MULTI_TENANCY_ERROR(-50100, "foo."),
CANT_CONNECT_TO_LOCAL_PEER(-32100, "Cannot add local node as peer."),
| 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.response;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonFormat;
import com.fasterxml.jackson.annotation.JsonGetter;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonFormat(shape = JsonFormat.Shape.OBJECT)
public enum JsonRpcError {
// Standard errors
PARSE_ERROR(-32700, "Parse error"),
INVALID_REQUEST(-32600, "Invalid Request"),
METHOD_NOT_FOUND(-32601, "Method not found"),
INVALID_PARAMS(-32602, "Invalid params"),
INTERNAL_ERROR(-32603, "Internal error"),
METHOD_NOT_ENABLED(-32604, "Method not enabled"),
// eth_sendTransaction specific error message
ETH_SEND_TX_NOT_AVAILABLE(
-32604,
"The method eth_sendTransaction is not supported. Use eth_sendRawTransaction to send a signed transaction to Besu."),
// P2P related errors
P2P_DISABLED(-32000, "P2P has been disabled. This functionality is not available"),
P2P_NETWORK_NOT_RUNNING(-32000, "P2P network is not running"),
// Filter & Subscription Errors
FILTER_NOT_FOUND(-32000, "Filter not found"),
LOGS_FILTER_NOT_FOUND(-32000, "Logs filter not found"),
SUBSCRIPTION_NOT_FOUND(-32000, "Subscription not found"),
NO_MINING_WORK_FOUND(-32000, "No mining work available yet"),
// Transaction validation failures
NONCE_TOO_LOW(-32001, "Nonce too low"),
INVALID_TRANSACTION_SIGNATURE(-32002, "Invalid signature"),
INTRINSIC_GAS_EXCEEDS_LIMIT(-32003, "Intrinsic gas exceeds gas limit"),
TRANSACTION_UPFRONT_COST_EXCEEDS_BALANCE(-32004, "Upfront cost exceeds account balance"),
EXCEEDS_BLOCK_GAS_LIMIT(-32005, "Transaction gas limit exceeds block gas limit"),
INCORRECT_NONCE(-32006, "Incorrect nonce"),
TX_SENDER_NOT_AUTHORIZED(-32007, "Sender account not authorized to send transactions"),
CHAIN_HEAD_WORLD_STATE_NOT_AVAILABLE(-32008, "Initial sync is still in progress"),
GAS_PRICE_TOO_LOW(-32009, "Gas price below configured minimum gas price"),
// Miner failures
COINBASE_NOT_SET(-32010, "Coinbase not set. Unable to start mining without a coinbase"),
NO_HASHES_PER_SECOND(-32011, "No hashes being generated by the current node"),
// Wallet errors
COINBASE_NOT_SPECIFIED(-32000, "Coinbase must be explicitly specified"),
// Account errors
NO_ACCOUNT_FOUND(-32000, "Account not found"),
// Worldstate errors
WORLD_STATE_UNAVAILABLE(-32000, "World state unavailable"),
// Debug failures
PARENT_BLOCK_NOT_FOUND(-32000, "Parent block not found"),
// Permissioning/Account whitelist errors
ACCOUNT_WHITELIST_NOT_ENABLED(-32000, "Account whitelisting has not been enabled"),
ACCOUNT_WHITELIST_EMPTY_ENTRY(-32000, "Request contains an empty list of accounts"),
ACCOUNT_WHITELIST_INVALID_ENTRY(-32000, "Request contains an invalid account"),
ACCOUNT_WHITELIST_DUPLICATED_ENTRY(-32000, "Request contains duplicate accounts"),
ACCOUNT_WHITELIST_EXISTING_ENTRY(-32000, "Cannot add an existing account to whitelist"),
ACCOUNT_WHITELIST_ABSENT_ENTRY(-32000, "Cannot remove an absent account from whitelist"),
// Permissioning/Node whitelist errors
NODE_WHITELIST_NOT_ENABLED(-32000, "Node whitelisting has not been enabled"),
NODE_WHITELIST_EMPTY_ENTRY(-32000, "Request contains an empty list of nodes"),
NODE_WHITELIST_INVALID_ENTRY(-32000, "Request contains an invalid node"),
NODE_WHITELIST_DUPLICATED_ENTRY(-32000, "Request contains duplicate nodes"),
NODE_WHITELIST_EXISTING_ENTRY(-32000, "Cannot add an existing node to whitelist"),
NODE_WHITELIST_MISSING_ENTRY(-32000, "Cannot remove an absent node from whitelist"),
NODE_WHITELIST_FIXED_NODE_CANNOT_BE_REMOVED(
-32000, "Cannot remove a fixed node (bootnode or static node) from whitelist"),
// Permissioning/persistence errors
WHITELIST_PERSIST_FAILURE(
-32000, "Unable to persist changes to whitelist configuration file. Changes reverted"),
WHITELIST_FILE_SYNC(
-32000,
"The permissioning whitelist configuration file is out of sync. The changes have been applied, but not persisted to disk"),
WHITELIST_RELOAD_ERROR(
-32000,
"Error reloading permissions file. Please use perm_getAccountsWhitelist and perm_getNodesWhitelist to review the current state of the whitelists"),
PERMISSIONING_NOT_ENABLED(-32000, "Node/Account whitelisting has not been enabled"),
NON_PERMITTED_NODE_CANNOT_BE_ADDED_AS_A_PEER(-32000, "Cannot add a non-permitted node as a peer"),
// Permissioning/Authorization errors
UNAUTHORIZED(-40100, "Unauthorized"),
// Private transaction errors
ENCLAVE_ERROR(-50100, "Error communicating with enclave"),
UNIMPLEMENTED_PRIVATE_TRANSACTION_TYPE(-50100, "Unimplemented private transaction type"),
PRIVACY_NOT_ENABLED(-50100, "Privacy is not enabled"),
CREATE_PRIVACY_GROUP_ERROR(-50100, "Error creating privacy group"),
DELETE_PRIVACY_GROUP_ERROR(-50100, "Error deleting privacy group"),
FIND_PRIVACY_GROUP_ERROR(-50100, "Error finding privacy group"),
FIND_ON_CHAIN_PRIVACY_GROUP_ERROR(-50100, "Error finding on-chain privacy group"),
VALUE_NOT_ZERO(-50100, "We cannot transfer ether in a private transaction yet."),
DECODE_ERROR(-50100, "Unable to decode the private signed raw transaction"),
GET_PRIVATE_TRANSACTION_NONCE_ERROR(-50100, "Unable to determine nonce for account in group."),
OFFCHAIN_PRIVACY_GROUP_DOES_NOT_EXIST(-50100, "Offchain Privacy group does not exist."),
ONCCHAIN_PRIVACY_GROUP_DOES_NOT_EXIST(-50100, "Onchain Privacy group does not exist."),
ONCHAIN_PRIVACY_GROUP_NOT_ENABLED(-50100, "Onchain privacy groups not enabled."),
OFFCHAIN_PRIVACY_GROUP_NOT_ENABLED(
-50100, "Offchain privacy group can't be used with Onchain privacy groups enabled."),
ONCHAIN_PRIVACY_GROUP_ID_NOT_AVAILABLE(
-50100, "Private transactions to on-chain privacy groups must use privacyGroupId"),
PRIVATE_FROM_DOES_NOT_MATCH_ENCLAVE_PUBLIC_KEY(
-50100, "Private from does not match enclave public key"),
PMT_FAILED_INTRINSIC_GAS_EXCEEDS_LIMIT(
-50100,
"Private Marker Transaction failed due to intrinsic gas exeeding the limit. Gas limit used from the Private Transaction."),
CANT_CONNECT_TO_LOCAL_PEER(-32100, "Cannot add local node as peer."),
// Invalid input errors
ENODE_ID_INVALID(
-32000,
"Invalid node ID: node ID must have exactly 128 hexadecimal characters and should not include any '0x' hex prefix."),
// Enclave errors
NODE_MISSING_PEER_URL(-50200, "NodeMissingPeerUrl"),
NODE_PUSHING_TO_PEER(-50200, "NodePushingToPeer"),
NODE_PROPAGATING_TO_ALL_PEERS(-50200, "NodePropagatingToAllPeers"),
NO_SENDER_KEY(-50200, "NoSenderKey"),
INVALID_PAYLOAD(-50200, "InvalidPayload"),
ENCLAVE_CREATE_KEY_PAIR(-50200, "EnclaveCreateKeyPair"),
ENCLAVE_DECODE_PUBLIC_KEY(-50200, "EnclaveDecodePublicKey"),
ENCLAVE_DECRYPT_WRONG_PRIVATE_KEY(-50200, "EnclaveDecryptWrongPrivateKey"),
ENCLAVE_ENCRYPT_COMBINE_KEYS(-50200, "EnclaveEncryptCombineKeys"),
ENCLAVE_MISSING_PRIVATE_KEY_PASSWORD(-50200, "EnclaveMissingPrivateKeyPasswords"),
ENCLAVE_NO_MATCHING_PRIVATE_KEY(-50200, "EnclaveNoMatchingPrivateKey"),
ENCLAVE_NOT_PAYLOAD_OWNER(-50200, "EnclaveNotPayloadOwner"),
ENCLAVE_UNSUPPORTED_PRIVATE_KEY_TYPE(-50200, "EnclaveUnsupportedPrivateKeyType"),
ENCLAVE_STORAGE_DECRYPT(-50200, "EnclaveStorageDecrypt"),
ENCLAVE_PRIVACY_GROUP_CREATION(-50200, "EnclavePrivacyGroupIdCreation"),
ENCLAVE_PAYLOAD_NOT_FOUND(-50200, "EnclavePayloadNotFound"),
CREATE_GROUP_INCLUDE_SELF(-50200, "CreatePrivacyGroupShouldIncludeSelf"),
/** Storing privacy group issue */
ENCLAVE_UNABLE_STORE_PRIVACY_GROUP(-50200, "PrivacyGroupNotStored"),
ENCLAVE_UNABLE_DELETE_PRIVACY_GROUP(-50200, "PrivacyGroupNotDeleted"),
ENCLAVE_UNABLE_PUSH_DELETE_PRIVACY_GROUP(-50200, "PrivacyGroupNotPushed"),
ENCLAVE_PRIVACY_GROUP_MISSING(-50200, "PrivacyGroupNotFound"),
ENCLAVE_PRIVACY_QUERY_ERROR(-50200, "PrivacyGroupQueryError"),
ENCLAVE_KEYS_CANNOT_DECRYPT_PAYLOAD(-50200, "EnclaveKeysCannotDecryptPayload"),
METHOD_UNIMPLEMENTED(-50200, "MethodUnimplemented"),
/** Plugins error */
PLUGIN_NOT_FOUND(-60000, "Plugin not found");
private final int code;
private final String message;
JsonRpcError(final int code, final String message) {
this.code = code;
this.message = message;
}
@JsonGetter("code")
public int getCode() {
return code;
}
@JsonGetter("message")
public String getMessage() {
return message;
}
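  // Illustrative note: fromJson resolves the constant whose code and message both
  // match, e.g. fromJson(-40100, "Unauthorized") yields UNAUTHORIZED; any
  // unmatched (code, message) pair yields null.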
@JsonCreator
public static JsonRpcError fromJson(
@JsonProperty("code") final int code, @JsonProperty("message") final String message) {
for (final JsonRpcError error : JsonRpcError.values()) {
if (error.code == code && error.message.equals(message)) {
return error;
}
}
return null;
}
}
| 1 | 22,512 | Do you think you can come up with a better message? :-) It looks like this is not used anywhere. So maybe just delete the line. | hyperledger-besu | java |
@@ -1065,11 +1065,7 @@ public interface Traversable<T> extends Foldable<T>, Value<T> {
@Override
default T reduceLeft(BiFunction<? super T, ? super T, ? extends T> op) {
Objects.requireNonNull(op, "op is null");
- if (isEmpty()) {
- throw new NoSuchElementException("reduceLeft on Nil");
- } else {
- return tail().foldLeft(head(), op);
- }
+ return iterator().reduceLeft(op);
}
/** | 1 | /* __ __ __ __ __ ___
* \ \ / / \ \ / / __/
* \ \/ / /\ \ \/ / /
* \____/__/ \__\____/__/.ɪᴏ
* ᶜᵒᵖʸʳᶦᵍʰᵗ ᵇʸ ᵛᵃᵛʳ ⁻ ˡᶦᶜᵉⁿˢᵉᵈ ᵘⁿᵈᵉʳ ᵗʰᵉ ᵃᵖᵃᶜʰᵉ ˡᶦᶜᵉⁿˢᵉ ᵛᵉʳˢᶦᵒⁿ ᵗʷᵒ ᵈᵒᵗ ᶻᵉʳᵒ
*/
package io.vavr.collection;
import io.vavr.PartialFunction;
import io.vavr.Tuple2;
import io.vavr.Tuple3;
import io.vavr.Value;
import io.vavr.control.Option;
import java.math.BigInteger;
import java.util.Comparator;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
/**
* An interface for inherently recursive, multi-valued data structures. The order of elements is determined by
* {@link Iterable#iterator()}, which may vary each time it is called.
*
* <p>
* Basic operations:
*
* <ul>
* <li>{@link #collect(PartialFunction)}</li>
* <li>{@link #contains(Object)}</li>
* <li>{@link #containsAll(Iterable)}</li>
* <li>{@link #head()}</li>
* <li>{@link #headOption()}</li>
* <li>{@link #init()}</li>
* <li>{@link #initOption()}</li>
* <li>{@link #isEmpty()}</li>
* <li>{@link #last()}</li>
* <li>{@link #lastOption()}</li>
* <li>{@link #length()}</li>
* <li>{@link #size()}</li>
* <li>{@link #tail()}</li>
* <li>{@link #tailOption()}</li>
* </ul>
*
* Iteration:
*
* <ul>
* <li>{@link #grouped(int)}</li>
* <li>{@link #iterator()}</li>
* <li>{@link #slideBy(Function)}</li>
* <li>{@link #sliding(int)}</li>
* <li>{@link #sliding(int, int)}</li>
* </ul>
*
* Numeric operations:
*
* <ul>
* <li>{@link #average()}</li>
* <li>{@link #max()}</li>
* <li>{@link #maxBy(Comparator)}</li>
* <li>{@link #maxBy(Function)}</li>
* <li>{@link #min()}</li>
* <li>{@link #minBy(Comparator)}</li>
* <li>{@link #minBy(Function)}</li>
* <li>{@link #product()}</li>
* <li>{@link #sum()}</li>
* </ul>
*
* Reduction/Folding:
*
* <ul>
* <li>{@link #count(Predicate)}</li>
* <li>{@link #fold(Object, BiFunction)}</li>
* <li>{@link #foldLeft(Object, BiFunction)}</li>
* <li>{@link #foldRight(Object, BiFunction)}</li>
* <li>{@link #mkString()}</li>
* <li>{@link #mkString(CharSequence)}</li>
* <li>{@link #mkString(CharSequence, CharSequence, CharSequence)}</li>
* <li>{@link #reduce(BiFunction)}</li>
* <li>{@link #reduceOption(BiFunction)}</li>
* <li>{@link #reduceLeft(BiFunction)}</li>
* <li>{@link #reduceLeftOption(BiFunction)}</li>
* <li>{@link #reduceRight(BiFunction)}</li>
* <li>{@link #reduceRightOption(BiFunction)}</li>
* </ul>
*
* Selection:
*
* <ul>
* <li>{@link #drop(int)}</li>
* <li>{@link #dropRight(int)}</li>
* <li>{@link #dropUntil(Predicate)}</li>
* <li>{@link #dropWhile(Predicate)}</li>
* <li>{@link #filter(Predicate)}</li>
* <li>{@link #find(Predicate)}</li>
* <li>{@link #findLast(Predicate)}</li>
* <li>{@link #groupBy(Function)}</li>
* <li>{@link #partition(Predicate)}</li>
* <li>{@link #retainAll(Iterable)}</li>
* <li>{@link #take(int)}</li>
* <li>{@link #takeRight(int)}</li>
* <li>{@link #takeUntil(Predicate)}</li>
* <li>{@link #takeWhile(Predicate)}</li>
* </ul>
*
* Tests:
*
* <ul>
* <li>{@link #existsUnique(Predicate)}</li>
* <li>{@link #hasDefiniteSize()}</li>
* <li>{@link #isDistinct()}</li>
* <li>{@link #isOrdered()}</li>
* <li>{@link #isSequential()}</li>
* <li>{@link #isTraversableAgain()}</li>
* </ul>
*
* Transformation:
*
* <ul>
* <li>{@link #distinct()}</li>
* <li>{@link #distinctBy(Comparator)}</li>
* <li>{@link #distinctBy(Function)}</li>
* <li>{@link #flatMap(Function)}</li>
* <li>{@link #map(Function)}</li>
* <li>{@link #replace(Object, Object)}</li>
* <li>{@link #replaceAll(Object, Object)}</li>
* <li>{@link #scan(Object, BiFunction)}</li>
* <li>{@link #scanLeft(Object, BiFunction)}</li>
* <li>{@link #scanRight(Object, BiFunction)}</li>
* <li>{@link #span(Predicate)}</li>
* <li>{@link #unzip(Function)}</li>
* <li>{@link #unzip3(Function)}</li>
* <li>{@link #zip(Iterable)}</li>
* <li>{@link #zipAll(Iterable, Object, Object)}</li>
* <li>{@link #zipWithIndex()}</li>
* </ul>
*
* @param <T> Component type
* @author Daniel Dietrich and others
*/
public interface Traversable<T> extends Foldable<T>, Value<T> {
/**
* Narrows a widened {@code Traversable<? extends T>} to {@code Traversable<T>}
* by performing a type-safe cast. This is eligible because immutable/read-only
* collections are covariant.
*
     * @param traversable A {@code Traversable}.
* @param <T> Component type of the {@code Traversable}.
* @return the given {@code traversable} instance as narrowed type {@code Traversable<T>}.
*/
@SuppressWarnings("unchecked")
static <T> Traversable<T> narrow(Traversable<? extends T> traversable) {
return (Traversable<T>) traversable;
}
/**
* Matches each element with a unique key that you extract from it.
* If the same key is present twice, the function will return {@code None}.
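     * <p>
     * Example (an illustrative sketch, using {@code List}):
     * <pre>{@code
     * List.of("a", "bb", "ccc").arrangeBy(String::length) // = Some of a Map {1: "a", 2: "bb", 3: "ccc"}
     * List.of("a", "b").arrangeBy(String::length)         // = None, because both elements map to the key 1
     * }</pre>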
*
* @param getKey A function which extracts a key from elements
* @param <K> key class type
* @return A Map containing the elements arranged by their keys.
* @throws NullPointerException if {@code getKey} is null.
* @see #groupBy(Function)
*/
default <K> Option<Map<K, T>> arrangeBy(Function<? super T, ? extends K> getKey) {
return Option.of(groupBy(getKey).mapValues(Traversable<T>::singleOption))
.filter(map -> !map.exists(kv -> kv._2.isEmpty()))
.map(map -> Map.narrow(map.mapValues(Option::get)));
}
/**
* Calculates the average of this elements. Returns {@code None} if this is empty, otherwise {@code Some(average)}.
* Supported component types are {@code Byte}, {@code Double}, {@code Float}, {@code Integer}, {@code Long},
* {@code Short}, {@code BigInteger} and {@code BigDecimal}.
* <p>
* Examples:
* <pre>
* <code>
* List.empty().average() // = None
* List.of(1, 2, 3).average() // = Some(2.0)
* List.of(0.1, 0.2, 0.3).average() // = Some(0.2)
* List.of("apple", "pear").average() // throws
* </code>
* </pre>
*
* @return {@code Some(average)} or {@code None}, if there are no elements
* @throws UnsupportedOperationException if this elements are not numeric
*/
@SuppressWarnings({ "unchecked", "OptionalGetWithoutIsPresent" })
default Option<Double> average() {
if (isEmpty()) {
return Option.none();
} else {
final Traversable<?> objects = isTraversableAgain() ? this : toStream();
final Object o = objects.head();
if (o instanceof Number) {
final Traversable<Number> numbers = (Traversable<Number>) objects;
final double d;
if (o instanceof Integer || o instanceof Long || o instanceof Byte || o instanceof BigInteger || o instanceof Short) {
d = numbers.toJavaStream()
.mapToLong(Number::longValue)
.average()
.getAsDouble();
} else {
d = numbers.toJavaStream()
.mapToDouble(Number::doubleValue)
.average()
.getAsDouble();
}
return Option.some(d);
} else {
throw new UnsupportedOperationException("not numeric");
}
}
}
/**
* Collects all elements that are in the domain of the given {@code partialFunction} by mapping the elements to type {@code R}.
* <p>
* More specifically, for each of this elements in iteration order first it is checked
*
* <pre>{@code
* partialFunction.isDefinedAt(element)
* }</pre>
*
     * If the element makes it through that filter, the mapped instance is added to the result collection
*
* <pre>{@code
* R newElement = partialFunction.apply(element)
* }</pre>
*
     * <strong>Note:</strong> If this {@code Traversable} is ordered (i.e. extends {@link Ordered}),
* the caller of {@code collect} has to ensure that the elements are comparable (i.e. extend {@link Comparable}).
*
     * @param partialFunction A function that is not necessarily defined for all elements of this traversable.
* @param <R> The new element type
* @return A new {@code Traversable} instance containing elements of type {@code R}
* @throws NullPointerException if {@code partialFunction} is null
*/
<R> Traversable<R> collect(PartialFunction<? super T, ? extends R> partialFunction);
/**
* Tests if this Traversable contains all given elements.
* <p>
* The result is equivalent to
* {@code elements.isEmpty() ? true : contains(elements.head()) && containsAll(elements.tail())} but implemented
* without recursion.
*
* @param elements A List of values of type T.
* @return true, if this List contains all given elements, false otherwise.
* @throws NullPointerException if {@code elements} is null
*/
default boolean containsAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
for (T element : elements) {
if (!contains(element)) {
return false;
}
}
return true;
}
/**
* Counts the elements which satisfy the given predicate.
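     * <p>
     * Example (illustrative, using {@code List}):
     * <pre>{@code
     * List.of(1, 2, 3, 4).count(i -> i % 2 == 0) // = 2
     * }</pre>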
*
* @param predicate A predicate
* @return A number {@code >= 0}
* @throws NullPointerException if {@code predicate} is null.
*/
default int count(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return foldLeft(0, (i, t) -> predicate.test(t) ? i + 1 : i);
}
/**
* Returns a new version of this which contains no duplicates. Elements are compared using {@code equals}.
*
* @return a new {@code Traversable} containing this elements without duplicates
*/
Traversable<T> distinct();
/**
* Returns a new version of this which contains no duplicates. Elements are compared using the given
* {@code comparator}.
*
* @param comparator A comparator
* @return a new {@code Traversable} containing this elements without duplicates
* @throws NullPointerException if {@code comparator} is null.
*/
Traversable<T> distinctBy(Comparator<? super T> comparator);
/**
     * Returns a new version of this which contains no duplicates. Elements are mapped to keys which are compared using
* {@code equals}.
* <p>
* The elements of the result are determined in the order of their occurrence - first match wins.
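     * <p>
     * Example (illustrative, using {@code List}):
     * <pre>{@code
     * List.of("a", "b", "aa").distinctBy(String::length) // = List("a", "aa"), "b" maps to the already seen key 1
     * }</pre>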
*
* @param keyExtractor A key extractor
* @param <U> key type
* @return a new {@code Traversable} containing this elements without duplicates
* @throws NullPointerException if {@code keyExtractor} is null
*/
<U> Traversable<T> distinctBy(Function<? super T, ? extends U> keyExtractor);
/**
* Drops the first n elements of this or all elements, if this length < n.
*
* @param n The number of elements to drop.
* @return a new instance consisting of all elements of this except the first n ones, or else the empty instance,
* if this has less than n elements.
*/
Traversable<T> drop(int n);
/**
* Drops the last n elements of this or all elements, if this length < n.
*
* @param n The number of elements to drop.
* @return a new instance consisting of all elements of this except the last n ones, or else the empty instance,
* if this has less than n elements.
*/
Traversable<T> dropRight(int n);
/**
* Drops elements until the predicate holds for the current element.
*
* @param predicate A condition tested subsequently for this elements.
* @return a new instance consisting of all elements starting from the first one which does satisfy the given
* predicate.
* @throws NullPointerException if {@code predicate} is null
*/
Traversable<T> dropUntil(Predicate<? super T> predicate);
/**
* Drops elements while the predicate holds for the current element.
* <p>
* Note: This is essentially the same as {@code dropUntil(predicate.negate())}.
* It is intended to be used with method references, which cannot be negated directly.
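     * <p>
     * Example (illustrative, using {@code List}):
     * <pre>{@code
     * List.of(1, 2, 3, 1).dropWhile(i -> i < 3) // = List(3, 1)
     * }</pre>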
*
* @param predicate A condition tested subsequently for this elements.
* @return a new instance consisting of all elements starting from the first one which does not satisfy the
* given predicate.
* @throws NullPointerException if {@code predicate} is null
*/
Traversable<T> dropWhile(Predicate<? super T> predicate);
/**
* In Vavr there are four basic classes of collections:
*
* <ul>
* <li>Seq (sequential elements)</li>
* <li>Set (distinct elements)</li>
* <li>Map (indexed elements)</li>
* <li>Multimap (indexed collections)</li>
* </ul>
*
* Two collection instances of these classes are equal if and only if both collections
*
* <ul>
* <li>belong to the same basic collection class (Seq, Set, Map or Multimap)</li>
* <li>contain the same elements</li>
* <li>have the same element order, if the collections are of type Seq</li>
* </ul>
*
     * Two Map/Multimap elements, resp. entries, (key1, value1) and (key2, value2) are equal
* if the keys are equal and the values are equal.
* <p>
* <strong>Notes:</strong>
*
* <ul>
* <li>No collection instance equals null, e.g. Queue(1) not equals null.</li>
* <li>Nulls are allowed and handled as expected, e.g. List(null, 1) equals Stream(null, 1)
* and HashMap((null, 1)) equals LinkedHashMap((null, 1)).
* </li>
* <li>The element order is taken into account for Seq only.
* E.g. List(null, 1) not equals Stream(1, null)
* and HashMap((null, 1), ("a", null)) equals LinkedHashMap(("a", null), (null, 1)).
* The reason is, that we do not know which implementations we compare when having
* two instances of type Map, Multimap or Set (see <a href="https://en.wikipedia.org/wiki/Liskov_substitution_principle">Liskov Substitution Principle</a>).</li>
* <li>Other collection classes are equal if their types are equal and their elements are equal (in iteration order).</li>
* <li>Iterator equality is defined to be object reference equality.</li>
* </ul>
*
* @param obj an object, may be null
* @return true, if this collection equals the given object according to the rules described above, false otherwise.
*/
boolean equals(Object obj);
/**
     * Checks if a unique element exists such that the predicate holds.
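     * <p>
     * Example (illustrative, using {@code List}):
     * <pre>{@code
     * List.of(1, 2, 3).existsUnique(i -> i == 2) // = true
     * List.of(1, 2, 2).existsUnique(i -> i == 2) // = false
     * }</pre>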
*
* @param predicate A Predicate
* @return true, if predicate holds for a unique element, false otherwise
* @throws NullPointerException if {@code predicate} is null
*/
default boolean existsUnique(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
boolean exists = false;
for (T t : this) {
if (predicate.test(t)) {
if (exists) {
return false;
} else {
exists = true;
}
}
}
return exists;
}
/**
* Returns a new traversable consisting of all elements which satisfy the given predicate.
*
* @param predicate A predicate
* @return a new traversable
* @throws NullPointerException if {@code predicate} is null
*/
Traversable<T> filter(Predicate<? super T> predicate);
/**
* Returns the first element of this which satisfies the given predicate.
*
* @param predicate A predicate.
* @return Some(element) or None, where element may be null (i.e. {@code List.of(null).find(e -> e == null)}).
* @throws NullPointerException if {@code predicate} is null
*/
default Option<T> find(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
for (T a : this) {
if (predicate.test(a)) {
return Option.some(a); // may be Some(null)
}
}
return Option.none();
}
/**
* Returns the last element of this which satisfies the given predicate.
* <p>
* Same as {@code reverse().find(predicate)}.
*
* @param predicate A predicate.
* @return Some(element) or None, where element may be null (i.e. {@code List.of(null).find(e -> e == null)}).
* @throws NullPointerException if {@code predicate} is null
*/
default Option<T> findLast(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return iterator().findLast(predicate);
}
/**
* FlatMaps this Traversable.
*
* @param mapper A mapper
* @param <U> The resulting component type.
* @return A new Traversable instance.
*/
<U> Traversable<U> flatMap(Function<? super T, ? extends Iterable<? extends U>> mapper);
@Override
default <U> U foldLeft(U zero, BiFunction<? super U, ? super T, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
U xs = zero;
for (T x : this) {
xs = f.apply(xs, x);
}
return xs;
}
@Override
<U> U foldRight(U zero, BiFunction<? super T, ? super U, ? extends U> f);
/**
* Gets the first value in iteration order if this {@code Traversable} is not empty, otherwise throws.
*
* @return the first value
* @throws NoSuchElementException if this {@code Traversable} is empty.
*/
@Override
default T get() {
return head();
}
/**
* Groups this elements by classifying the elements.
*
* @param classifier A function which classifies elements into classes
* @param <C> classified class type
* @return A Map containing the grouped elements
* @throws NullPointerException if {@code classifier} is null.
* @see #arrangeBy(Function)
*/
<C> Map<C, ? extends Traversable<T>> groupBy(Function<? super T, ? extends C> classifier);
/**
* Groups this {@code Traversable} into fixed size blocks.
* <p>
* Let length be the length of this Iterable. Then grouped is defined as follows:
* <ul>
* <li>If {@code this.isEmpty()}, the resulting {@code Iterator} is empty.</li>
* <li>If {@code size <= length}, the resulting {@code Iterator} will contain {@code length / size} blocks of size
* {@code size} and maybe a non-empty block of size {@code length % size}, if there are remaining elements.</li>
* <li>If {@code size > length}, the resulting {@code Iterator} will contain one block of size {@code length}.</li>
* </ul>
* Examples:
* <pre>
* <code>
* [].grouped(1) = []
* [].grouped(0) throws
* [].grouped(-1) throws
* [1,2,3,4].grouped(2) = [[1,2],[3,4]]
* [1,2,3,4,5].grouped(2) = [[1,2],[3,4],[5]]
* [1,2,3,4].grouped(5) = [[1,2,3,4]]
* </code>
* </pre>
*
* Please note that {@code grouped(int)} is a special case of {@linkplain #sliding(int, int)}, i.e.
* {@code grouped(size)} is the same as {@code sliding(size, size)}.
*
* @param size a positive block size
* @return A new Iterator of grouped blocks of the given size
* @throws IllegalArgumentException if {@code size} is negative or zero
*/
Iterator<? extends Traversable<T>> grouped(int size);
/**
* Checks if this Traversable is known to have a finite size.
* <p>
* This method should be implemented by classes only, i.e. not by interfaces.
*
* @return true, if this Traversable is known to have a finite size, false otherwise.
*/
boolean hasDefiniteSize();
/**
* Returns the first element of a non-empty Traversable.
*
* @return The first element of this Traversable.
* @throws NoSuchElementException if this is empty
*/
T head();
/**
* Returns the first element of a non-empty Traversable as {@code Option}.
*
* @return {@code Some(element)} or {@code None} if this is empty.
*/
default Option<T> headOption() {
return isEmpty() ? Option.none() : Option.some(head());
}
/**
* Returns the hash code of this collection.
* <br>
* We distinguish between two types of hashes, those for collections with predictable iteration order (like Seq) and those with arbitrary iteration order (like Set, Map and Multimap).
* <br>
* In all cases the hash of an empty collection is defined to be 1.
* <br>
* Collections with predictable iteration order are hashed as follows:
*
* <pre>{@code
* int hash = 1;
* for (T t : this) { hash = hash * 31 + Objects.hashCode(t); }
* }</pre>
*
* Collections with arbitrary iteration order are hashed in a way such that the hash of a fixed number of elements is independent of their iteration order.
*
* <pre>{@code
* int hash = 1;
* for (T t : this) { hash += Objects.hashCode(t); }
* }</pre>
*
* Please note that the particular hashing algorithms may change in a future version of Vavr.
* <br>
* Generally, hash codes of collections aren't cached in Vavr (opposed to the size/length).
* Storing hash codes in order to reduce the time complexity would increase the memory footprint.
     * Persistent collections are built upon tree structures, which allows us to implement efficient memory sharing.
* A drawback of tree structures is that they make it necessary to store collection attributes at each tree node (read: element).
* <br>
* The computation of the hash code is linear in time, i.e. O(n). If the hash code of a collection is re-calculated often,
* e.g. when using a List as HashMap key, we might want to cache the hash code.
* This can be achieved by simply using a wrapper class, which is not included in Vavr but could be implemented like this:
*
* <pre>{@code public final class Hashed<K> {
*
* private final K key;
* private final Lazy<Integer> hashCode;
*
* public Hashed(K key) {
* this.key = key;
* this.hashCode = Lazy.of(() -> Objects.hashCode(key));
* }
*
* public K key() {
* return key;
* }
*
* @Override
* public boolean equals(Object o) {
* if (o == key) {
* return true;
* } else if (key != null && o instanceof Hashed) {
* final Hashed that = (Hashed) o;
* return key.equals(that.key);
* } else {
* return false;
* }
* }
*
* @Override
* public int hashCode() {
* return hashCode.get();
* }
*
* @Override
* public String toString() {
* return "Hashed(" + (key == null ? "null" : key.toString()) + ")";
* }
* }}</pre>
*
* @return The hash code of this collection
*/
int hashCode();
/**
* Dual of {@linkplain #tail()}, returning all elements except the last.
*
* @return a new instance containing all elements except the last.
* @throws UnsupportedOperationException if this is empty
*/
Traversable<T> init();
/**
* Dual of {@linkplain #tailOption()}, returning all elements except the last as {@code Option}.
*
* @return {@code Some(traversable)} or {@code None} if this is empty.
*/
default Option<? extends Traversable<T>> initOption() {
return isEmpty() ? Option.none() : Option.some(init());
}
/**
* Checks if this Traversable may consist of distinct elements only.
*
* @return true if this Traversable may consist of distinct elements only, false otherwise.
*/
default boolean isDistinct() {
return false;
}
/**
* Checks if this Traversable is empty.
*
* @return true, if this Traversable contains no elements, false otherwise.
*/
@Override
default boolean isEmpty() {
return length() == 0;
}
/**
* Checks if this Traversable is ordered
*
* @return true, if this Traversable is ordered, false otherwise.
*/
default boolean isOrdered() {
return false;
}
/**
* Checks if the elements of this Traversable appear in encounter order.
*
* @return true, if the insertion order of elements is preserved, false otherwise.
*/
default boolean isSequential() {
return false;
}
/**
* Each of Vavr's collections may contain more than one element.
*
* @return {@code false}
*/
@Override
default boolean isSingleValued() {
return false;
}
/**
* Checks if this Traversable can be repeatedly traversed.
* <p>
* This method should be implemented by classes only, i.e. not by interfaces.
*
* @return true, if this Traversable is known to be traversable repeatedly, false otherwise.
*/
boolean isTraversableAgain();
/**
* An iterator by means of head() and tail(). Subclasses may want to override this method.
*
* @return A new Iterator of this Traversable elements.
*/
@Override
default Iterator<T> iterator() {
final Traversable<T> that = this;
return new AbstractIterator<T>() {
Traversable<T> traversable = that;
@Override
public boolean hasNext() {
return !traversable.isEmpty();
}
@Override
public T getNext() {
final T result = traversable.head();
traversable = traversable.tail();
return result;
}
};
}
/**
* Dual of {@linkplain #head()}, returning the last element.
*
* @return the last element.
     * @throws NoSuchElementException if this is empty
*/
default T last() {
if (isEmpty()) {
throw new NoSuchElementException("last of empty Traversable");
} else {
final Iterator<T> it = iterator();
T result = null;
while (it.hasNext()) {
result = it.next();
}
return result;
}
}
/**
* Dual of {@linkplain #headOption()}, returning the last element as {@code Option}.
*
* @return {@code Some(element)} or {@code None} if this is empty.
*/
default Option<T> lastOption() {
return isEmpty() ? Option.none() : Option.some(last());
}
/**
* Computes the number of elements of this Traversable.
* <p>
* Same as {@link #size()}.
*
* @return the number of elements
*/
int length();
/**
* Maps the elements of this {@code Traversable} to elements of a new type preserving their order, if any.
*
* @param mapper A mapper.
* @param <U> Component type of the target Traversable
* @return a mapped Traversable
* @throws NullPointerException if {@code mapper} is null
*/
@Override
<U> Traversable<U> map(Function<? super T, ? extends U> mapper);
/**
* Calculates the maximum of this elements according to their natural order.
*
* @return {@code Some(maximum)} of this elements or {@code None} if this is empty
* @throws NullPointerException if an element is null
* @throws ClassCastException if the elements do not have a natural order, i.e. they do not implement Comparable
*/
@SuppressWarnings("unchecked")
default Option<T> max() {
if (isEmpty()) {
return Option.none();
} else {
final Traversable<T> ts = isTraversableAgain() ? this : toStream();
return ts.maxBy(Comparators.naturalComparator());
}
}
/**
* Calculates the maximum of this elements using a specific comparator.
*
* @param comparator A non-null element comparator
* @return {@code Some(maximum)} of this elements or {@code None} if this is empty
* @throws NullPointerException if {@code comparator} is null
*/
default Option<T> maxBy(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
if (isEmpty()) {
return Option.none();
} else {
final T value = reduce((t1, t2) -> comparator.compare(t1, t2) >= 0 ? t1 : t2);
return Option.some(value);
}
}
/**
* Calculates the maximum of this elements within the co-domain of a specific function.
*
* @param f A function that maps this elements to comparable elements
* @param <U> The type where elements are compared
* @return The element of type T which is the maximum within U
* @throws NullPointerException if {@code f} is null.
*/
default <U extends Comparable<? super U>> Option<T> maxBy(Function<? super T, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
if (isEmpty()) {
return Option.none();
} else {
final Iterator<T> iter = iterator();
T tm = iter.next();
U um = f.apply(tm);
while (iter.hasNext()) {
final T t = iter.next();
final U u = f.apply(t);
if (u.compareTo(um) > 0) {
um = u;
tm = t;
}
}
return Option.some(tm);
}
}
/**
* Calculates the minimum of this elements according to their natural order.
*
* @return {@code Some(minimum)} of this elements or {@code None} if this is empty
* @throws NullPointerException if an element is null
* @throws ClassCastException if the elements do not have a natural order, i.e. they do not implement Comparable
*/
@SuppressWarnings("unchecked")
default Option<T> min() {
if (isEmpty()) {
return Option.none();
} else {
final Traversable<T> ts = isTraversableAgain() ? this : toStream();
return ts.minBy(Comparators.naturalComparator());
}
}
/**
* Calculates the minimum of this elements using a specific comparator.
*
* @param comparator A non-null element comparator
* @return {@code Some(minimum)} of this elements or {@code None} if this is empty
* @throws NullPointerException if {@code comparator} is null
*/
default Option<T> minBy(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
if (isEmpty()) {
return Option.none();
} else {
final T value = reduce((t1, t2) -> comparator.compare(t1, t2) <= 0 ? t1 : t2);
return Option.some(value);
}
}
/**
* Calculates the minimum of this elements within the co-domain of a specific function.
*
* @param f A function that maps this elements to comparable elements
* @param <U> The type where elements are compared
* @return The element of type T which is the minimum within U
* @throws NullPointerException if {@code f} is null.
*/
default <U extends Comparable<? super U>> Option<T> minBy(Function<? super T, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
if (isEmpty()) {
return Option.none();
} else {
final Iterator<T> iter = iterator();
T tm = iter.next();
U um = f.apply(tm);
while (iter.hasNext()) {
final T t = iter.next();
final U u = f.apply(t);
if (u.compareTo(um) < 0) {
um = u;
tm = t;
}
}
return Option.some(tm);
}
}
/**
* Joins the elements of this by concatenating their string representations.
* <p>
* This has the same effect as calling {@code mkCharSeq("", "", "")}.
*
* @return a new {@link CharSeq}
*/
default CharSeq mkCharSeq() {
return mkCharSeq("", "", "");
}
/**
* Joins the string representations of this elements using a specific delimiter.
* <p>
* This has the same effect as calling {@code mkCharSeq("", delimiter, "")}.
*
* @param delimiter A delimiter string put between string representations of elements of this
* @return A new {@link CharSeq}
*/
default CharSeq mkCharSeq(CharSequence delimiter) {
return mkCharSeq("", delimiter, "");
}
/**
* Joins the string representations of this elements using a specific delimiter, prefix and suffix.
* <p>
* Example: {@code List.of("a", "b", "c").mkCharSeq("Chars(", ", ", ")") = CharSeq.of("Chars(a, b, c))"}
*
* @param prefix prefix of the resulting {@link CharSeq}
* @param delimiter A delimiter string put between string representations of elements of this
* @param suffix suffix of the resulting {@link CharSeq}
* @return a new {@link CharSeq}
*/
default CharSeq mkCharSeq(CharSequence prefix, CharSequence delimiter, CharSequence suffix) {
return CharSeq.of(mkString(prefix, delimiter, suffix));
}
/**
* Joins the elements of this by concatenating their string representations.
* <p>
* This has the same effect as calling {@code mkString("", "", "")}.
*
* @return a new String
*/
default String mkString() {
return mkString("", "", "");
}
/**
* Joins the string representations of this elements using a specific delimiter.
* <p>
* This has the same effect as calling {@code mkString("", delimiter, "")}.
*
* @param delimiter A delimiter string put between string representations of elements of this
* @return A new String
*/
default String mkString(CharSequence delimiter) {
return mkString("", delimiter, "");
}
/**
* Joins the string representations of this elements using a specific delimiter, prefix and suffix.
* <p>
* Example: {@code List.of("a", "b", "c").mkString("Chars(", ", ", ")") = "Chars(a, b, c)"}
*
* @param prefix prefix of the resulting string
* @param delimiter A delimiter string put between string representations of elements of this
* @param suffix suffix of the resulting string
* @return a new String
*/
default String mkString(CharSequence prefix, CharSequence delimiter, CharSequence suffix) {
final StringBuilder builder = new StringBuilder(prefix);
iterator().map(String::valueOf).intersperse(String.valueOf(delimiter)).forEach(builder::append);
return builder.append(suffix).toString();
}
/**
     * Checks if this {@code Traversable} is not empty.
* <p>
* The call is equivalent to {@code !isEmpty()}.
*
* @return true, if an underlying value is present, false otherwise.
*/
default boolean nonEmpty() {
return !isEmpty();
}
/**
* Returns this {@code Traversable} if it is nonempty, otherwise return the alternative.
*
* @param other An alternative {@code Traversable}
* @return this {@code Traversable} if it is nonempty, otherwise return the alternative.
*/
Traversable<T> orElse(Iterable<? extends T> other);
/**
* Returns this {@code Traversable} if it is nonempty, otherwise return the result of evaluating supplier.
*
* @param supplier An alternative {@code Traversable} supplier
* @return this {@code Traversable} if it is nonempty, otherwise return the result of evaluating supplier.
*/
Traversable<T> orElse(Supplier<? extends Iterable<? extends T>> supplier);
/**
     * Creates a partition of this {@code Traversable} by splitting this elements into two distinct traversables
* according to a predicate.
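     * <p>
     * Example (illustrative, using {@code List}):
     * <pre>{@code
     * List.of(1, 2, 3, 4).partition(i -> i % 2 == 0) // = (List(2, 4), List(1, 3))
     * }</pre>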
*
     * @param predicate A predicate which decides whether an element belongs to the first or the second traversable.
* @return A disjoint union of two traversables. The first {@code Traversable} contains all elements that satisfy the given {@code predicate}, the second {@code Traversable} contains all elements that don't. The original order of elements is preserved.
* @throws NullPointerException if predicate is null
*/
Tuple2<? extends Traversable<T>, ? extends Traversable<T>> partition(Predicate<? super T> predicate);
@Override
Traversable<T> peek(Consumer<? super T> action);
/**
* Calculates the product of this elements. Supported component types are {@code Byte}, {@code Double}, {@code Float},
* {@code Integer}, {@code Long}, {@code Short}, {@code BigInteger} and {@code BigDecimal}.
* <p>
* Examples:
* <pre>
* <code>
* List.empty().product() // = 1
* List.of(1, 2, 3).product() // = 6L
* List.of(0.1, 0.2, 0.3).product() // = 0.006
* List.of("apple", "pear").product() // throws
* </code>
* </pre>
*
     * @return a {@code Number} representing the product of this elements
* @throws UnsupportedOperationException if this elements are not numeric
*/
@SuppressWarnings("unchecked")
default Number product() {
if (isEmpty()) {
return 1;
} else {
final Iterator<?> iter = iterator();
final Object o = iter.next();
if (o instanceof Number) {
final Number head = (Number) o;
final Iterator<Number> numbers = (Iterator<Number>) iter;
if (head instanceof Integer || head instanceof Long || head instanceof Byte || head instanceof BigInteger || head instanceof Short) {
return numbers.toJavaStream().mapToLong(Number::longValue).reduce(head.longValue(), (l1, l2) -> l1 * l2);
} else {
return numbers.toJavaStream().mapToDouble(Number::doubleValue).reduce(head.doubleValue(), (d1, d2) -> d1 * d2);
}
} else {
throw new UnsupportedOperationException("not numeric");
}
}
}
/**
* Accumulates the elements of this Traversable by successively calling the given operation {@code op} from the left.
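     * <p>
     * Example (illustrative, using {@code List}):
     * <pre>{@code
     * List.of("a", "b", "c").reduceLeft((s1, s2) -> s1 + s2) // = "abc"
     * }</pre>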
*
* @param op A BiFunction of type T
* @return the reduced value.
* @throws NoSuchElementException if this is empty
* @throws NullPointerException if {@code op} is null
*/
@Override
default T reduceLeft(BiFunction<? super T, ? super T, ? extends T> op) {
Objects.requireNonNull(op, "op is null");
if (isEmpty()) {
throw new NoSuchElementException("reduceLeft on Nil");
} else {
return tail().foldLeft(head(), op);
}
}
/**
* Shortcut for {@code isEmpty() ? Option.none() : Option.some(reduceLeft(op))}.
*
* @param op A BiFunction of type T
* @return a reduced value
* @throws NullPointerException if {@code op} is null
*/
@Override
default Option<T> reduceLeftOption(BiFunction<? super T, ? super T, ? extends T> op) {
Objects.requireNonNull(op, "op is null");
return isEmpty() ? Option.none() : Option.some(reduceLeft(op));
}
/**
* Accumulates the elements of this Traversable by successively calling the given operation {@code op} from the right.
*
* @param op An operation of type T
* @return the reduced value.
* @throws NoSuchElementException if this is empty
* @throws NullPointerException if {@code op} is null
*/
@Override
default T reduceRight(BiFunction<? super T, ? super T, ? extends T> op) {
Objects.requireNonNull(op, "op is null");
if (isEmpty()) {
throw new NoSuchElementException("reduceRight on empty");
} else {
return iterator().reduceRight(op);
}
}
/**
* Shortcut for {@code isEmpty() ? Option.none() : Option.some(reduceRight(op))}.
*
* @param op An operation of type T
* @return a reduced value
* @throws NullPointerException if {@code op} is null
*/
@Override
default Option<T> reduceRightOption(BiFunction<? super T, ? super T, ? extends T> op) {
Objects.requireNonNull(op, "op is null");
return isEmpty() ? Option.none() : Option.some(reduceRight(op));
}
/**
     * Replaces the first occurrence (if any) of the given currentElement with newElement.
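     * <p>
     * Example (illustrative, using {@code List}):
     * <pre>{@code
     * List.of(1, 2, 1).replace(1, 0) // = List(0, 2, 1)
     * }</pre>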
*
* @param currentElement An element to be substituted.
* @param newElement A replacement for currentElement.
* @return a Traversable containing all elements of this where the first occurrence of currentElement is replaced with newElement.
*/
Traversable<T> replace(T currentElement, T newElement);
/**
* Replaces all occurrences of the given currentElement with newElement.
*
* @param currentElement An element to be substituted.
* @param newElement A replacement for currentElement.
* @return a Traversable containing all elements of this where all occurrences of currentElement are replaced with newElement.
*/
Traversable<T> replaceAll(T currentElement, T newElement);
/**
* Keeps all occurrences of the given elements from this.
*
* @param elements Elements to be kept.
* @return a Traversable containing all occurrences of the given elements.
* @throws NullPointerException if {@code elements} is null
*/
Traversable<T> retainAll(Iterable<? extends T> elements);
/**
* Computes a prefix scan of the elements of the collection.
*
     * Note: The neutral element {@code zero} may be applied more than once.
*
* @param zero neutral element for the operator op
* @param operation the associative operator for the scan
* @return a new traversable collection containing the prefix scan of the elements in this traversable collection
* @throws NullPointerException if {@code operation} is null.
*/
Traversable<T> scan(T zero, BiFunction<? super T, ? super T, ? extends T> operation);
/**
* Produces a collection containing cumulative results of applying the
* operator going left to right.
*
* Note: will not terminate for infinite-sized collections.
*
* Note: might return different results for different runs, unless the
* underlying collection type is ordered.
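     * <p>
     * Example (illustrative, using {@code List}):
     * <pre>{@code
     * List.of(1, 2, 3).scanLeft(0, (acc, x) -> acc + x) // = List(0, 1, 3, 6)
     * }</pre>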
*
* @param <U> the type of the elements in the resulting collection
* @param zero the initial value
* @param operation the binary operator applied to the intermediate result and the element
* @return collection with intermediate results
* @throws NullPointerException if {@code operation} is null.
*/
<U> Traversable<U> scanLeft(U zero, BiFunction<? super U, ? super T, ? extends U> operation);
/**
* Produces a collection containing cumulative results of applying the
* operator going right to left. The head of the collection is the last
* cumulative result.
*
* Note: will not terminate for infinite-sized collections.
*
* Note: might return different results for different runs, unless the
* underlying collection type is ordered.
*
* @param <U> the type of the elements in the resulting collection
* @param zero the initial value
* @param operation the binary operator applied to the intermediate result and the element
* @return collection with intermediate results
* @throws NullPointerException if {@code operation} is null.
*/
<U> Traversable<U> scanRight(U zero, BiFunction<? super T, ? super U, ? extends U> operation);
/**
* Returns the single element of this Traversable or throws, if this is empty or contains more than one element.
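     * <p>
     * Example (illustrative, using {@code List}):
     * <pre>{@code
     * List.of(1).single()    // = 1
     * List.of(1, 2).single() // throws
     * List.empty().single()  // throws
     * }</pre>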
*
* @return the single element from the Traversable
* @throws NoSuchElementException if the Traversable does not contain a single element.
*/
default T single() {
return singleOption().getOrElseThrow(() -> new NoSuchElementException("Does not contain a single value"));
}
/**
* Returns the only element of a Traversable as {@code Option}.
*
* @return {@code Some(element)} or {@code None} if the Traversable does not contain a single element.
*/
default Option<T> singleOption() {
final Iterator<T> it = iterator();
if (!it.hasNext()) {
return Option.none();
}
final T first = it.next();
if (it.hasNext()) {
return Option.none();
} else {
return Option.some(first);
}
}
/**
* Computes the number of elements of this Traversable.
* <p>
* Same as {@link #length()}.
*
* @return the number of elements
*/
default int size() {
return length();
}
/**
* Slides a non-overlapping window of a variable size over this {@code Traversable}.
* <p>
* Each window contains elements with the same class, as determined by {@code classifier}. Two consecutive
* values in this {@code Traversable} will be in the same window only if {@code classifier} returns equal
* values for them. Otherwise, the values will constitute the last element of the previous window and the
* first element of the next window.
* <p>
* Examples:
* <pre>{@code
* [].slideBy(Function.identity()) = []
* [1,2,3,4,4,5].slideBy(Function.identity()) = [[1],[2],[3],[4,4],[5]]
* [1,2,3,10,12,5,7,20,29].slideBy(x -> x/10) = [[1,2,3],[10,12],[5,7],[20,29]]
* }</pre>
*
* @param classifier A function which classifies elements into classes
* @return A new Iterator of windows of the grouped elements
* @throws NullPointerException if {@code classifier} is null.
*/
Iterator<? extends Traversable<T>> slideBy(Function<? super T, ?> classifier);
/**
* Slides a window of a specific {@code size} and step size 1 over this {@code Traversable} by calling
* {@link #sliding(int, int)}.
*
* @param size a positive window size
* @return a new Iterator of windows of a specific size using step size 1
* @throws IllegalArgumentException if {@code size} is negative or zero
*/
Iterator<? extends Traversable<T>> sliding(int size);
/**
* Slides a window of a specific {@code size} and {@code step} size over this {@code Traversable}.
* <p>
* Examples:
* <pre>
* <code>
* [].sliding(1,1) = []
* [1,2,3,4,5].sliding(2,3) = [[1,2],[4,5]]
* [1,2,3,4,5].sliding(2,4) = [[1,2],[5]]
* [1,2,3,4,5].sliding(2,5) = [[1,2]]
* [1,2,3,4].sliding(5,3) = [[1,2,3,4],[4]]
* </code>
* </pre>
*
* @param size a positive window size
* @param step a positive step size
* @return a new Iterator of windows of a specific size using a specific step size
* @throws IllegalArgumentException if {@code size} or {@code step} are negative or zero
*/
Iterator<? extends Traversable<T>> sliding(int size, int step);
/**
* Returns a tuple where the first element is the longest prefix of elements that satisfy the given
* {@code predicate} and the second element is the remainder.
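     * <p>
     * Example (illustrative, using {@code List}):
     * <pre>{@code
     * List.of(1, 2, 3, 1).span(i -> i < 3) // = (List(1, 2), List(3, 1))
     * }</pre>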
*
* @param predicate A predicate.
* @return a {@code Tuple} containing the longest prefix of elements that satisfy p and the remainder.
* @throws NullPointerException if {@code predicate} is null
*/
Tuple2<? extends Traversable<T>, ? extends Traversable<T>> span(Predicate<? super T> predicate);
@Override
default Spliterator<T> spliterator() {
int characteristics = Spliterator.IMMUTABLE;
if (isDistinct()) {
characteristics |= Spliterator.DISTINCT;
}
if (isOrdered()) {
characteristics |= (Spliterator.SORTED | Spliterator.ORDERED);
}
if (isSequential()) {
characteristics |= Spliterator.ORDERED;
}
if (hasDefiniteSize()) {
characteristics |= (Spliterator.SIZED | Spliterator.SUBSIZED);
return Spliterators.spliterator(iterator(), length(), characteristics);
} else {
return Spliterators.spliteratorUnknownSize(iterator(), characteristics);
}
}
/**
* Calculates the sum of this elements. Supported component types are {@code Byte}, {@code Double}, {@code Float},
* {@code Integer}, {@code Long}, {@code Short}, {@code BigInteger} and {@code BigDecimal}.
* <p>
* Examples:
* <pre>
* <code>
* List.empty().sum() // = 0
* List.of(1, 2, 3).sum() // = 6L
* List.of(0.1, 0.2, 0.3).sum() // = 0.6
* List.of("apple", "pear").sum() // throws
* </code>
* </pre>
*
* @return a {@code Number} representing the sum of this elements
* @throws UnsupportedOperationException if this elements are not numeric
*/
@SuppressWarnings("unchecked")
default Number sum() {
if (isEmpty()) {
return 0;
} else {
final Iterator<?> iter = iterator();
final Object o = iter.next();
if (o instanceof Number) {
final Number head = (Number) o;
final Iterator<Number> numbers = (Iterator<Number>) iter;
if (head instanceof Integer || head instanceof Long || head instanceof Byte || head instanceof BigInteger || head instanceof Short) {
return numbers.foldLeft(head.longValue(), (n1, n2) -> n1 + n2.longValue());
} else {
return numbers.foldLeft(head.doubleValue(), (n1, n2) -> n1 + n2.doubleValue());
}
} else {
throw new UnsupportedOperationException("not numeric");
}
}
}
/**
* Drops the first element of a non-empty Traversable.
*
* @return A new instance of Traversable containing all elements except the first.
* @throws UnsupportedOperationException if this is empty
*/
Traversable<T> tail();
/**
* Drops the first element of a non-empty Traversable and returns an {@code Option}.
*
* @return {@code Some(traversable)} or {@code None} if this is empty.
*/
Option<? extends Traversable<T>> tailOption();
/**
* Takes the first n elements of this or all elements, if this length < n.
* <p>
* The result is equivalent to {@code sublist(0, max(0, min(length(), n)))} but does not throw if {@code n < 0} or
* {@code n > length()}.
* <p>
* In the case of {@code n < 0} the empty instance is returned, in the case of {@code n > length()} this is returned.
*
* @param n The number of elements to take.
     * @return A new instance consisting of the first n elements of this or all elements, if this has less than n elements.
*/
Traversable<T> take(int n);
/**
* Takes the last n elements of this or all elements, if this length < n.
* <p>
* The result is equivalent to {@code sublist(max(0, min(length(), length() - n)), n)}, i.e. takeRight will not
* throw if {@code n < 0} or {@code n > length()}.
* <p>
* In the case of {@code n < 0} the empty instance is returned, in the case of {@code n > length()} this is returned.
*
* @param n The number of elements to take.
     * @return A new instance consisting of the last n elements of this or all elements, if this has less than n elements.
*/
Traversable<T> takeRight(int n);
/**
* Takes elements until the predicate holds for the current element.
* <p>
* Note: This is essentially the same as {@code takeWhile(predicate.negate())}. It is intended to be used with
* method references, which cannot be negated directly.
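     * <p>
     * Example (illustrative, using {@code List}):
     * <pre>{@code
     * List.of(1, 2, 3, 1).takeUntil(i -> i > 2) // = List(1, 2)
     * }</pre>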
*
* @param predicate A condition tested subsequently for this elements.
* @return a new instance consisting of all elements until the first which does satisfy the given predicate.
* @throws NullPointerException if {@code predicate} is null
*/
Traversable<T> takeUntil(Predicate<? super T> predicate);
/**
* Takes elements while the predicate holds for the current element.
*
* @param predicate A condition tested subsequently for the contained elements.
* @return a new instance consisting of all elements until the first which does not satisfy the given predicate.
* @throws NullPointerException if {@code predicate} is null
*/
Traversable<T> takeWhile(Predicate<? super T> predicate);
/**
* Unzips this elements by mapping this elements to pairs which are subsequently split into two distinct
* sets.
*
* @param unzipper a function which converts elements of this to pairs
* @param <T1> 1st element type of a pair returned by unzipper
* @param <T2> 2nd element type of a pair returned by unzipper
     * @return A pair of traversables containing elements split by unzipper
* @throws NullPointerException if {@code unzipper} is null
*/
<T1, T2> Tuple2<? extends Traversable<T1>, ? extends Traversable<T2>> unzip(
Function<? super T, Tuple2<? extends T1, ? extends T2>> unzipper);
/**
* Unzips this elements by mapping this elements to triples which are subsequently split into three distinct
* sets.
*
     * @param unzipper a function which converts elements of this to triples
* @param <T1> 1st element type of a triplet returned by unzipper
* @param <T2> 2nd element type of a triplet returned by unzipper
* @param <T3> 3rd element type of a triplet returned by unzipper
     * @return A triplet of traversables containing elements split by unzipper
* @throws NullPointerException if {@code unzipper} is null
*/
<T1, T2, T3> Tuple3<? extends Traversable<T1>, ? extends Traversable<T2>, ? extends Traversable<T3>> unzip3(
Function<? super T, Tuple3<? extends T1, ? extends T2, ? extends T3>> unzipper);
/**
* Returns a traversable formed from this traversable and another Iterable collection by combining
* corresponding elements in pairs. If one of the two iterables is longer than the other, its remaining elements
* are ignored.
* <p>
* The length of the returned traversable is the minimum of the lengths of this traversable and {@code that}
* iterable.
*
* @param <U> The type of the second half of the returned pairs.
* @param that The Iterable providing the second half of each result pair.
* @return a new traversable containing pairs consisting of corresponding elements of this traversable and {@code that} iterable.
* @throws NullPointerException if {@code that} is null
*/
<U> Traversable<Tuple2<T, U>> zip(Iterable<? extends U> that);
/**
* Returns a traversable formed from this traversable and another Iterable by combining corresponding elements in
* pairs. If one of the two collections is shorter than the other, placeholder elements are used to extend the
* shorter collection to the length of the longer.
* <p>
* The length of the returned traversable is the maximum of the lengths of this traversable and {@code that}
* iterable.
* <p>
* Special case: if this traversable is shorter than that elements, and that elements contains duplicates, the
* resulting traversable may be shorter than the maximum of the lengths of this and that because a traversable
* contains an element at most once.
* <p>
* If this Traversable is shorter than that, thisElem values are used to fill the result.
* If that is shorter than this Traversable, thatElem values are used to fill the result.
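     * <p>
     * Example (illustrative, using {@code List}):
     * <pre>{@code
     * List.of(1, 2).zipAll(List.of("a"), 0, "z") // = List((1, "a"), (2, "z"))
     * }</pre>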
*
* @param <U> The type of the second half of the returned pairs.
* @param that The Iterable providing the second half of each result pair.
* @param thisElem The element to be used to fill up the result if this traversable is shorter than that.
* @param thatElem The element to be used to fill up the result if that is shorter than this traversable.
* @return A new traversable containing pairs consisting of corresponding elements of this traversable and that.
* @throws NullPointerException if {@code that} is null
*/
<U> Traversable<Tuple2<T, U>> zipAll(Iterable<? extends U> that, T thisElem, U thatElem);
/**
* Returns a traversable formed from this traversable and another Iterable collection by mapping elements.
* If one of the two iterables is longer than the other, its remaining elements are ignored.
* <p>
* The length of the returned traversable is the minimum of the lengths of this traversable and {@code that}
* iterable.
*
* @param <U> The type of the second parameter of the mapper.
* @param <R> The type of the mapped elements.
* @param that The Iterable providing the second parameter of the mapper.
* @param mapper a mapper.
* @return a new traversable containing mapped elements of this traversable and {@code that} iterable.
* @throws NullPointerException if {@code that} or {@code mapper} is null
*/
<U, R> Traversable<R> zipWith(Iterable<? extends U> that, BiFunction<? super T, ? super U, ? extends R> mapper);
/**
* Zips this traversable with its indices.
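     * <p>
     * Example (illustrative, using {@code List}):
     * <pre>{@code
     * List.of("a", "b").zipWithIndex() // = List(("a", 0), ("b", 1))
     * }</pre>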
*
* @return A new traversable containing all elements of this traversable paired with their index, starting with 0.
*/
Traversable<Tuple2<T, Integer>> zipWithIndex();
/**
     * Zips this traversable with its indices and maps each (element, index) pair to a new element,
     * with indices starting at 0.
*
* @param <U> The type of the mapped elements.
* @param mapper a mapper.
     * @return a new traversable containing the elements of this traversable, mapped together with their indices.
* @throws NullPointerException if {@code mapper} is null
*/
<U> Traversable<U> zipWithIndex(BiFunction<? super T, ? super Integer, ? extends U> mapper);
}
| 1 | 12,360 | `tail()` is an expensive operation for certain collections | vavr-io-vavr | java |
@@ -86,7 +86,8 @@ var rubyMappings = {
'http_infrastructure':['../../../TestServer/swagger/httpInfrastructure.json','HttpInfrastructureModule'],
'required_optional':['../../../TestServer/swagger/required-optional.json','RequiredOptionalModule'],
'report':['../../../TestServer/swagger/report.json','ReportModule'],
- 'model_flattening':['../../../TestServer/swagger/model-flattening.json', 'ModelFlatteningModule'],
+ 'model_flattening':['../../../TestServer/swagger/model-flattening.json', 'ModelFlatteningModule'],
+ 'parameter_grouping':['../../../TestServer/swagger/azure-parameter-grouping.json', 'ParameterGroupingModule'],
};
var defaultAzureMappings = { | 1 | /// <binding Clean='clean' />
var gulp = require('gulp'),
msbuild = require('gulp-msbuild'),
debug = require('gulp-debug'),
env = require('gulp-env'),
path = require('path'),
fs = require('fs'),
merge = require('merge2'),
shell = require('gulp-shell'),
glob = require('glob'),
spawn = require('child_process').spawn,
assemblyInfo = require('gulp-dotnet-assembly-info'),
nuspecSync = require('./Tools/gulp/gulp-nuspec-sync'),
runtimeVersionSync = require('./Tools/gulp/gulp-runtime-version-sync'),
nugetProjSync = require('./Tools/gulp/gulp-nuget-proj-sync'),
regenExpected = require('./Tools/gulp/gulp-regenerate-expected'),
del = require('del'),
gutil = require('gulp-util'),
runSequence = require('run-sequence'),
requireDir = require('require-dir')('./Tools/gulp'),
exec = require('child_process').exec;
const DEFAULT_ASSEMBLY_VERSION = '0.9.0.0';
const MAX_BUFFER = 1024 * 4096;
var isWindows = (process.platform.lastIndexOf('win') === 0);
process.env.MSBUILDDISABLENODEREUSE = 1;
function basePathOrThrow() {
if (!gutil.env.basePath) {
return __dirname;
}
return gutil.env.basePath;
}
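// Shallow-merges two option objects into a new object; on key conflicts, obj2 wins.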
function mergeOptions(obj1,obj2){
var obj3 = {};
for (var attrname in obj1) { obj3[attrname] = obj1[attrname]; }
for (var attrname in obj2) { obj3[attrname] = obj2[attrname]; }
return obj3;
}
var defaultMappings = {
'AcceptanceTests/ParameterFlattening': '../../../TestServer/swagger/parameter-flattening.json',
'AcceptanceTests/BodyArray': '../../../TestServer/swagger/body-array.json',
'AcceptanceTests/BodyBoolean': '../../../TestServer/swagger/body-boolean.json',
'AcceptanceTests/BodyByte': '../../../TestServer/swagger/body-byte.json',
'AcceptanceTests/BodyComplex': '../../../TestServer/swagger/body-complex.json',
'AcceptanceTests/BodyDate': '../../../TestServer/swagger/body-date.json',
'AcceptanceTests/BodyDateTime': '../../../TestServer/swagger/body-datetime.json',
'AcceptanceTests/BodyDateTimeRfc1123': '../../../TestServer/swagger/body-datetime-rfc1123.json',
'AcceptanceTests/BodyDuration': '../../../TestServer/swagger/body-duration.json',
'AcceptanceTests/BodyDictionary': '../../../TestServer/swagger/body-dictionary.json',
'AcceptanceTests/BodyFile': '../../../TestServer/swagger/body-file.json',
'AcceptanceTests/BodyFormData': '../../../TestServer/swagger/body-formdata.json',
'AcceptanceTests/BodyInteger': '../../../TestServer/swagger/body-integer.json',
'AcceptanceTests/BodyNumber': '../../../TestServer/swagger/body-number.json',
'AcceptanceTests/BodyString': '../../../TestServer/swagger/body-string.json',
'AcceptanceTests/Header': '../../../TestServer/swagger/header.json',
'AcceptanceTests/Http': '../../../TestServer/swagger/httpInfrastructure.json',
'AcceptanceTests/Report': '../../../TestServer/swagger/report.json',
'AcceptanceTests/RequiredOptional': '../../../TestServer/swagger/required-optional.json',
'AcceptanceTests/Url': '../../../TestServer/swagger/url.json',
'AcceptanceTests/Validation': '../../../TestServer/swagger/validation.json',
'AcceptanceTests/CustomBaseUri': '../../../TestServer/swagger/custom-baseUrl.json',
'AcceptanceTests/CustomBaseUriMoreOptions': '../../../TestServer/swagger/custom-baseUrl-more-options.json',
'AcceptanceTests/ModelFlattening': '../../../TestServer/swagger/model-flattening.json'
};
var rubyMappings = {
'boolean':['../../../TestServer/swagger/body-boolean.json', 'BooleanModule'],
'integer':['../../../TestServer/swagger/body-integer.json','IntegerModule'],
'number':['../../../TestServer/swagger/body-number.json','NumberModule'],
'string':['../../../TestServer/swagger/body-string.json','StringModule'],
'byte':['../../../TestServer/swagger/body-byte.json','ByteModule'],
'array':['../../../TestServer/swagger/body-array.json','ArrayModule'],
'dictionary':['../../../TestServer/swagger/body-dictionary.json','DictionaryModule'],
'date':['../../../TestServer/swagger/body-date.json','DateModule'],
'datetime':['../../../TestServer/swagger/body-datetime.json','DatetimeModule'],
'datetime_rfc1123':['../../../TestServer/swagger/body-datetime-rfc1123.json','DatetimeRfc1123Module'],
'duration':['../../../TestServer/swagger/body-duration.json','DurationModule'],
'complex':['../../../TestServer/swagger/body-complex.json','ComplexModule'],
'url':['../../../TestServer/swagger/url.json','UrlModule'],
'url_items':['../../../TestServer/swagger/url.json','UrlModule'],
'url_query':['../../../TestServer/swagger/url.json','UrlModule'],
'header_folder':['../../../TestServer/swagger/header.json','HeaderModule'],
'http_infrastructure':['../../../TestServer/swagger/httpInfrastructure.json','HttpInfrastructureModule'],
'required_optional':['../../../TestServer/swagger/required-optional.json','RequiredOptionalModule'],
'report':['../../../TestServer/swagger/report.json','ReportModule'],
'model_flattening':['../../../TestServer/swagger/model-flattening.json', 'ModelFlatteningModule'],
};
var defaultAzureMappings = {
'AcceptanceTests/Lro': '../../../TestServer/swagger/lro.json',
'AcceptanceTests/Paging': '../../../TestServer/swagger/paging.json',
'AcceptanceTests/AzureReport': '../../../TestServer/swagger/azure-report.json',
'AcceptanceTests/AzureParameterGrouping': '../../../TestServer/swagger/azure-parameter-grouping.json',
'AcceptanceTests/AzureResource': '../../../TestServer/swagger/azure-resource.json',
'AcceptanceTests/Head': '../../../TestServer/swagger/head.json',
'AcceptanceTests/HeadExceptions': '../../../TestServer/swagger/head-exceptions.json',
'AcceptanceTests/SubscriptionIdApiVersion': '../../../TestServer/swagger/subscriptionId-apiVersion.json',
'AcceptanceTests/AzureSpecials': '../../../TestServer/swagger/azure-special-properties.json',
'AcceptanceTests/CustomBaseUri': '../../../TestServer/swagger/custom-baseUrl.json'
};
var compositeMappings = {
'AcceptanceTests/CompositeBoolIntClient': '../../../TestServer/swagger/composite-swagger.json'
};
var azureCompositeMappings = {
'AcceptanceTests/AzureCompositeModelClient': '../../../TestServer/swagger/azure-composite-swagger.json'
};
var nodeAzureMappings = {
'AcceptanceTests/StorageManagementClient': '../../../TestServer/swagger/storage.json'
};
var nodeMappings = {
'AcceptanceTests/ComplexModelClient': '../../../TestServer/swagger/complex-model.json'
};
var rubyAzureMappings = {
'head':['../../../TestServer/swagger/head.json', 'HeadModule'],
'head_exceptions':['../../../TestServer/swagger/head-exceptions.json', 'HeadExceptionsModule'],
'paging':['../../../TestServer/swagger/paging.json', 'PagingModule'],
'azure_resource':['../../../TestServer/swagger/azure-resource.json', 'AzureResourceModule'],
'lro':['../../../TestServer/swagger/lro.json', 'LroModule'],
'azure_url':['../../../TestServer/swagger/subscriptionId-apiVersion.json', 'AzureUrlModule'],
'azure_special_properties': ['../../../TestServer/swagger/azure-special-properties.json', 'AzureSpecialPropertiesModule'],
'azure_report':['../../../TestServer/swagger/azure-report.json', 'AzureReportModule'],
'custom_base_uri':['../../../TestServer/swagger/custom-baseUrl.json', 'CustomBaseUriModule'],
'custom_base_uri_more':['../../../TestServer/swagger/custom-baseUrl-more-options.json', 'CustomBaseUriMoreModule'],
};
gulp.task('regenerate:expected', function(cb){
runSequence('regenerate:delete',
[
'regenerate:expected:csazure',
'regenerate:expected:cs',
'regenerate:expected:node',
'regenerate:expected:nodeazure',
'regenerate:expected:ruby',
'regenerate:expected:rubyazure',
'regenerate:expected:java',
'regenerate:expected:javaazure',
'regenerate:expected:python',
'regenerate:expected:pythonazure',
'regenerate:expected:samples'
],
cb);
});
gulp.task('regenerate:delete', function(cb){
del([
'AutoRest/Generators/CSharp/Azure.CSharp.Tests/Expected',
'AutoRest/Generators/CSharp/CSharp.Tests/Expected',
'AutoRest/Generators/NodeJS/NodeJS.Tests/Expected',
'AutoRest/Generators/NodeJS/Azure.NodeJS.Tests/Expected',
'AutoRest/Generators/Java/Java.Tests/src/main/java',
'AutoRest/Generators/Java/Azure.Java.Tests/src/main/java',
'AutoRest/Generators/Python/Python.Tests/Expected',
'AutoRest/Generators/Python/Azure.Python.Tests/Expected'
], cb);
});
gulp.task('regenerate:expected:nodecomposite', function (cb) {
regenExpected({
'outputBaseDir': 'AutoRest/Generators/NodeJS/NodeJS.Tests',
'inputBaseDir': 'AutoRest/Generators/NodeJS/NodeJS.Tests',
'mappings': compositeMappings,
'modeler': 'CompositeSwagger',
'outputDir': 'Expected',
'codeGenerator': 'NodeJS',
'nsPrefix': 'Fixtures',
'flatteningThreshold': '1'
}, cb);
});
gulp.task('regenerate:expected:nodeazurecomposite', function (cb) {
regenExpected({
'outputBaseDir': 'AutoRest/Generators/NodeJS/Azure.NodeJS.Tests',
'inputBaseDir': 'AutoRest/Generators/NodeJS/Azure.NodeJS.Tests',
'mappings': azureCompositeMappings,
'modeler': 'CompositeSwagger',
'outputDir': 'Expected',
'codeGenerator': 'Azure.NodeJS',
'nsPrefix': 'Fixtures',
'flatteningThreshold': '1'
}, cb);
});
gulp.task('regenerate:expected:nodeazure', ['regenerate:expected:nodeazurecomposite'], function (cb) {
for (var p in defaultAzureMappings) {
nodeAzureMappings[p] = defaultAzureMappings[p];
}
regenExpected({
'outputBaseDir': 'AutoRest/Generators/NodeJS/Azure.NodeJS.Tests',
'inputBaseDir': 'AutoRest/Generators/CSharp/Azure.CSharp.Tests',
'mappings': nodeAzureMappings,
'outputDir': 'Expected',
'codeGenerator': 'Azure.NodeJS',
'flatteningThreshold': '1'
}, cb);
})
gulp.task('regenerate:expected:node', ['regenerate:expected:nodecomposite'], function (cb) {
for (var p in defaultMappings) {
nodeMappings[p] = defaultMappings[p];
}
regenExpected({
'outputBaseDir': 'AutoRest/Generators/NodeJS/NodeJS.Tests',
'inputBaseDir': 'AutoRest/Generators/CSharp/CSharp.Tests',
'mappings': nodeMappings,
'outputDir': 'Expected',
'codeGenerator': 'NodeJS',
'flatteningThreshold': '1'
}, cb);
})
gulp.task('regenerate:expected:python', function(cb){
regenExpected({
'outputBaseDir': 'AutoRest/Generators/Python/Python.Tests',
'inputBaseDir': 'AutoRest/Generators/CSharp/CSharp.Tests',
'mappings': defaultMappings,
'outputDir': 'Expected',
'codeGenerator': 'Python',
'flatteningThreshold': '1'
}, cb);
})
gulp.task('regenerate:expected:pythonazure', function(cb){
mappings = mergeOptions({
'AcceptanceTests/AzureBodyDuration': '../../../TestServer/swagger/body-duration.json',
'AcceptanceTests/StorageManagementClient': '../../../TestServer/swagger/storage.json'
}, defaultAzureMappings);
regenExpected({
'outputBaseDir': 'AutoRest/Generators/Python/Azure.Python.Tests',
'inputBaseDir': 'AutoRest/Generators/CSharp/Azure.CSharp.Tests',
'mappings': mappings,
'outputDir': 'Expected',
'codeGenerator': 'Azure.Python',
'flatteningThreshold': '1'
}, cb);
})
gulp.task('regenerate:expected:rubyazure', function(cb){
regenExpected({
'outputBaseDir': 'AutoRest/Generators/Ruby/Azure.Ruby.Tests',
'inputBaseDir': 'AutoRest/Generators/CSharp/Azure.CSharp.Tests',
'mappings': rubyAzureMappings,
'outputDir': 'RspecTests/Generated',
'codeGenerator': 'Azure.Ruby',
'nsPrefix': 'MyNamespace'
}, cb);
})
gulp.task('regenerate:expected:ruby', function(cb){
regenExpected({
'outputBaseDir': 'AutoRest/Generators/Ruby/Ruby.Tests',
'inputBaseDir': 'AutoRest/Generators/CSharp/CSharp.Tests',
'mappings': rubyMappings,
'outputDir': 'RspecTests/Generated',
'codeGenerator': 'Ruby',
'nsPrefix': 'MyNamespace'
}, cb);
})
gulp.task('regenerate:expected:javaazure', function(cb){
mappings = {};
for (var key in defaultAzureMappings) {
mappings[key.substring(16).toLowerCase()] = defaultAzureMappings[key];
}
regenExpected({
'outputBaseDir': 'AutoRest/Generators/Java/Azure.Java.Tests',
'inputBaseDir': 'AutoRest/Generators/CSharp/Azure.CSharp.Tests',
'mappings': mappings,
'outputDir': 'src/main/java/fixtures',
'codeGenerator': 'Azure.Java',
'nsPrefix': 'Fixtures'
}, cb);
})
gulp.task('regenerate:expected:java', function(cb){
mappings = {};
for (var key in defaultMappings) {
mappings[key.substring(16).toLowerCase()] = defaultMappings[key];
}
regenExpected({
'outputBaseDir': 'AutoRest/Generators/Java/Java.Tests',
'inputBaseDir': 'AutoRest/Generators/CSharp/CSharp.Tests',
'mappings': mappings,
'outputDir': 'src/main/java/fixtures',
'codeGenerator': 'Java',
'nsPrefix': 'Fixtures'
}, cb);
})
gulp.task('regenerate:expected:csazure', ['regenerate:expected:csazurecomposite','regenerate:expected:csazureallsync', 'regenerate:expected:csazurenosync'], function (cb) {
mappings = mergeOptions({
'AcceptanceTests/AzureBodyDuration': '../../../TestServer/swagger/body-duration.json'
}, defaultAzureMappings);
regenExpected({
'outputBaseDir': 'AutoRest/Generators/CSharp/Azure.CSharp.Tests',
'inputBaseDir': 'AutoRest/Generators/CSharp/Azure.CSharp.Tests',
'mappings': mappings,
'outputDir': 'Expected',
'codeGenerator': 'Azure.CSharp',
'nsPrefix': 'Fixtures.Azure',
'flatteningThreshold': '1'
}, cb);
});
gulp.task('regenerate:expected:cs', ['regenerate:expected:cswithcreds', 'regenerate:expected:cscomposite', 'regenerate:expected:csallsync', 'regenerate:expected:csnosync'], function (cb) {
mappings = mergeOptions({
'Mirror.RecursiveTypes': 'Swagger/swagger-mirror-recursive-type.json',
'Mirror.Primitives': 'Swagger/swagger-mirror-primitives.json',
'Mirror.Sequences': 'Swagger/swagger-mirror-sequences.json',
'Mirror.Polymorphic': 'Swagger/swagger-mirror-polymorphic.json',
'Internal.Ctors': 'Swagger/swagger-internal-ctors.json',
'Additional.Properties': 'Swagger/swagger-additional-properties.yaml',
'DateTimeOffset': 'Swagger/swagger-datetimeoffset.json'
}, defaultMappings);
regenExpected({
'outputBaseDir': 'AutoRest/Generators/CSharp/CSharp.Tests',
'inputBaseDir': 'AutoRest/Generators/CSharp/CSharp.Tests',
'mappings': mappings,
'outputDir': 'Expected',
'codeGenerator': 'CSharp',
'nsPrefix': 'Fixtures',
'flatteningThreshold': '1'
}, cb);
});
gulp.task('regenerate:expected:cswithcreds', function(cb){
mappings = mergeOptions(
{
'PetstoreV2': 'Swagger/swagger.2.0.example.v2.json',
});
regenExpected({
'outputBaseDir': 'AutoRest/Generators/CSharp/CSharp.Tests',
'inputBaseDir': 'AutoRest/Generators/CSharp/CSharp.Tests',
'mappings': mappings,
'outputDir': 'Expected',
'codeGenerator': 'CSharp',
'nsPrefix': 'Fixtures',
'flatteningThreshold': '1',
'addCredentials': true
}, cb);
});
gulp.task('regenerate:expected:csallsync', function(cb){
mappings = mergeOptions(
{
'PetstoreV2AllSync': 'Swagger/swagger.2.0.example.v2.json',
});
regenExpected({
'outputBaseDir': 'AutoRest/Generators/CSharp/CSharp.Tests',
'inputBaseDir': 'AutoRest/Generators/CSharp/CSharp.Tests',
'mappings': mappings,
'outputDir': 'Expected',
'codeGenerator': 'CSharp',
'nsPrefix': 'Fixtures',
'flatteningThreshold': '1',
'syncMethods': 'all'
}, cb);
});
gulp.task('regenerate:expected:csnosync', function(cb){
mappings = mergeOptions(
{
'PetstoreV2NoSync': 'Swagger/swagger.2.0.example.v2.json',
});
regenExpected({
'outputBaseDir': 'AutoRest/Generators/CSharp/CSharp.Tests',
'inputBaseDir': 'AutoRest/Generators/CSharp/CSharp.Tests',
'mappings': mappings,
'outputDir': 'Expected',
'codeGenerator': 'CSharp',
'nsPrefix': 'Fixtures',
'flatteningThreshold': '1',
'syncMethods': 'none'
}, cb);
});
gulp.task('regenerate:expected:csazureallsync', function(cb){
mappings = mergeOptions(
{
'AcceptanceTests/AzureBodyDurationAllSync': '../../../TestServer/swagger/body-duration.json'
});
regenExpected({
'outputBaseDir': 'AutoRest/Generators/CSharp/Azure.CSharp.Tests',
'inputBaseDir': 'AutoRest/Generators/CSharp/Azure.CSharp.Tests',
'mappings': mappings,
'outputDir': 'Expected',
'codeGenerator': 'Azure.CSharp',
'nsPrefix': 'Fixtures',
'flatteningThreshold': '1',
'syncMethods': 'all'
}, cb);
});
gulp.task('regenerate:expected:csazurenosync', function(cb){
mappings = mergeOptions(
{
'AcceptanceTests/AzureBodyDurationNoSync': '../../../TestServer/swagger/body-duration.json'
});
regenExpected({
'outputBaseDir': 'AutoRest/Generators/CSharp/Azure.CSharp.Tests',
'inputBaseDir': 'AutoRest/Generators/CSharp/Azure.CSharp.Tests',
'mappings': mappings,
'outputDir': 'Expected',
'codeGenerator': 'Azure.CSharp',
'nsPrefix': 'Fixtures',
'flatteningThreshold': '1',
'syncMethods': 'none'
}, cb);
});
gulp.task('regenerate:expected:cscomposite', function (cb) {
regenExpected({
'outputBaseDir': 'AutoRest/Generators/CSharp/CSharp.Tests',
'inputBaseDir': 'AutoRest/Generators/CSharp/CSharp.Tests',
'mappings': compositeMappings,
'modeler' : 'CompositeSwagger',
'outputDir': 'Expected',
'codeGenerator': 'CSharp',
'nsPrefix': 'Fixtures',
'flatteningThreshold': '1'
}, cb);
});
gulp.task('regenerate:expected:csazurecomposite', function (cb) {
regenExpected({
'outputBaseDir': 'AutoRest/Generators/CSharp/Azure.CSharp.Tests',
'inputBaseDir': 'AutoRest/Generators/CSharp/Azure.CSharp.Tests',
'mappings': azureCompositeMappings,
'modeler': 'CompositeSwagger',
'outputDir': 'Expected',
'codeGenerator': 'Azure.CSharp',
'nsPrefix': 'Fixtures',
'flatteningThreshold': '1'
}, cb);
});
gulp.task('regenerate:expected:samples', ['regenerate:expected:samples:azure'], function(){
var autorestConfigPath = path.join(basePathOrThrow(), 'binaries/net45/AutoRest.Release.json');
var content = fs.readFileSync(autorestConfigPath).toString();
if (content.charCodeAt(0) === 0xFEFF) {
content = content.slice(1);
}
var autorestConfig = JSON.parse(content);
for (var lang in autorestConfig.codeGenerators) {
if (!lang.match(/^Azure\..+/)) {
var generateCmd = path.join(basePathOrThrow(), 'binaries/net45/AutoRest.exe') + ' -Modeler Swagger -CodeGenerator ' + lang + ' -OutputDirectory ' + path.join(basePathOrThrow(), 'Samples/petstore/' + lang) + ' -Namespace Petstore -Input ' + path.join(basePathOrThrow(), 'Samples/petstore/petstore.json') + ' -Header NONE';
exec(clrCmd(generateCmd), function(err, stdout, stderr) {
console.log(stdout);
console.error(stderr);
});
}
}
});
gulp.task('regenerate:expected:samples:azure', function(){
var autorestConfigPath = path.join(basePathOrThrow(), 'binaries/net45/AutoRest.Release.json');
var content = fs.readFileSync(autorestConfigPath).toString();
if (content.charCodeAt(0) === 0xFEFF) {
content = content.slice(1);
}
var autorestConfig = JSON.parse(content);
for (var lang in autorestConfig.codeGenerators) {
if (lang.match(/^Azure\..+/)) {
var generateCmd = path.join(basePathOrThrow(), 'binaries/net45/AutoRest.exe') + ' -Modeler Swagger -CodeGenerator ' + lang + ' -OutputDirectory ' + path.join(basePathOrThrow(), 'Samples/azure-storage/' + lang) + ' -Namespace Petstore -Input ' + path.join(basePathOrThrow(), 'Samples/azure-storage/azure-storage.json') + ' -Header NONE';
exec(clrCmd(generateCmd), function(err, stdout, stderr) {
console.log(stdout);
console.error(stderr);
});
}
}
});
var msBuildToolsVersion = 12.0;
if (isWindows) {
fs.readdirSync('C:/Program Files (x86)/MSBuild/').forEach(function (item) {
var itemAsFloat = parseFloat(item);
if (itemAsFloat > msBuildToolsVersion) {
msBuildToolsVersion = itemAsFloat;
}
});
}
var msbuildDefaults = {
stdout: process.stdout,
stderr: process.stderr,
maxBuffer: MAX_BUFFER,
verbosity: 'normal',
errorOnFail: true,
toolsVersion: msBuildToolsVersion
};
gulp.task('clean:node_modules', function(cb) {
del(['./AutoRest/**/node_modules', './ClientRuntimes/**/node_modules'], cb)
})
gulp.task('clean:build', ['clean:node_modules'], function (cb) {
return gulp.src('build.proj').pipe(msbuild(mergeOptions(msbuildDefaults, {
targets: ['clean']
})));
});
gulp.task('clean:templates', function(cb) {
del([
'./AutoRest/**/Templates/*.cs',
], cb);
});
gulp.task('clean:generatedTest', function(cb) {
var basePath = './AutoRest/NugetPackageTest';
del([
path.join(basePath, 'Generated/**/*'),
path.join(basePath, 'packages/**/*'),
], cb);
});
gulp.task('clean', ['clean:build', 'clean:templates', 'clean:generatedTest']);
gulp.task('syncDependencies:nugetProj', function() {
var dirs = glob.sync(path.join(basePathOrThrow(), '/**/project.json'))
.map(function(filePath) {
return path.dirname(filePath);
});
return gulp.src(dirs.map(function (dir) {
return path.join(dir, '/**/AssemblyInfo.cs');
}), {
base: './'
})
.pipe(nugetProjSync({
default_version: DEFAULT_ASSEMBLY_VERSION
}))
.pipe(gulp.dest('.'));
})
gulp.task('syncDependencies:nuspec', function() {
var dirs = glob.sync(path.join(basePathOrThrow(), '/**/packages.config'))
.map(function(filePath) {
return path.dirname(filePath);
});
return gulp.src(dirs.map(function (dir) {
return path.join(dir, '/**/*.nuspec');
}), {
base: './'
})
.pipe(nuspecSync())
.pipe(gulp.dest('.'));
});
gulp.task('syncDependencies:runtime', ['syncDependencies:runtime:cs', 'syncDependencies:runtime:csazure', 'syncDependencies:runtime:node', 'syncDependencies:runtime:nodeazure', 'syncDependencies:runtime:ruby', 'syncDependencies:runtime:rubyazure']);
gulp.task('syncDependencies', ['syncDependencies:nugetProj', 'syncDependencies:nuspec', 'syncDependencies:runtime']);
gulp.task('build', function(cb) {
  // warning 0219 (unused variables) is excluded because, treated as an error, it makes the build fail on xbuild
return gulp.src('build.proj').pipe(msbuild(mergeOptions(msbuildDefaults, {
targets: ['build'],
properties: { WarningsNotAsErrors: 0219, Configuration: 'Debug' },
stdout: true,
errorOnFail: true
})));
});
gulp.task('build:release', function(cb) {
  // warning 0219 (unused variables) is excluded because, treated as an error, it makes the build fail on xbuild
return gulp.src('build.proj').pipe(msbuild(mergeOptions(msbuildDefaults,{
targets: ['build'],
properties: { WarningsNotAsErrors: 0219, Configuration: 'Release' }
})));
});
gulp.task('package', function(cb) {
return gulp.src('build.proj').pipe(msbuild(mergeOptions(msbuildDefaults, {
targets: ['package'],
verbosity: 'normal',
})));
});
gulp.task('test:clientruntime:node', shell.task('npm test', { cwd: './ClientRuntimes/NodeJS/ms-rest/', verbosity: 3 }));
gulp.task('test:clientruntime:nodeazure', shell.task('npm test', { cwd: './ClientRuntimes/NodeJS/ms-rest-azure/', verbosity: 3 }));
gulp.task('test:clientruntime:ruby', ['syncDependencies:runtime:ruby'], shell.task('bundle exec rspec', { cwd: './ClientRuntimes/Ruby/ms-rest/', verbosity: 3 }));
gulp.task('test:clientruntime:rubyazure', ['syncDependencies:runtime:rubyazure'], shell.task('bundle exec rspec', { cwd: './ClientRuntimes/Ruby/ms-rest-azure/', verbosity: 3 }));
gulp.task('test:clientruntime:java', shell.task(basePathOrThrow() + '/gradlew :client-runtime:check', { cwd: './', verbosity: 3 }));
gulp.task('test:clientruntime:javaazure', shell.task(basePathOrThrow() + '/gradlew :azure-client-runtime:check', { cwd: './', verbosity: 3 }));
gulp.task('test:clientruntime:python', shell.task('tox', { cwd: './ClientRuntimes/Python/msrest/', verbosity: 3 }));
gulp.task('test:clientruntime:pythonazure', shell.task('tox', { cwd: './ClientRuntimes/Python/msrestazure/', verbosity: 3 }));
gulp.task('test:clientruntime:javaauthjdk', shell.task(basePathOrThrow() + '/gradlew :azure-client-authentication:check', { cwd: './', verbosity: 3 }));
gulp.task('test:clientruntime:javaauthandroid', shell.task(basePathOrThrow() + '/gradlew :azure-android-client-authentication:check', { cwd: './', verbosity: 3 }));
gulp.task('test:clientruntime', function (cb) {
runSequence('test:clientruntime:node', 'test:clientruntime:nodeazure',
'test:clientruntime:ruby', 'test:clientruntime:rubyazure',
'test:clientruntime:python', 'test:clientruntime:pythonazure',
'test:clientruntime:java', 'test:clientruntime:javaazure',
'test:clientruntime:javaauthjdk', 'test:clientruntime:javaauthandroid', cb);
});
gulp.task('test:node', shell.task('npm test', {cwd: './AutoRest/Generators/NodeJS/NodeJS.Tests/', verbosity: 3}));
gulp.task('test:node:azure', shell.task('npm test', {cwd: './AutoRest/Generators/NodeJS/Azure.NodeJS.Tests/', verbosity: 3}));
gulp.task('test:ruby', ['regenerate:expected:ruby'], shell.task('ruby RspecTests/tests_runner.rb', { cwd: './AutoRest/Generators/Ruby/Ruby.Tests', verbosity: 3 }));
gulp.task('test:ruby:azure', ['regenerate:expected:rubyazure'], shell.task('ruby RspecTests/tests_runner.rb', { cwd: './AutoRest/Generators/Ruby/Azure.Ruby.Tests', verbosity: 3 }));
gulp.task('test:java', shell.task(basePathOrThrow() + '/gradlew :codegen-tests:check', {cwd: './', verbosity: 3}));
gulp.task('test:java:azure', shell.task(basePathOrThrow() + '/gradlew :azure-codegen-tests:check', {cwd: './', verbosity: 3}));
gulp.task('test:python', shell.task('tox', {cwd: './AutoRest/Generators/Python/Python.Tests/', verbosity: 3}));
gulp.task('test:python:azure', shell.task('tox', {cwd: './AutoRest/Generators/Python/Azure.Python.Tests/', verbosity: 3}));
var xunitTestsDlls = [
'AutoRest/AutoRest.Core.Tests/bin/Net45-Debug/AutoRest.Core.Tests.dll',
'AutoRest/Modelers/Swagger.Tests/bin/Net45-Debug/AutoRest.Modeler.Swagger.Tests.dll',
'AutoRest/Generators/Azure.Common/Azure.Common.Tests/bin/Net45-Debug/AutoRest.Generator.Azure.Common.Tests.dll',
'AutoRest/Generators/Extensions/Extensions.Tests/bin/Net45-Debug/AutoRest.Generator.Extensions.Tests.dll',
'AutoRest/Generators/Extensions/Azure.Extensions.Tests/bin/Net45-Debug/AutoRest.Generator.Azure.Extensions.Tests.dll'
];
var xunitNetCoreXproj = [
'AutoRest/Generators/CSharp/CSharp.Tests/project.json',
'AutoRest/Generators/CSharp/Azure.CSharp.Tests/project.json',
'ClientRuntimes/CSharp/Microsoft.Rest.ClientRuntime.Tests/project.json',
'ClientRuntimes/CSharp/Microsoft.Rest.ClientRuntime.Azure.Tests/project.json'
];
var defaultShellOptions = {
verbosity: 3,
env: {
AUTOREST_TEST_SERVER_PATH: path.resolve('./AutoRest/TestServer')
}
};
var clrCmd = function(cmd){
return isWindows ? cmd : ('mono ' + cmd);
};
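// e.g. on Linux/macOS clrCmd('AutoRest.exe -h') returns 'mono AutoRest.exe -h';
// on Windows the command string is returned unchanged.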
var execClrCmd = function(cmd, options){
gutil.log(cmd);
return shell(clrCmd(cmd), options);
};
var clrTask = function(cmd, options){
return shell.task(clrCmd(cmd), options);
};
var xunit = function(template, options){
var xunitRunner = path.resolve('packages/xunit.runner.console.2.1.0/tools/xunit.console.exe');
return execClrCmd(xunitRunner + ' ' + template, options);
}
var xunitnetcore = function(options){
options.templateData = {
f: function (s) {
return path.basename(path.dirname(s))
}
};
var printStatusCodeCmd = 'echo Status code: %errorlevel%';
if (!isWindows) {
printStatusCodeCmd = 'echo Status code: $?';
}
var netcoreScript = 'dotnet test "<%= file.path %>" -verbose -xml "' + path.join(basePathOrThrow(), '/TestResults/') + '<%= f(file.path) %>.xml" && ' + printStatusCodeCmd;
return shell(netcoreScript, options);
}
gulp.task('test:xunit', ['test:xunit:netcore'], function () {
return gulp.src(xunitTestsDlls).pipe(xunit('<%= file.path %> -noshadow -noappdomain -diagnostics', defaultShellOptions));
});
gulp.task('test:xunit:netcore', ['regenerate:expected:cs', 'regenerate:expected:csazure'], function () {
return gulp.src(xunitNetCoreXproj)
.pipe(debug())
.pipe(xunitnetcore(defaultShellOptions));
});
var nugetPath = path.resolve('Tools/NuGet.exe');
var nugetTestProjDir = path.resolve('AutoRest/NugetPackageTest');
var packagesDir = path.resolve('binaries/packages');
var cachedClientRuntimePackages = path.join(process.env.HOME || (process.env.HOMEDRIVE + process.env.HOMEPATH),
'AppData', 'Local', 'NuGet', 'Cache', "Microsoft.Rest.ClientRuntime.*.nupkg");
gulp.task('test:nugetPackages:restore', ['test:nugetPackages:clean'], clrTask(nugetPath + ' restore ' + path.join(nugetTestProjDir, '/NugetPackageTest.sln') + ' -source "' + path.resolve(packagesDir) + ';https://www.nuget.org/api/v2/"'));
gulp.task('test:nugetPackages:clean', function () {
//turn on 'force' so we can remove files outside of repo folder.
return del([path.join(nugetTestProjDir, 'Generated'), cachedClientRuntimePackages], {'force' : true});
});
var autoRestExe = function(){
gutil.log(glob.sync(path.join(basePathOrThrow(), 'AutoRest/NugetPackageTest/packages/autorest.*/tools/AutoRest.exe')));
return glob.sync(path.join(basePathOrThrow(), 'AutoRest/NugetPackageTest/packages/autorest.*/tools/AutoRest.exe'))[0];
}
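// e.g. this resolves to the first match of packages/autorest.*/tools/AutoRest.exe
// once the NuGet restore task above has populated the packages folder.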
gulp.task('test:nugetPackages:generate:csharp', ['test:nugetPackages:restore', 'test:nugetPackages:clean'], function(){
var csharp = autoRestExe() + ' -Modeler Swagger -CodeGenerator CSharp -OutputDirectory ' + path.join(nugetTestProjDir, '/Generated/CSharp') + ' -Namespace Fixtures.Bodynumber -Input <%= file.path %> -Header NONE';
return gulp.src('AutoRest/TestServer/swagger/body-number.json').pipe(execClrCmd(csharp, {verbosity: 3}));
});
gulp.task('test:nugetPackages:generate:node', ['test:nugetPackages:restore', 'test:nugetPackages:clean'], function(){
var nodejs = autoRestExe() + ' -Modeler Swagger -CodeGenerator NodeJS -OutputDirectory ' + path.join(nugetTestProjDir, '/Generated/NodeJS') + ' -Input <%= file.path %> -Header NONE';
return gulp.src('AutoRest/TestServer/swagger/body-number.json').pipe(execClrCmd(nodejs, {verbosity: 3}));
});
gulp.task('test:nugetPackages:generate', ['test:nugetPackages:generate:csharp', 'test:nugetPackages:generate:node']);
gulp.task('test:nugetPackages:build', ['test:nugetPackages:generate'], function(){
return gulp.src(path.join(nugetTestProjDir, 'NugetPackageCSharpTest.csproj'))
.pipe(msbuild(mergeOptions(msbuildDefaults, { targets: ['build'], properties: { WarningsNotAsErrors: 0219, Configuration: 'Debug' } })));
});
gulp.task('test:nugetPackages:xunit', ['test:nugetPackages:build'], function(){
var xunitSrc = gulp.src(path.join(nugetTestProjDir, 'bin/Debug/NuGetPackageCSharpTest.dll'));
return xunitSrc.pipe(xunit('<%= file.path %> -noshadow -noappdomain', defaultShellOptions))
});
gulp.task('test:nugetPackages:npm', ['test:nugetPackages:generate'], shell.task('npm test', {cwd: nugetTestProjDir, verbosity: 3}))
gulp.task('test', function(cb){
if (isWindows) {
runSequence(
'test:xunit',
'test:clientruntime',
'test:nugetPackages:xunit',
'test:node',
'test:node:azure',
'test:nugetPackages:npm',
'test:ruby',
'test:ruby:azure',
'test:java',
'test:java:azure',
'test:python',
'test:python:azure',
cb);
} else {
runSequence(
'test:xunit',
'test:clientruntime',
'test:node',
'test:node:azure',
'test:ruby',
'test:ruby:azure',
'test:java',
'test:java:azure',
'test:python',
'test:python:azure',
cb);
}
});
gulp.task('analysis', function(cb) {
return gulp.src('build.proj').pipe(msbuild(mergeOptions(msbuildDefaults, {
targets: ['codeanalysis'],
properties: { WarningsNotAsErrors: 0219, Configuration: 'Debug' },
})));
});
gulp.task('default', function(cb){
// Notes:
// Analysis runs rebuild under the covers, so this causes build to be run in DEBUG
// The build RELEASE causes release bits to be built, so we can package RELEASE dlls
// Test then runs in DEBUG, but uses the packages created in package
if (isWindows) {
runSequence('clean', 'build', 'analysis', 'build:release', 'package', 'test', cb);
} else {
runSequence('clean', 'build', 'test', cb);
}
});
| 1 | 22,190 | do we need to add this one in this PR? | Azure-autorest | java |
@@ -223,7 +223,6 @@ namespace Nethermind.State.Test.Runner
public void ReportGasUpdateForVmTrace(long refund, long gasAvailable)
{
- throw new NotImplementedException();
}
public void ReportRefundForVmTrace(long refund, long gasAvailable) | 1 | /*
* Copyright (c) 2018 Demerzel Solutions Limited
* This file is part of the Nethermind library.
*
* The Nethermind library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The Nethermind library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
*/
using System;
using System.Collections.Generic;
using System.Linq;
using Nethermind.Core;
using Nethermind.Core.Crypto;
using Nethermind.Core.Extensions;
using Nethermind.Dirichlet.Numerics;
using Nethermind.Evm;
using Nethermind.Evm.Tracing;
using Nethermind.State;
namespace Nethermind.State.Test.Runner
{
public class StateTestTxTracer : ITxTracer
{
private StateTestTxTraceEntry _traceEntry;
private StateTestTxTrace _trace = new StateTestTxTrace();
private bool _gasAlreadySetForCurrentOp;
public bool IsTracingReceipt => true;
bool ITxTracer.IsTracingActions => false;
public bool IsTracingOpLevelStorage => true;
public bool IsTracingMemory { get; set; } = true;
bool ITxTracer.IsTracingInstructions => true;
public bool IsTracingRefunds { get; } = false;
public bool IsTracingCode => false;
public bool IsTracingStack { get; set; } = true;
bool ITxTracer.IsTracingState => false;
public bool IsTracingBlockHash { get; } = false;
public void MarkAsSuccess(Address recipient, long gasSpent, byte[] output, LogEntry[] logs, Keccak stateRoot = null)
{
_trace.Result.Output = output;
_trace.Result.GasUsed = gasSpent;
}
public void MarkAsFailed(Address recipient, long gasSpent, byte[] output, string error, Keccak stateRoot = null)
{
_trace.Result.Error = _traceEntry?.Error ?? error;
_trace.Result.Output = output ?? Bytes.Empty;
_trace.Result.GasUsed = gasSpent;
}
public void StartOperation(int depth, long gas, Instruction opcode, int pc)
{
// var previousTraceEntry = _traceEntry;
_gasAlreadySetForCurrentOp = false;
_traceEntry = new StateTestTxTraceEntry();
_traceEntry.Pc = pc;
_traceEntry.Operation = (byte)opcode;
_traceEntry.OperationName = Enum.GetName(typeof(Instruction), opcode);
_traceEntry.Gas = gas;
_traceEntry.Depth = depth;
_trace.Entries.Add(_traceEntry);
// if (_traceEntry.Depth > (previousTraceEntry?.Depth ?? 0))
// {
// _traceEntry.Storage = new Dictionary<string, string>();
// _trace.StorageByDepth.Push(previousTraceEntry != null ? previousTraceEntry.Storage : new Dictionary<string, string>());
// }
// else if (_traceEntry.Depth < (previousTraceEntry?.Depth ?? 0))
// {
// if (previousTraceEntry == null)
// {
// throw new InvalidOperationException("Unexpected missing previous trace when leaving a call.");
// }
//
// _traceEntry.Storage = new Dictionary<string, string>(_trace.StorageByDepth.Pop());
// }
// else
// {
// if (previousTraceEntry == null)
// {
// throw new InvalidOperationException("Unexpected missing previous trace on continuation.");
// }
//
// _traceEntry.Storage = new Dictionary<string, string>(previousTraceEntry.Storage);
// }
}
public void ReportOperationError(EvmExceptionType error)
{
_traceEntry.Error = GetErrorDescription(error);
}
private string GetErrorDescription(EvmExceptionType evmExceptionType)
{
switch (evmExceptionType)
{
case EvmExceptionType.None:
return null;
case EvmExceptionType.BadInstruction:
return "BadInstruction";
case EvmExceptionType.StackOverflow:
return "StackOverflow";
case EvmExceptionType.StackUnderflow:
return "StackUnderflow";
case EvmExceptionType.OutOfGas:
return "gas uint64 overflow";
case EvmExceptionType.InvalidJumpDestination:
return "BadJumpDestination";
case EvmExceptionType.AccessViolation:
return "AccessViolation";
case EvmExceptionType.StaticCallViolation:
return "evm: write protection";
default:
return "Error";
}
}
public void ReportOperationRemainingGas(long gas)
{
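            // The first remaining-gas report after StartOperation fixes the current
            // opcode's cost below; e.g. Gas = 100_000 and gas = 99_997 give GasCost = 3
            // (the cost of an ADD). Later reports for the same op are ignored.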
if (!_gasAlreadySetForCurrentOp)
{
_gasAlreadySetForCurrentOp = true;
_traceEntry.GasCost = _traceEntry.Gas - gas;
}
}
public void SetOperationMemorySize(ulong newSize)
{
_traceEntry.UpdateMemorySize(newSize);
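            // Memory is kept as a "0x"-prefixed hex string (two characters per byte),
            // so pad with '0' until the string covers the new memory size; the "- 2"
            // accounts for the prefix.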
int diff = (int) _traceEntry.MemSize * 2 - (_traceEntry.Memory.Length - 2);
if (diff > 0)
{
_traceEntry.Memory += new string('0', diff);
}
}
public void ReportMemoryChange(long offset, Span<byte> data)
{
}
public void ReportStorageChange(Span<byte> key, Span<byte> value)
{
}
public void SetOperationStorage(Address address, UInt256 storageIndex, byte[] newValue, byte[] currentValue)
{
// byte[] bigEndian = new byte[32];
// storageIndex.ToBigEndian(bigEndian);
// _traceEntry.Storage[bigEndian.ToHexString(false)] = newValue.PadLeft(32).ToHexString(false);
}
public void ReportSelfDestruct(Address address, UInt256 balance, Address refundAddress)
{
throw new NotSupportedException();
}
public void ReportBalanceChange(Address address, UInt256? before, UInt256? after)
{
throw new NotSupportedException();
}
public void ReportCodeChange(Address address, byte[] before, byte[] after)
{
throw new NotSupportedException();
}
public void ReportNonceChange(Address address, UInt256? before, UInt256? after)
{
throw new NotSupportedException();
}
public void ReportAccountRead(Address address)
{
throw new NotImplementedException();
}
public void ReportStorageChange(StorageCell storageAddress, byte[] before, byte[] after)
{
throw new NotSupportedException();
}
public void ReportAction(long gas, UInt256 value, Address @from, Address to, byte[] input, ExecutionType callType, bool isPrecompileCall = false)
{
throw new NotSupportedException();
}
public void ReportActionEnd(long gas, byte[] output)
{
throw new NotSupportedException();
}
public void ReportActionError(EvmExceptionType exceptionType)
{
throw new NotSupportedException();
}
public void ReportActionEnd(long gas, Address deploymentAddress, byte[] deployedCode)
{
throw new NotSupportedException();
}
public void ReportBlockHash(Keccak blockHash)
{
throw new NotImplementedException();
}
public void ReportByteCode(byte[] byteCode)
{
throw new NotSupportedException();
}
public void ReportGasUpdateForVmTrace(long refund, long gasAvailable)
{
throw new NotImplementedException();
}
public void ReportRefundForVmTrace(long refund, long gasAvailable)
{
}
public void ReportRefund(long refund)
{
_traceEntry.Refund = (int)refund;
}
public void ReportExtraGasPressure(long extraGasPressure)
{
throw new NotImplementedException();
}
public void SetOperationStack(List<string> stackTrace)
{
_traceEntry.Stack = new List<string>();
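            // Each stack item is normalized to a minimal hex literal below, e.g.
            // "0x00001f" becomes "0x1f" and an all-zero word becomes "0x0".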
foreach (string s in stackTrace)
{
ReadOnlySpan<char> inProgress = s.AsSpan();
if (s.StartsWith("0x"))
{
inProgress = inProgress.Slice(2);
}
inProgress = inProgress.TrimStart('0');
_traceEntry.Stack.Add(inProgress.Length == 0 ? "0x0" : "0x" + inProgress.ToString());
}
}
public void ReportStackPush(Span<byte> stackItem)
{
}
public void SetOperationMemory(List<string> memoryTrace)
{
_traceEntry.Memory = string.Concat("0x", string.Join("", memoryTrace.Select(mt => mt.Replace("0x", string.Empty))));
}
public StateTestTxTrace BuildResult()
{
return _trace;
}
}
} | 1 | 24,305 | where is a test for it? | NethermindEth-nethermind | .cs |
@@ -46,7 +46,7 @@ class Page extends CmsCompoundObject
*/
public $rules = [
'title' => 'required',
- 'url' => ['required', 'regex:/^\/[a-z0-9\/\:_\-\*\[\]\+\?\|\.\^\\\$]*$/i']
+ 'url' => ['required', 'regex:/^\/[a-z0-9\/\:_\-\*\[\]\+\?\|\.\^\\\$]*$/i']
];
/** | 1 | <?php namespace Cms\Classes;
use Lang;
use Cms\Classes\Theme;
use Cms\Classes\Layout;
use ApplicationException;
use October\Rain\Filesystem\Definitions as FileDefinitions;
/**
* The CMS page class.
*
* @package october\cms
* @author Alexey Bobkov, Samuel Georges
*/
class Page extends CmsCompoundObject
{
/**
* @var string The container name associated with the model, eg: pages.
*/
protected $dirName = 'pages';
/**
* @var array The attributes that are mass assignable.
*/
protected $fillable = [
'url',
'layout',
'title',
'description',
'is_hidden',
'meta_title',
'meta_description',
'markup',
'settings',
'code'
];
/**
* @var array The API bag allows the API handler code to bind arbitrary
* data to the page object.
*/
public $apiBag = [];
/**
* @var array The rules to be applied to the data.
*/
public $rules = [
'title' => 'required',
'url' => ['required', 'regex:/^\/[a-z0-9\/\:_\-\*\[\]\+\?\|\.\^\\\$]*$/i']
];
/**
* Creates an instance of the object and associates it with a CMS theme.
* @param array $attributes
*/
public function __construct(array $attributes = [])
{
parent::__construct($attributes);
$this->customMessages = [
'url.regex' => Lang::get('cms::lang.page.invalid_url')
];
}
protected function parseSettings()
{
}
/**
     * Returns the name of a PHP class to use as a parent for the PHP class created for the object's PHP section.
* @return mixed Returns the class name or null.
*/
public function getCodeClassParent()
{
return '\Cms\Classes\PageCode';
}
/**
* Returns a list of layouts available in the theme.
* This method is used by the form widget.
* @return array Returns an array of strings.
*/
public function getLayoutOptions()
{
if (!($theme = Theme::getEditTheme())) {
throw new ApplicationException(Lang::get('cms::lang.theme.edit.not_found'));
}
$layouts = Layout::listInTheme($theme, true);
$result = [];
$result[null] = Lang::get('cms::lang.page.no_layout');
foreach ($layouts as $layout) {
$baseName = $layout->getBaseFileName();
if (FileDefinitions::isPathIgnored($baseName)) {
continue;
}
$result[$baseName] = strlen($layout->name) ? $layout->name : $baseName;
}
return $result;
}
/**
* Helper that returns a nicer list of pages for use in dropdowns.
* @return array
*/
public static function getNameList()
{
$result = [];
$pages = self::sortBy('baseFileName')->all();
foreach ($pages as $page) {
$result[$page->baseFileName] = $page->title.' ('.$page->baseFileName.')';
}
return $result;
}
/**
* Helper that makes a URL for a page in the active theme.
* @param mixed $page Specifies the Cms Page file name.
* @param array $params Route parameters to consider in the URL.
* @return string
*/
public static function url($page, $params = [])
{
/*
* Reuse existing controller or create a new one,
         * assuming that the method is not called during front-end
         * request processing.
*/
$controller = Controller::getController() ?: new Controller;
return $controller->pageUrl($page, $params, true);
}
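    // A minimal usage sketch ('blog-post' and its :id parameter are hypothetical):
    // Page::url('blog-post', ['id' => 1]) returns the URL of that page, with :id
    // in the page's URL pattern replaced by 1.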
/**
* Handler for the pages.menuitem.getTypeInfo event.
* Returns a menu item type information. The type information is returned as array
* with the following elements:
* - references - a list of the item type reference options. The options are returned in the
* ["key"] => "title" format for options that don't have sub-options, and in the format
* ["key"] => ["title"=>"Option title", "items"=>[...]] for options that have sub-options. Optional,
* required only if the menu item type requires references.
* - nesting - Boolean value indicating whether the item type supports nested items. Optional,
* false if omitted.
* - dynamicItems - Boolean value indicating whether the item type could generate new menu items.
* Optional, false if omitted.
* - cmsPages - a list of CMS pages (objects of the Cms\Classes\Page class), if the item type requires
* a CMS page reference to resolve the item URL.
* @param string $type Specifies the menu item type
* @return array Returns an array
*/
public static function getMenuTypeInfo($type)
{
$result = [];
if ($type == 'cms-page') {
$theme = Theme::getActiveTheme();
$pages = self::listInTheme($theme, true);
$references = [];
foreach ($pages as $page) {
$references[$page->getBaseFileName()] = $page->title . ' ['.$page->getBaseFileName().']';
}
$result = [
'references' => $references,
'nesting' => false,
'dynamicItems' => false
];
}
return $result;
}
/**
* Handler for the pages.menuitem.resolveItem event.
* Returns information about a menu item. The result is an array
* with the following keys:
* - url - the menu item URL. Not required for menu item types that return all available records.
* The URL should be returned relative to the website root and include the subdirectory, if any.
* Use the Url::to() helper to generate the URLs.
* - isActive - determines whether the menu item is active. Not required for menu item types that
* return all available records.
* - items - an array of arrays with the same keys (url, isActive, items) + the title key.
* The items array should be added only if the $item's $nesting property value is TRUE.
* @param \RainLab\Pages\Classes\MenuItem $item Specifies the menu item.
* @param string $url Specifies the current page URL, normalized, in lower case
* @param \Cms\Classes\Theme $theme Specifies the current theme.
* The URL is specified relative to the website root, it includes the subdirectory name, if any.
* @return mixed Returns an array. Returns null if the item cannot be resolved.
*/
public static function resolveMenuItem($item, $url, $theme)
{
$result = null;
if ($item->type == 'cms-page') {
if (!$item->reference) {
return;
}
$page = self::loadCached($theme, $item->reference);
$controller = Controller::getController() ?: new Controller;
$pageUrl = $controller->pageUrl($item->reference, [], false);
$result = [];
$result['url'] = $pageUrl;
$result['isActive'] = $pageUrl == $url;
$result['mtime'] = $page ? $page->mtime : null;
}
return $result;
}
/**
* Handler for the backend.richeditor.getTypeInfo event.
* Returns a menu item type information. The type information is returned as array
* @param string $type Specifies the page link type
* @return array
*/
public static function getRichEditorTypeInfo($type)
{
$result = [];
if ($type == 'cms-page') {
$theme = Theme::getActiveTheme();
$pages = self::listInTheme($theme, true);
foreach ($pages as $page) {
$url = self::url($page->getBaseFileName());
$result[$url] = $page->title;
}
}
return $result;
}
}
| 1 | 12,958 | Why has this spacing been adjusted? | octobercms-october | php |
@@ -90,10 +90,15 @@ class PipelineBuilder(object):
raise api_errors.ApiInitializationError(e)
api_version = self.api_map.get(api_name).get('version')
- if api_version is None:
- api = api_class()
- else:
- api = api_class(version=api_version)
+ try:
+ if api_version is None:
+ api = api_class()
+ else:
+ api = api_class(version=api_version)
+ except api_errors.ApiExecutionError as e:
+ LOGGER.error('Failed to execute API %s, v=%s',
+ api_class_name, api_version)
+ raise api_errors.ApiInitializationError(e)
self.initialized_api_map[api_name] = api
| 1 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds the inventory pipelines to run, and in the correct order to run."""
import importlib
import sys
import anytree
from google.cloud.security.common.gcp_api import errors as api_errors
from google.cloud.security.common.util import file_loader
from google.cloud.security.common.util import log_util
from google.cloud.security.inventory import pipeline_requirements_map
# TODO: The next editor must remove this disable and correct issues.
# pylint: disable=missing-type-doc,missing-return-type-doc,redundant-returns-doc
LOGGER = log_util.get_logger(__name__)
class PipelineBuilder(object):
"""Inventory Pipeline Builder."""
def __init__(self, cycle_timestamp, config_path, flags,
api_map, dao_map):
"""Initialize the pipeline builder.
Args:
cycle_timestamp: String of timestamp, formatted as YYYYMMDDTHHMMSSZ.
config_path: String of the path to the inventory config file.
flags: Dictionary of flag values.
api_map: Dictionary of GCP API info, mapped to each resource.
dao_map: Dictionary of DAO instances, mapped to each resource.
Returns:
None
"""
self.cycle_timestamp = cycle_timestamp
self.config_path = config_path
self.flags = flags
self.api_map = api_map
self.dao_map = dao_map
self.initialized_api_map = {}
def _get_api(self, api_name):
"""Get the api instance for the pipeline.
The purpose is that we only want to initialize the APIs for the
pipelines that are enabled, in order to minimize setup.
Args:
api_name: String of the API name to get.
Returns:
Instance of the API.
"""
api = self.initialized_api_map.get(api_name)
if api is None:
api_module_path = 'google.cloud.security.common.gcp_api.{}'
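            # e.g. an api_map entry like {'module_name': 'cloudsql',
            # 'class_name': 'CloudsqlClient', 'version': 'v1beta4'} (values
            # illustrative) leads to importing
            # google.cloud.security.common.gcp_api.cloudsql below.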
try:
api_module_name = api_module_path.format(
self.api_map.get(api_name).get('module_name'))
api_module = importlib.import_module(api_module_name)
except (AttributeError, ImportError, TypeError, ValueError) as e:
LOGGER.error('Unable to get module %s\n%s', api_name, e)
raise api_errors.ApiInitializationError(e)
api_class_name = (
self.api_map.get(api_name).get('class_name'))
try:
api_class = getattr(api_module, api_class_name)
except AttributeError as e:
LOGGER.error('Unable to instantiate %s\n%s',
api_class_name, sys.exc_info()[0])
raise api_errors.ApiInitializationError(e)
api_version = self.api_map.get(api_name).get('version')
if api_version is None:
api = api_class()
else:
api = api_class(version=api_version)
self.initialized_api_map[api_name] = api
return api
def _find_runnable_pipelines(self, root):
"""Find the enabled pipelines to run.
Args:
root: PipelineNode representing the top-level starting point
of the pipeline dependency tree. The entire pipeline
dependency tree are tuple of children PipelineNodes
of this root.
Example:
root.resource_name = 'organizations'
root.enabled = True
root.parent = None
root.children = (pipeline_node1, pipeline_node2, ...)
Returns:
runnable_pipelines: List of the pipelines that will be run. The
order in the list represents the order they need to be run.
i.e. going top-down in the dependency tree.
"""
        # If a child pipeline is enabled, then all of its parents become
        # enabled, even if the parents were disabled. Manually traverse the
        # parents, since the anytree walker api doesn't fit this use case.
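        # For example, if 'organizations' is disabled in the config but a child
        # pipeline is enabled, the walk below re-enables 'organizations', since
        # a child's data can only be loaded after its parent resource.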
for node in anytree.iterators.PostOrderIter(root):
if node.enabled:
while node.parent is not None:
node.parent.enabled = node.enabled
node = node.parent
LOGGER.debug('Dependency tree of the pipelines: %s',
anytree.RenderTree(root, style=anytree.AsciiStyle())
.by_attr('resource_name'))
LOGGER.debug('Which pipelines are enabled: %s',
anytree.RenderTree(root, style=anytree.AsciiStyle())
.by_attr('enabled'))
# Now, we have the true state of whether a pipeline should be run.
# Get a list of pipeline instances that will actually be run.
# The order matters: must go top-down in the tree, by PreOrder.
# http://anytree.readthedocs.io/en/latest/apidoc/anytree.iterators.html
runnable_pipelines = []
for node in anytree.iterators.PreOrderIter(root):
if node.enabled:
module_path = 'google.cloud.security.inventory.pipelines.{}'
module_name = module_path.format(
pipeline_requirements_map.REQUIREMENTS_MAP
.get(node.resource_name)
.get('module_name'))
try:
module = importlib.import_module(module_name)
except (ImportError, TypeError, ValueError) as e:
LOGGER.error('Unable to import %s\n%s', module_name, e)
continue
# Convert module naming to class naming.
# Module naming is "this_is_foo"
# Class naming is "ThisIsFoo"
class_name = (
pipeline_requirements_map.REQUIREMENTS_MAP
.get(node.resource_name)
.get('module_name')
.title()
.replace('_', ''))
try:
pipeline_class = getattr(module, class_name)
except AttributeError:
LOGGER.error('Unable to instantiate %s\n%s',
class_name, sys.exc_info()[0])
continue
api_name = (pipeline_requirements_map.REQUIREMENTS_MAP
.get(node.resource_name)
.get('api_name'))
try:
api = self._get_api(api_name)
except api_errors.ApiInitializationError:
continue
dao = self.dao_map.get(
pipeline_requirements_map.REQUIREMENTS_MAP
.get(node.resource_name)
.get('dao_name'))
if dao is None:
LOGGER.error('Unable to find dao for %s',
node.resource_name)
continue
pipeline = pipeline_class(
self.cycle_timestamp, self.flags, api, dao)
runnable_pipelines.append(pipeline)
return runnable_pipelines
def _build_dependency_tree(self):
"""Build the dependency tree with all the pipeline nodes.
Returns:
PipelineNode representing the top-level starting point
of the pipeline dependency tree. The entire pipeline
dependency tree are children of this root.
Example:
root.resource_name = 'organizations'
root.enabled = True
root.parent = None
root.children = (pipeline_node1, pipeline_node2, ...)
"""
# First pass: map all the pipelines to their own nodes,
# regardless if they should run or not.
map_of_all_pipeline_nodes = {}
config = file_loader.read_and_parse_file(self.config_path)
configured_pipelines = config.get('pipelines', [])
for entry in configured_pipelines:
map_of_all_pipeline_nodes[entry.get('resource')] = PipelineNode(
entry.get('resource'), entry.get('enabled'))
# Another pass: build the dependency tree by setting the parents
# correctly on all the nodes.
for entry in configured_pipelines:
parent_name = (
pipeline_requirements_map.REQUIREMENTS_MAP.get(
entry.get('resource')).get('depends_on'))
if parent_name is not None:
parent_node = map_of_all_pipeline_nodes[parent_name]
map_of_all_pipeline_nodes[entry.get('resource')].parent = (
parent_node)
# Assume root is organizations.
return map_of_all_pipeline_nodes.get('organizations')
def build(self):
"""Build the pipelines to load data.
Returns:
List of pipelines instances that will be run.
"""
root = self._build_dependency_tree()
return self._find_runnable_pipelines(root)
class PipelineNode(anytree.node.NodeMixin):
"""A custom anytree node with pipeline attributes.
More info at anytree's documentation.
http://anytree.readthedocs.io/en/latest/apidoc/anytree.node.html
"""
def __init__(self, resource_name, enabled, parent=None):
"""Initialize the pipeline node.
Args:
resource_name: String of name of the resource.
enabled: Boolean whether the pipeline should run.
parent: PipelineNode of this node's parent.
Returns:
None
"""
self.resource_name = resource_name
self.enabled = enabled
self.parent = parent
| 1 | 26,323 | Sorry to do this, since you're touching this file can you resolve the pylint doc messages above? | forseti-security-forseti-security | py |
@@ -81,7 +81,7 @@ class BigQueryWriter(object):
}
bq_data.append({
'json': row,
- 'insertId': "%s-%s" % (listen['user_name'], listen['listened_at'])
+ 'insertId': "%s-%s-%s" % (listen['user_name'], listen['listened_at'], listen['recording_msid'])
})
body = { 'rows' : bq_data } | 1 | #!/usr/bin/env python3
import sys
import os
import ujson
import json
import logging
import pika
from time import time, sleep
import listenbrainz.config as config
from redis import Redis
from googleapiclient import discovery
from googleapiclient.errors import HttpError
from oauth2client.client import GoogleCredentials
REPORT_FREQUENCY = 5000
APP_CREDENTIALS_FILE = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')
ERROR_RETRY_DELAY = 3 # number of seconds to wait until retrying an operation
DUMP_JSON_WITH_ERRORS = True
# TODO:
# Big query hardcoded data set ids
class BigQueryWriter(object):
def __init__(self):
self.log = logging.getLogger(__name__)
logging.basicConfig()
self.log.setLevel(logging.INFO)
self.redis = None
self.connection = None
self.channel = None
self.total_inserts = 0
self.inserts = 0
self.time = 0
def connect_to_rabbitmq(self):
while True:
try:
self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=config.RABBITMQ_HOST, port=config.RABBITMQ_PORT))
break
except Exception as e:
self.log.error("Cannot connect to rabbitmq: %s, retrying in 3 seconds" % str(e))
sleep(ERROR_RETRY_DELAY)
@staticmethod
def static_callback(ch, method, properties, body, obj):
return obj.callback(ch, method, body)
def callback(self, ch, method, body):
listens = ujson.loads(body)
count = len(listens)
# We've collected listens to write, now write them
bq_data = []
for listen in listens:
meta = listen['track_metadata']
row = {
'user_name' : listen['user_name'],
'listened_at' : listen['listened_at'],
'artist_msid' : meta['additional_info']['artist_msid'],
'artist_name' : meta['artist_name'],
'artist_mbids' : ",".join(meta['additional_info'].get('artist_mbids', [])),
'release_msid' : meta['additional_info'].get('release_msid', ''),
'release_name' : meta['additional_info'].get('release_name', ''),
'release_mbid' : meta['additional_info'].get('release_mbid', ''),
'track_name' : meta['track_name'],
'recording_msid' : listen['recording_msid'],
'recording_mbid' : meta['additional_info'].get('recording_mbid', ''),
'tags' : ",".join(meta['additional_info'].get('tags', [])),
}
bq_data.append({
'json': row,
'insertId': "%s-%s" % (listen['user_name'], listen['listened_at'])
})
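            # BigQuery uses insertId for best-effort de-duplication of streaming
            # inserts, so two listens that produce the same id may be collapsed
            # into a single stored row.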
body = { 'rows' : bq_data }
while True:
try:
t0 = time()
ret = self.bigquery.tabledata().insertAll(
projectId=config.BIGQUERY_PROJECT_ID,
datasetId=config.BIGQUERY_DATASET_ID,
tableId=config.BIGQUERY_TABLE_ID,
body=body).execute(num_retries=5)
self.time += time() - t0
break
except HttpError as e:
self.log.error("Submit to BigQuery failed: %s. Retrying in 3 seconds." % str(e))
except Exception as e:
self.log.error("Unknown exception on submit to BigQuery failed: %s. Retrying in 3 seconds." % str(e))
if DUMP_JSON_WITH_ERRORS:
self.log.error(json.dumps(body, indent=3))
sleep(ERROR_RETRY_DELAY)
while True:
try:
self.channel.basic_ack(delivery_tag = method.delivery_tag)
break
except pika.exceptions.ConnectionClosed:
self.connect_to_rabbitmq()
self.log.info("inserted %d listens." % count)
# collect and occasionally print some stats
self.inserts += count
if self.inserts >= REPORT_FREQUENCY:
self.total_inserts += self.inserts
if self.time > 0:
self.log.info("Inserted %d rows in %.1fs (%.2f listens/sec). Total %d rows." % \
                    (self.inserts, self.time, self.inserts / self.time, self.total_inserts))
self.inserts = 0
self.time = 0
return True
def start(self):
self.log.info("biqquer-writer init")
if not hasattr(config, "REDIS_HOST"):
self.log.error("Redis service not defined. Sleeping 3 seconds and exiting.")
sleep(ERROR_RETRY_DELAY)
return
if not hasattr(config, "RABBITMQ_HOST"):
self.log.error("RabbitMQ service not defined. Sleeping 3 seconds and exiting.")
sleep(ERROR_RETRY_DELAY)
return
# if we're not supposed to run, just sleep
if not config.WRITE_TO_BIGQUERY:
sleep(66666)
return
if not APP_CREDENTIALS_FILE:
self.log.error("BiqQueryWriter not started, the GOOGLE_APPLICATION_CREDENTIALS env var is not defined.")
sleep(1000)
return
if not os.path.exists(APP_CREDENTIALS_FILE):
self.log.error("BiqQueryWriter not started, %s is missing." % APP_CREDENTIALS_FILE)
sleep(1000)
return
credentials = GoogleCredentials.get_application_default()
self.bigquery = discovery.build('bigquery', 'v2', credentials=credentials)
while True:
try:
self.redis = Redis(host=config.REDIS_HOST, port=config.REDIS_PORT)
self.redis.ping()
break
except Exception as err:
self.log.error("Cannot connect to redis: %s. Retrying in 3 seconds and trying again." % str(err))
sleep(ERROR_RETRY_DELAY)
while True:
self.connect_to_rabbitmq()
self.channel = self.connection.channel()
self.channel.exchange_declare(exchange='unique', type='fanout')
self.channel.queue_declare('unique', durable=True)
self.channel.queue_bind(exchange='unique', queue='unique')
self.channel.basic_consume(lambda ch, method, properties, body: self.static_callback(ch, method, properties, body, obj=self), queue='unique')
self.log.info("bigquery-writer started")
try:
self.channel.start_consuming()
except pika.exceptions.ConnectionClosed:
self.log.info("Connection to rabbitmq closed. Re-opening.")
self.connection = None
self.channel = None
continue
self.connection.close()
if __name__ == "__main__":
bq = BigQueryWriter()
bq.start()
| 1 | 14,454 | @alastair, because the `insertId` for two listens with different metadata and same ts was the same, only one of them would get written into BQ. A question is what would be the ideal way to write tests for stuff like this, so that this doesn't break again? | metabrainz-listenbrainz-server | py |
@@ -1883,7 +1883,7 @@ UDS_RDBI.dataIdentifiers[0x4080] = "AirbagLock_NEU"
UDS_RDBI.dataIdentifiers[0x4140] = "BodyComConfig"
UDS_RDBI.dataIdentifiers[0x4ab4] = "Betriebsstundenzaehler"
UDS_RDBI.dataIdentifiers[0x5fc2] = "WDBI_DME_ABGLEICH_PROG_REQ"
-UDS_RDBI.dataIdentifiers[0xd114] = "Gesamtweg-Streckenzähler Offset"
+UDS_RDBI.dataIdentifiers[0xd114] = "Gesamtweg-Streckenzaehler Offset"
UDS_RDBI.dataIdentifiers[0xd387] = "STATUS_DIEBSTAHLSCHUTZ"
UDS_RDBI.dataIdentifiers[0xdb9c] = "InitStatusEngineAngle"
UDS_RDBI.dataIdentifiers[0xEFE9] = "WakeupRegistry"
| 1 |
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Nils Weiss <[email protected]>
# This program is published under a GPLv2 license
# scapy.contrib.description = BMW specific definitions for UDS
# scapy.contrib.status = loads
from scapy.packet import Packet, bind_layers
from scapy.fields import ByteField, ShortField, ByteEnumField, X3BytesField, \
StrField, StrFixedLenField, LEIntField, LEThreeBytesField, \
PacketListField, IntField, IPField, ThreeBytesField, ShortEnumField
from scapy.contrib.automotive.uds import UDS, UDS_RDBI, UDS_DSC, UDS_IOCBI, \
UDS_RC, UDS_RD, UDS_RSDBI, UDS_RDBIPR
BMW_specific_enum = {
0: "requestIdentifiedBCDDTCAndStatus",
1: "requestSupportedBCDDTCAndStatus",
2: "requestIdentified2ByteHexDTCAndStatus",
3: "requestSupported2ByteHexDTCAndStatus",
128: "ECUIdentificationDataTable",
129: "ECUIdentificationScalingTable",
134: "BMW_currentUIFdataTable",
135: "BMW_physicalECUhardwareNumber",
136: "BMW_changeIndex",
137: "BMW_systemSupplierECUserialNumber",
138: "BMW_systemSupplierSpecific",
139: "BMW_systemSupplierSpecific",
140: "BMW_systemSupplierSpecific",
141: "BMW_systemSupplierSpecific",
142: "BMW_systemSupplierSpecific",
143: "BMW_systemSupplierSpecific",
144: "VIN - Vehicle Identification Number",
145: "vehicleManufacturerECUHardwareNumber",
146: "systemSupplierECUHardwareNumber",
147: "systemSupplierECUHardwareVersionNumber",
148: "systemSupplierECUSoftwareNumber",
149: "systemSupplierECUSoftwareVersionNumber",
150: "exhaustRegulationOrTypeApprovalNumber",
151: "systemNameOrEngineType",
152: "repairShopCodeOrTesterSerialNumber",
153: "programmingDate",
154: "BMW_vehicleManufacturerECUhardwareVersionNumber",
155: "BMW_vehicleManufacturerCodingIndex",
156: "BMW_vehicleManufacturerDiagnosticIndex",
157: "BMW_dateOfECUmanufacturing",
158: "BMW_systemSupplierIndex",
159: "BMW_vehicleManufECUsoftwareLayerVersionNumbers",
241: "BMW / OBD tester address",
245: "OBD via function bus",
250: "MOST tester address"}
BMW_memoryTypeIdentifiers = {
0: "BMW_linearAddressRange",
1: "BMW_ROM_EPROM_internal",
2: "BMW_ROM_EPROM_external",
3: "BMW_NVRAM_characteristicZones_DTCmemory",
4: "BMW_RAM_internal_shortMOV",
5: "BMW_RAM_external_xDataMOV",
6: "BMW_flashEPROM_internal",
7: "BMW_UIFmemory",
8: "BMW_vehicleOrderDataMemory_onlyToBeUsedByDS2_ECUs",
9: "BMW_flashEPROM_external",
11: "BMW_RAM_internal_longMOVatRegister"}
class IOCBLI_REQ(Packet):
name = 'InputOutputControlByLocalIdentifier_Request'
fields_desc = [
ByteField('inputOutputLocalIdentifier', 1),
ByteEnumField('inputOutputControlParameter', 0,
{0: "returnControlToECU",
1: "reportCurrentState",
2: "reportIOConditions",
3: "reportIOScaling",
4: "resetToDefault",
5: "freezeCurrentState",
6: "executeControlOption",
7: "shortTermAdjustment",
8: "longTerAdjustment",
9: "reportIOCalibrationParameters"})]
bind_layers(UDS, IOCBLI_REQ, service=0x30)
UDS.services[0x30] = 'InputOutputControlByLocalIdentifier'
class RDTCBS_REQ(Packet):
name = 'ReadDTCByStatus_Request'
fields_desc = [
ByteEnumField('statusOfDTC', 0, BMW_specific_enum),
ShortField('groupOfDTC', 0)]
bind_layers(UDS, RDTCBS_REQ, service=0x18)
UDS.services[0x18] = 'ReadDTCByStatus'
class RSODTC_REQ(Packet):
name = 'ReadStatusOfDTC_Request'
fields_desc = [
ShortField('groupOfDTC', 0)]
bind_layers(UDS, RSODTC_REQ, service=0x17)
UDS.services[0x17] = 'ReadStatusOfDTC'
class REI_IDENT_REQ(Packet):
name = 'Read ECU Identification_Request'
fields_desc = [
ByteEnumField('identificationDataTable', 0, BMW_specific_enum)]
bind_layers(UDS, REI_IDENT_REQ, service=0x1a)
UDS.services[0x1a] = 'ReadECUIdentification'
class SPRBLI_REQ(Packet):
name = 'StopRoutineByLocalIdentifier_Request'
fields_desc = [
ByteEnumField('localIdentifier', 0,
{1: "codingChecksum",
2: "clearMemory",
3: "clearHistoryMemory",
4: "selfTest",
5: "powerDown",
6: "clearDTCshadowMemory",
7: "requestForAuthentication",
8: "releaseAuthentication",
9: "checkSignature",
10: "checkProgrammingStatus",
11: "executeDiagnosticService",
12: "controlEnergySavingMode",
13: "resetSystemFaultMessage",
14: "timeControlledPowerdown",
15: "disableCommunicationOverGateway",
31: "SweepingTechnologies"}),
StrField('routineExitOption', b"")]
bind_layers(UDS, SPRBLI_REQ, service=0x32)
UDS.services[0x32] = 'StopRoutineByLocalIdentifier'
class ENMT_REQ(Packet):
name = 'EnableNormalMessageTransmission_Request'
fields_desc = [
ByteEnumField('responseRequired', 0, {1: "yes", 2: "no"})]
bind_layers(UDS, ENMT_REQ, service=0x29)
UDS.services[0x29] = 'EnableNormalMessageTransmission'
class WDBLI_REQ(Packet):
name = 'WriteDataByLocalIdentifier_Request'
fields_desc = [
ByteEnumField('recordLocalIdentifier', 0, {144: "shortVIN"}),
StrField('recordValue', b"")]
bind_layers(UDS, WDBLI_REQ, service=0x3b)
UDS.services[0x3b] = 'WriteDataByLocalIdentifier'
class RDS2TCM_REQ(Packet):
name = 'ReadDS2TroubleCodeMemory_Request'
fields_desc = [
ByteField('DS2faultNumber', 0)]
bind_layers(UDS, RDS2TCM_REQ, service=0xa0)
UDS.services[0xa0] = 'ReadDS2TroubleCodeMemory'
class RDBLI_REQ(Packet):
name = 'ReadDataByLocalIdentifier_Request'
fields_desc = [
ByteField('recordLocalIdentifier', 0)]
bind_layers(UDS, RDBLI_REQ, service=0x21)
UDS.services[0x21] = 'ReadDataByLocalIdentifier'
class RRRBA_REQ(Packet):
name = 'RequestRoutineResultsByAddress_Request'
fields_desc = [
X3BytesField('routineAddress', 0),
ByteEnumField('memoryTypeIdentifier', 0, BMW_memoryTypeIdentifiers)]
bind_layers(UDS, RRRBA_REQ, service=0x3a)
UDS.services[0x3a] = 'RequestRoutineResultsByAddress'
class RRRBLI_REQ(Packet):
name = 'RequestRoutineResultsByLocalIdentifier_Request'
fields_desc = [
ByteField('routineLocalID', 0)]
bind_layers(UDS, RRRBLI_REQ, service=0x33)
UDS.services[0x33] = 'RequestRoutineResultsByLocalIdentifier'
class SPRBA_REQ(Packet):
name = 'StopRoutineByAddress_Request'
fields_desc = [
X3BytesField('routineAddress', 0),
ByteEnumField('memoryTypeIdentifier', 0, BMW_memoryTypeIdentifiers),
        StrField('routineExitOption', b"")]
bind_layers(UDS, SPRBA_REQ, service=0x39)
UDS.services[0x39] = 'StopRoutineByAddress'
class STRBA_REQ(Packet):
name = 'StartRoutineByAddress_Request'
fields_desc = [
X3BytesField('routineAddress', 0),
ByteEnumField('memoryTypeIdentifier', 0, BMW_memoryTypeIdentifiers),
        StrField('routineEntryOption', b"")]
bind_layers(UDS, STRBA_REQ, service=0x38)
UDS.services[0x38] = 'StartRoutineByAddress'
class UDS2S_REQ(Packet):
name = 'UnpackDS2Service_Request'
fields_desc = [
ByteField('DS2ECUAddress', 0),
ByteField('DS2requestLength', 0),
ByteField('DS2ControlByte', 0),
        StrField('DS2requestParameters', b"")]
bind_layers(UDS, UDS2S_REQ, service=0xa5)
UDS.services[0xa5] = 'UnpackDS2Service'
class SVK_DateField(LEThreeBytesField):
def i2repr(self, pkt, x):
x = self.addfield(pkt, b"", x)
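        # Assumed BCD date layout DD.MM.YY: e.g. wire bytes 24 07 19
        # render as "24.07.2019".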
return "%02X.%02X.20%02X" % (x[0], x[1], x[2])
class SVK_Entry(Packet):
fields_desc = [
ByteEnumField("processClass", 0, {1: "HWEL", 2: "HWAP", 4: "GWTB",
5: "CAFD", 6: "BTLD", 7: "FLSL",
8: "SWFL"}),
StrFixedLenField("svk_id", b"", length=4),
ByteField("mainVersion", 0),
ByteField("subVersion", 0),
ByteField("patchVersion", 0)]
def extract_padding(self, p):
return b"", p
class SVK(Packet):
prog_status_enum = {
1: "signature check and programming-dependencies check passed",
2: "software entry invalid or programming-dependencies check failed",
3: "software entry incompatible to hardware entry",
4: "software entry incompatible with other software entry"}
fields_desc = [
ByteEnumField("prog_status1", 0, prog_status_enum),
ByteEnumField("prog_status2", 0, prog_status_enum),
ShortField("entries_count", 0),
SVK_DateField("prog_date", b'\x00\x00\x00'),
ByteField("pad1", 0),
LEIntField("prog_milage", 0),
StrFixedLenField("pad2", 0, length=5),
PacketListField("entries", [], SVK_Entry,
count_from=lambda x: x.entries_count)]
class DIAG_SESSION_RESP(Packet):
fields_desc = [
ByteField('DIAG_SESSION_VALUE', 0),
StrField('DIAG_SESSION_TEXT', '')
]
class IP_CONFIG_RESP(Packet):
fields_desc = [
ByteField('ADDRESS_FORMAT_ID', 0),
IPField('IP', '192.168.0.10'),
IPField('SUBNETMASK', '255.255.255.0'),
IPField('DEFAULT_GATEWAY', '192.168.0.1')
]
bind_layers(UDS_RDBIPR, IP_CONFIG_RESP, dataIdentifier=0x172a)
bind_layers(UDS_RDBIPR, DIAG_SESSION_RESP, dataIdentifier=0xf186)
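# With the bindings above, a ReadDataByIdentifier positive response for DID
# 0x172a dissects its record into ADDRESS_FORMAT_ID / IP / SUBNETMASK /
# DEFAULT_GATEWAY, and one for DID 0xf186 into the current diagnostic session.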
class DEV_JOB(Packet):
identifiers = {
0x51F1: "ControlReciprocalMonitor",
0xCADD: "EnableDebugCan",
0xDEAD: "LockJtag1",
0xDEAE: "LockJtag2",
0xDEAF: "UnlockJtag",
0xF510: "ControlFuSiIO",
0xFF00: "ReadTransportMessageStatus",
0xFF10: "ControlEthernetActivation",
0xFF51: "ControlPwfMaster",
0xFF66: "ControlWebsite",
0xFF77: "ControlIdleMessage",
0xFFB0: "ReadManufacturerData",
0xFFB1: "ReadBuildNumber",
0xFFD0: "ReadFzmSentryStates",
0xFFD1: "ReadFzmSlaveStates",
0xFFD2: "ReadFzmMasterState",
0xFFD3: "ControlLifecycle",
0xFFD5: "IsCertificateValid",
0xFFFA: "SetDiagRouting",
0xFFFF: "ReadMemory"}
fields_desc = [
ShortEnumField('identifier', 0xffff, identifiers)
]
class DEV_JOB_PR(Packet):
fields_desc = [
ShortEnumField('identifier', 0xffff, DEV_JOB.identifiers)
]
def answers(self, other):
return isinstance(other, DEV_JOB) and \
self.identifier == other.identifier
UDS.services[0xBF] = "DevelopmentJob"
UDS.services[0xFF] = "DevelopmentJobPositiveResponse"
bind_layers(UDS, DEV_JOB, service=0xBF)
bind_layers(UDS, DEV_JOB_PR, service=0xFF)
class READ_MEM(Packet):
fields_desc = [
IntField('read_addr', 0),
IntField('read_length', 0)
]
class READ_MEM_PR(Packet):
fields_desc = [
StrField('data', ''),
]
class WEBSERVER(Packet):
fields_desc = [
ByteField('enable', 1),
ThreeBytesField('password', 0x10203)
]
bind_layers(DEV_JOB, WEBSERVER, identifier=0xff66)
bind_layers(DEV_JOB_PR, WEBSERVER, identifier=0xff66)
bind_layers(DEV_JOB, READ_MEM, identifier=0xffff)
bind_layers(DEV_JOB_PR, READ_MEM_PR, identifier=0xffff)
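# Usage sketch (hypothetical address/length values): the development jobs above
# compose like any other scapy layers, e.g.
#   pkt = UDS() / DEV_JOB(identifier=0xffff) / READ_MEM(read_addr=0x1000,
#                                                       read_length=64)
# and a positive response dissects as UDS() / DEV_JOB_PR() / READ_MEM_PR().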
for did in range(0xf101, 0xf141):
    bind_layers(UDS_RDBIPR, SVK, dataIdentifier=did)
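# Usage sketch: an SVK table is read with a plain ReadDataByIdentifier request,
# e.g. UDS() / UDS_RDBI(identifiers=[0xf101]) (assuming the 'identifiers' field
# list of the generic UDS_RDBI request); the matching positive response then
# dissects as UDS() / UDS_RDBIPR(dataIdentifier=0xf101) / SVK.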
UDS_RDBI.dataIdentifiers[0x0014] = "RDBCI_IS_LESEN_DETAIL_REQ"
UDS_RDBI.dataIdentifiers[0x0015] = "RDBCI_HS_LESEN_DETAIL_REQ"
UDS_RDBI.dataIdentifiers[0x0e80] = "AirbagLock"
UDS_RDBI.dataIdentifiers[0x1000] = "TestStamp"
UDS_RDBI.dataIdentifiers[0x1001] = "CBSdata"
for did in range(0x1002, 0x1008):
    UDS_RDBI.dataIdentifiers[did] = "smallUserInformationField"
UDS_RDBI.dataIdentifiers[0x1008] = "smallUserInformationFieldBMWfast"
UDS_RDBI.dataIdentifiers[0x1009] = "vehicleProductionDate"
UDS_RDBI.dataIdentifiers[0x100A] = "EnergyMode"
UDS_RDBI.dataIdentifiers[0x100B] = "VcmIntegrationStep"
UDS_RDBI.dataIdentifiers[0x100d] = "gatewayTableVersionNumber"
UDS_RDBI.dataIdentifiers[0x100e] = "ExtendedMode"
UDS_RDBI.dataIdentifiers[0x1010] = "fullVehicleIdentificationNumber"
UDS_RDBI.dataIdentifiers[0x1011] = "vehicleType"
for did in range(0x1012, 0x1020):
    UDS_RDBI.dataIdentifiers[did] = "chipCardData_1012_101F"
UDS_RDBI.dataIdentifiers[0x1600] = "IdentifyNumberofSubbusMembers"
for did in range(0x1601, 0x1700):
    UDS_RDBI.dataIdentifiers[did] = "SubbusMemberSerialNumber"
UDS_RDBI.dataIdentifiers[0x1701] = "SysTime"
UDS_RDBI.dataIdentifiers[0x170C] = "BoardPowerSupply"
UDS_RDBI.dataIdentifiers[0x171F] = "Certificate"
UDS_RDBI.dataIdentifiers[0x1720] = "SCVersion"
UDS_RDBI.dataIdentifiers[0x1723] = "ActiveResponseDTCs"
UDS_RDBI.dataIdentifiers[0x1724] = "LockableDTCs"
UDS_RDBI.dataIdentifiers[0x172A] = "IPConfiguration"
UDS_RDBI.dataIdentifiers[0x172B] = "MACAddress"
UDS_RDBI.dataIdentifiers[0x1735] = "LifecycleMode"
UDS_RDBI.dataIdentifiers[0x2000] = "dtcShadowMemory"
for did in range(0x2001, 0x2100):
    UDS_RDBI.dataIdentifiers[did] = "dtcShadowMemoryEntry"
UDS_RDBI.dataIdentifiers[0x2100] = "dtcHistoryMemory"
for did in range(0x2101, 0x21bf):
    UDS_RDBI.dataIdentifiers[did] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21bf] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21c0] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21c1] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21c2] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21c3] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21c4] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21c5] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21c6] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21c7] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21c8] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21c9] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21ca] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21cb] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21cc] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21cd] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21ce] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21cf] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21d0] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21d1] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21d2] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21d3] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21d4] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21d5] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21d6] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21d7] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21d8] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21d9] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21da] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21db] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21dc] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21dd] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21de] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21df] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21e0] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21e1] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21e2] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21e3] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21e4] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21e5] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21e6] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21e7] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21e8] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21e9] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21ea] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21eb] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21ec] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21ed] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21ee] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21ef] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21f0] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21f1] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21f2] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21f3] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21f4] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21f5] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21f6] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21f7] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21f8] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21f9] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21fa] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21fb] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21fc] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21fd] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21fe] = "dtcHistoryMemoryEntry 2101-21FF"
UDS_RDBI.dataIdentifiers[0x21ff] = "dtcHistoryMemoryEntry 2101-21FF"
for did in range(0x2200, 0x2300):
    UDS_RDBI.dataIdentifiers[did] = "afterSalesServiceData_2200_22FF"
UDS_RDBI.dataIdentifiers[0x2300] = "operatingData" # or RDBCI_BETRIEBSDATEN_LESEN_REQ (German: read operating data) # noqa E501
for did in range(0x2301, 0x2400):
    UDS_RDBI.dataIdentifiers[did] = "additionalOperatingData 2301-23FF"
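# The blocks above map whole DID ranges onto a single label. For debugging it
# can help to invert that mapping; this helper is illustrative only (not part
# of the original table) and assumes dataIdentifiers is a plain dict-like:
def _dids_for(label):
    """Return all DIDs registered above that carry the given label."""
    return sorted(did for did, name in UDS_RDBI.dataIdentifiers.items()
                  if name == label)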
# Personalization data for drivers 0-3: each driver owns a base DID at
# 0x2400 + 0x10 * n, followed by fifteen additional records
for driver in range(4):
    base = 0x2400 + 0x10 * driver
    UDS_RDBI.dataIdentifiers[base] = \
        "personalizationDataDriver%d" % driver
    for did in range(base + 1, base + 0x10):
        UDS_RDBI.dataIdentifiers[did] = \
            "additionalPersonalizationDataDriver%d" % driver
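# The personalization window 0x2400-0x243f is laid out regularly, so a DID can
# be decoded back into (driver, slot). This helper is hypothetical and not part
# of the original definitions:
def _personalization_did_info(did):
    """Return (driver, slot) for a DID in 0x2400-0x243f, slot 0 being the
    base record; return None outside that window."""
    if 0x2400 <= did <= 0x243f:
        return (did - 0x2400) // 0x10, did % 0x10
    return None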
UDS_RDBI.dataIdentifiers[0x2500] = "programmReferenzBackup/vehicleManufacturerECUHW_NrBackup" # noqa E501
UDS_RDBI.dataIdentifiers[0x2501] = "MemorySegmentationTable"
UDS_RDBI.dataIdentifiers[0x2502] = "ProgrammingCounter"
UDS_RDBI.dataIdentifiers[0x2503] = "ProgrammingCounterMax"
UDS_RDBI.dataIdentifiers[0x2504] = "FlashTimings"
UDS_RDBI.dataIdentifiers[0x2505] = "MaxBlocklength"
UDS_RDBI.dataIdentifiers[0x2506] = "ReadMemoryAddress" # or maximaleBlockLaenge (German: maximum block length) # noqa E501
UDS_RDBI.dataIdentifiers[0x2507] = "EcuSupportsDeleteSwe"
UDS_RDBI.dataIdentifiers[0x2508] = "GWRoutingStatus"
UDS_RDBI.dataIdentifiers[0x2509] = "RoutingTable"
UDS_RDBI.dataIdentifiers[0x2530] = "SubnetStatus"
UDS_RDBI.dataIdentifiers[0x2541] = "STATUS_CALCVN"
UDS_RDBI.dataIdentifiers[0x3000] = "RDBI_CD_REQ" # or WDBI_CD_REQ
UDS_RDBI.dataIdentifiers[0x300a] = "Codier-VIN" # German: coding VIN
UDS_RDBI.dataIdentifiers[0x37fe] = "Codierpruefstempel" # German: coding verification stamp # noqa E501
UDS_RDBI.dataIdentifiers[0x3f00] = "SVT-Ist" # German: Ist = actual
UDS_RDBI.dataIdentifiers[0x3f01] = "SVT-Soll" # German: Soll = target
UDS_RDBI.dataIdentifiers[0x3f02] = "VcmEcuListSecurity"
UDS_RDBI.dataIdentifiers[0x3f03] = "VcmEcuListSwt"
UDS_RDBI.dataIdentifiers[0x3f04] = "VcmNotificationTimeStamp"
UDS_RDBI.dataIdentifiers[0x3f05] = "VcmSerialNumberReferenceList"
UDS_RDBI.dataIdentifiers[0x3f06] = "VcmVehicleOrder"
UDS_RDBI.dataIdentifiers[0x3f07] = "VcmEcuListAll"
UDS_RDBI.dataIdentifiers[0x3f08] = "VcmEcuListActiveResponse"
UDS_RDBI.dataIdentifiers[0x3f09] = "VcmVehicleProfile"
UDS_RDBI.dataIdentifiers[0x3f0a] = "VcmEcuListDiffProg"
UDS_RDBI.dataIdentifiers[0x3f0b] = "VcmEcuListNgsc"
UDS_RDBI.dataIdentifiers[0x3f0c] = "VcmEcuListCodingRelevant"
UDS_RDBI.dataIdentifiers[0x3f0d] = "VcmEcuListFlashable"
UDS_RDBI.dataIdentifiers[0x3f0e] = "VcmEcuListKCan"
UDS_RDBI.dataIdentifiers[0x3f0f] = "VcmEcuListBodyCan"
UDS_RDBI.dataIdentifiers[0x3f10] = "VcmEcuListSFCan"
UDS_RDBI.dataIdentifiers[0x3f11] = "VcmEcuListMost"
UDS_RDBI.dataIdentifiers[0x3f12] = "VcmEcuListFaCan"
UDS_RDBI.dataIdentifiers[0x3f13] = "VcmEcuListFlexray"
UDS_RDBI.dataIdentifiers[0x3f14] = "VcmEcuListACan"
UDS_RDBI.dataIdentifiers[0x3f15] = "VcmEcuListIso14229"
UDS_RDBI.dataIdentifiers[0x3f16] = "VcmEcuListSCan"
UDS_RDBI.dataIdentifiers[0x3f17] = "VcmEcuListEthernet"
UDS_RDBI.dataIdentifiers[0x3f18] = "VcmEcuListDCan"
UDS_RDBI.dataIdentifiers[0x3f19] = "VcmVcmIdentification"
UDS_RDBI.dataIdentifiers[0x3f1a] = "VcmSvtVersion"
UDS_RDBI.dataIdentifiers[0x3f1b] = "vehicleOrder_3F00_3FFE"
UDS_RDBI.dataIdentifiers[0x3f1c] = "FA_Teil1" # German: vehicle order part 1
UDS_RDBI.dataIdentifiers[0x3f1d] = "FA_Teil2" # German: vehicle order part 2
UDS_RDBI.dataIdentifiers[0x3fff] = "changeIndexOfCodingData"
UDS_RDBI.dataIdentifiers[0x4000] = "GWTableVersion"
UDS_RDBI.dataIdentifiers[0x4001] = "WakeupSource"
UDS_RDBI.dataIdentifiers[0x4020] = "StatusLearnFlexray"
UDS_RDBI.dataIdentifiers[0x4021] = "StatusFlexrayPath"
UDS_RDBI.dataIdentifiers[0x4030] = "EthernetRegisters"
UDS_RDBI.dataIdentifiers[0x4031] = "EthernetStatusInformation"
UDS_RDBI.dataIdentifiers[0x403c] = "STATUS_CALCVN_EA"
UDS_RDBI.dataIdentifiers[0x4040] = "DemLockingMasterState"
UDS_RDBI.dataIdentifiers[0x4050] = "AmbiguousRoutings"
UDS_RDBI.dataIdentifiers[0x4080] = "AirbagLock_NEU" # German: NEU = new
UDS_RDBI.dataIdentifiers[0x4140] = "BodyComConfig"
UDS_RDBI.dataIdentifiers[0x4ab4] = "Betriebsstundenzaehler" # German: operating hours counter # noqa E501
UDS_RDBI.dataIdentifiers[0x5fc2] = "WDBI_DME_ABGLEICH_PROG_REQ" # German: Abgleich = calibration # noqa E501
UDS_RDBI.dataIdentifiers[0xd114] = "Gesamtweg-Streckenzähler Offset" # German: total distance odometer offset # noqa E501
UDS_RDBI.dataIdentifiers[0xd387] = "STATUS_DIEBSTAHLSCHUTZ" # German: theft protection status # noqa E501
UDS_RDBI.dataIdentifiers[0xdb9c] = "InitStatusEngineAngle"
UDS_RDBI.dataIdentifiers[0xefe8] = "ClearWakeupRegistry"
UDS_RDBI.dataIdentifiers[0xefe9] = "WakeupRegistry"
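# Minimal usage sketch, assuming scapy's UDS contrib layer, where this table
# backs the enum used when rendering ReadDataByIdentifier packets. DID 0x2502
# ("ProgrammingCounter") is only an example value:
if __name__ == "__main__":
    from scapy.contrib.automotive.uds import UDS
    req = UDS() / UDS_RDBI(identifiers=[0x2502])
    req.show()  # the identifier is displayed with the name registered above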
for did in range(0xf000, 0xf010):
    UDS_RDBI.dataIdentifiers[did] = \
        "networkConfigurationDataForTractorTrailerApplication"
for did in range(0xf010, 0xf07b):
    UDS_RDBI.dataIdentifiers[did] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf07b] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf07c] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf07d] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf07e] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf07f] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf080] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf081] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf082] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf083] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf084] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf085] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf086] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf087] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf088] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf089] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf08a] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf08b] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf08c] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf08d] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf08e] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf08f] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf090] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf091] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf092] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf093] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf094] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf095] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf096] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf097] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf098] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf099] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf09a] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf09b] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf09c] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf09d] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf09e] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf09f] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0a0] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0a1] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0a2] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0a3] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0a4] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0a5] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0a6] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0a7] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0a8] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0a9] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0aa] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0ab] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0ac] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0ad] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0ae] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0af] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0b0] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0b1] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0b2] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0b3] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0b4] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0b5] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0b6] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0b7] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0b8] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0b9] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0ba] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0bb] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0bc] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0bd] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0be] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0bf] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0c0] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0c1] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0c2] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0c3] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0c4] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0c5] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0c6] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0c7] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0c8] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0c9] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0ca] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0cb] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0cc] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0cd] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0ce] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0cf] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0d0] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0d1] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0d2] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0d3] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0d4] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0d5] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0d6] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0d7] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0d8] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0d9] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0da] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0db] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0dc] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0dd] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0de] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0df] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0e0] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0e1] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0e2] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0e3] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0e4] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0e5] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0e6] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0e7] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0e8] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0e9] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0ea] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0eb] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0ec] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0ed] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0ee] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0ef] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0f0] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0f1] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0f2] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0f3] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0f4] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0f5] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0f6] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0f7] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0f8] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0f9] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0fa] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0fb] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0fc] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0fd] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0fe] = "networkConfigurationData"
UDS_RDBI.dataIdentifiers[0xf0ff] = "networkConfigurationData"
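# 0xf100-0xf17f falls in the identification-option range that ISO 14229-1
# leaves to the vehicle manufacturer; the activeSessionState and SVK
# software-version entries below (backup slots 01-61 at 0xf104-0xf140) are
# such OEM-defined identifiers.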
UDS_RDBI.dataIdentifiers[0xf100] = "activeSessionState"
UDS_RDBI.dataIdentifiers[0xf101] = "SVKCurrent"
UDS_RDBI.dataIdentifiers[0xf102] = "SVKSystemSupplier"
UDS_RDBI.dataIdentifiers[0xf103] = "SVKFactory"
UDS_RDBI.dataIdentifiers[0xf104] = "SVK_Backup_01"
UDS_RDBI.dataIdentifiers[0xf105] = "SVK_Backup_02"
UDS_RDBI.dataIdentifiers[0xf106] = "SVK_Backup_03"
UDS_RDBI.dataIdentifiers[0xf107] = "SVK_Backup_04"
UDS_RDBI.dataIdentifiers[0xf108] = "SVK_Backup_05"
UDS_RDBI.dataIdentifiers[0xf109] = "SVK_Backup_06"
UDS_RDBI.dataIdentifiers[0xf10a] = "SVK_Backup_07"
UDS_RDBI.dataIdentifiers[0xf10b] = "SVK_Backup_08"
UDS_RDBI.dataIdentifiers[0xf10c] = "SVK_Backup_09"
UDS_RDBI.dataIdentifiers[0xf10d] = "SVK_Backup_10"
UDS_RDBI.dataIdentifiers[0xf10e] = "SVK_Backup_11"
UDS_RDBI.dataIdentifiers[0xf10f] = "SVK_Backup_12"
UDS_RDBI.dataIdentifiers[0xf110] = "SVK_Backup_13"
UDS_RDBI.dataIdentifiers[0xf111] = "SVK_Backup_14"
UDS_RDBI.dataIdentifiers[0xf112] = "SVK_Backup_15"
UDS_RDBI.dataIdentifiers[0xf113] = "SVK_Backup_16"
UDS_RDBI.dataIdentifiers[0xf114] = "SVK_Backup_17"
UDS_RDBI.dataIdentifiers[0xf115] = "SVK_Backup_18"
UDS_RDBI.dataIdentifiers[0xf116] = "SVK_Backup_19"
UDS_RDBI.dataIdentifiers[0xf117] = "SVK_Backup_20"
UDS_RDBI.dataIdentifiers[0xf118] = "SVK_Backup_21"
UDS_RDBI.dataIdentifiers[0xf119] = "SVK_Backup_22"
UDS_RDBI.dataIdentifiers[0xf11a] = "SVK_Backup_23"
UDS_RDBI.dataIdentifiers[0xf11b] = "SVK_Backup_24"
UDS_RDBI.dataIdentifiers[0xf11c] = "SVK_Backup_25"
UDS_RDBI.dataIdentifiers[0xf11d] = "SVK_Backup_26"
UDS_RDBI.dataIdentifiers[0xf11e] = "SVK_Backup_27"
UDS_RDBI.dataIdentifiers[0xf11f] = "SVK_Backup_28"
UDS_RDBI.dataIdentifiers[0xf120] = "SVK_Backup_29"
UDS_RDBI.dataIdentifiers[0xf121] = "SVK_Backup_30"
UDS_RDBI.dataIdentifiers[0xf122] = "SVK_Backup_31"
UDS_RDBI.dataIdentifiers[0xf123] = "SVK_Backup_32"
UDS_RDBI.dataIdentifiers[0xf124] = "SVK_Backup_33"
UDS_RDBI.dataIdentifiers[0xf125] = "SVK_Backup_34"
UDS_RDBI.dataIdentifiers[0xf126] = "SVK_Backup_35"
UDS_RDBI.dataIdentifiers[0xf127] = "SVK_Backup_36"
UDS_RDBI.dataIdentifiers[0xf128] = "SVK_Backup_37"
UDS_RDBI.dataIdentifiers[0xf129] = "SVK_Backup_38"
UDS_RDBI.dataIdentifiers[0xf12a] = "SVK_Backup_39"
UDS_RDBI.dataIdentifiers[0xf12b] = "SVK_Backup_40"
UDS_RDBI.dataIdentifiers[0xf12c] = "SVK_Backup_41"
UDS_RDBI.dataIdentifiers[0xf12d] = "SVK_Backup_42"
UDS_RDBI.dataIdentifiers[0xf12e] = "SVK_Backup_43"
UDS_RDBI.dataIdentifiers[0xf12f] = "SVK_Backup_44"
UDS_RDBI.dataIdentifiers[0xf130] = "SVK_Backup_45"
UDS_RDBI.dataIdentifiers[0xf131] = "SVK_Backup_46"
UDS_RDBI.dataIdentifiers[0xf132] = "SVK_Backup_47"
UDS_RDBI.dataIdentifiers[0xf133] = "SVK_Backup_48"
UDS_RDBI.dataIdentifiers[0xf134] = "SVK_Backup_49"
UDS_RDBI.dataIdentifiers[0xf135] = "SVK_Backup_50"
UDS_RDBI.dataIdentifiers[0xf136] = "SVK_Backup_51"
UDS_RDBI.dataIdentifiers[0xf137] = "SVK_Backup_52"
UDS_RDBI.dataIdentifiers[0xf138] = "SVK_Backup_53"
UDS_RDBI.dataIdentifiers[0xf139] = "SVK_Backup_54"
UDS_RDBI.dataIdentifiers[0xf13a] = "SVK_Backup_55"
UDS_RDBI.dataIdentifiers[0xf13b] = "SVK_Backup_56"
UDS_RDBI.dataIdentifiers[0xf13c] = "SVK_Backup_57"
UDS_RDBI.dataIdentifiers[0xf13d] = "SVK_Backup_58"
UDS_RDBI.dataIdentifiers[0xf13e] = "SVK_Backup_59"
UDS_RDBI.dataIdentifiers[0xf13f] = "SVK_Backup_60"
UDS_RDBI.dataIdentifiers[0xf140] = "SVK_Backup_61"
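# 0xf150 and 0xf15a: further OEM-defined identifiers in the same range.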
UDS_RDBI.dataIdentifiers[0xf150] = "SGBDIndex"
UDS_RDBI.dataIdentifiers[0xf15a] = "fingerprint"
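# 0xf180-0xf19f: identification data identifiers standardized by ISO 14229-1
# (software/data identification, fingerprints, VIN, serial numbers, dates).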
UDS_RDBI.dataIdentifiers[0xf180] = "bootSoftwareIdentification"
UDS_RDBI.dataIdentifiers[0xf181] = "applicationSoftwareIdentification"
UDS_RDBI.dataIdentifiers[0xf182] = "applicationDataIdentification"
UDS_RDBI.dataIdentifiers[0xf183] = "bootSoftwareFingerprint"
UDS_RDBI.dataIdentifiers[0xf184] = "applicationSoftwareFingerprint"
UDS_RDBI.dataIdentifiers[0xf185] = "applicationDataFingerprint"
UDS_RDBI.dataIdentifiers[0xf186] = "activeDiagnosticSession"
UDS_RDBI.dataIdentifiers[0xf187] = "vehicleManufacturerSparePartNumber"
UDS_RDBI.dataIdentifiers[0xf188] = "vehicleManufacturerECUSoftwareNumber"
UDS_RDBI.dataIdentifiers[0xf189] = "vehicleManufacturerECUSoftwareVersionNumber"  # noqa: E501
UDS_RDBI.dataIdentifiers[0xf18a] = "systemSupplierIdentifier"
UDS_RDBI.dataIdentifiers[0xf18b] = "ECUManufacturingDate"
UDS_RDBI.dataIdentifiers[0xf18c] = "ECUSerialNumber"
UDS_RDBI.dataIdentifiers[0xf18d] = "supportedFunctionalUnits"
UDS_RDBI.dataIdentifiers[0xf190] = "VIN"
UDS_RDBI.dataIdentifiers[0xf191] = "vehicleManufacturerECUHardwareNumber"
UDS_RDBI.dataIdentifiers[0xf192] = "systemSupplierECUHardwareNumber"
UDS_RDBI.dataIdentifiers[0xf193] = "systemSupplierECUHardwareVersionNumber"
UDS_RDBI.dataIdentifiers[0xf194] = "systemSupplierECUSoftwareNumber"
UDS_RDBI.dataIdentifiers[0xf195] = "systemSupplierECUSoftwareVersionNumber"
UDS_RDBI.dataIdentifiers[0xf196] = "exhaustRegulationOrTypeApprovalNumber"
UDS_RDBI.dataIdentifiers[0xf197] = "systemNameOrEngineType"
UDS_RDBI.dataIdentifiers[0xf198] = "repairShopCodeOrTesterSerialNumber"
UDS_RDBI.dataIdentifiers[0xf199] = "programmingDate"
UDS_RDBI.dataIdentifiers[0xf19a] = "calibrationRepairShopCodeOrCalibrationEquipmentSerialNumber"  # noqa: E501
UDS_RDBI.dataIdentifiers[0xf19b] = "calibrationDate"
UDS_RDBI.dataIdentifiers[0xf19c] = "calibrationEquipmentSoftwareNumber"
UDS_RDBI.dataIdentifiers[0xf19d] = "ECUInstallationDate"
UDS_RDBI.dataIdentifiers[0xf19e] = "ODXFileIdentifier"
UDS_RDBI.dataIdentifiers[0xf19f] = "entityIdentifier"
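# 0xf200-0xf2ff: range reserved by ISO 14229-1 for periodic data identifiers
# (ReadDataByPeriodicIdentifier service).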
UDS_RDBI.dataIdentifiers[0xf200] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf201] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf202] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf203] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf204] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf205] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf206] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf207] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf208] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf209] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf20a] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf20b] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf20c] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf20d] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf20e] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf20f] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf210] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf211] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf212] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf213] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf214] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf215] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf216] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf217] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf218] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf219] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf21a] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf21b] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf21c] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf21d] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf21e] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf21f] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf220] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf221] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf222] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf223] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf224] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf225] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf226] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf227] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf228] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf229] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf22a] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf22b] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf22c] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf22d] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf22e] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf22f] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf230] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf231] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf232] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf233] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf234] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf235] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf236] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf237] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf238] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf239] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf23a] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf23b] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf23c] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf23d] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf23e] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf23f] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf240] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf241] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf242] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf243] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf244] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf245] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf246] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf247] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf248] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf249] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf24a] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf24b] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf24c] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf24d] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf24e] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf24f] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf250] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf251] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf252] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf253] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf254] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf255] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf256] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf257] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf258] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf259] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf25a] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf25b] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf25c] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf25d] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf25e] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf25f] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf260] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf261] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf262] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf263] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf264] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf265] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf266] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf267] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf268] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf269] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf26a] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf26b] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf26c] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf26d] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf26e] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf26f] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf270] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf271] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf272] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf273] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf274] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf275] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf276] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf277] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf278] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf279] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf27a] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf27b] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf27c] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf27d] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf27e] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf27f] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf280] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf281] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf282] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf283] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf284] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf285] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf286] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf287] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf288] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf289] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf28a] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf28b] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf28c] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf28d] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf28e] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf28f] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf290] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf291] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf292] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf293] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf294] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf295] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf296] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf297] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf298] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf299] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf29a] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf29b] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf29c] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf29d] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf29e] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf29f] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2a0] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2a1] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2a2] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2a3] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2a4] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2a5] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2a6] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2a7] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2a8] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2a9] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2aa] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2ab] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2ac] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2ad] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2ae] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2af] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2b0] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2b1] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2b2] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2b3] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2b4] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2b5] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2b6] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2b7] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2b8] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2b9] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2ba] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2bb] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2bc] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2bd] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2be] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2bf] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2c0] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2c1] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2c2] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2c3] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2c4] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2c5] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2c6] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2c7] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2c8] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2c9] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2ca] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2cb] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2cc] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2cd] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2ce] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2cf] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2d0] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2d1] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2d2] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2d3] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2d4] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2d5] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2d6] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2d7] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2d8] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2d9] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2da] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2db] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2dc] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2dd] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2de] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2df] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2e0] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2e1] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2e2] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2e3] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2e4] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2e5] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2e6] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2e7] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2e8] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2e9] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2ea] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2eb] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2ec] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2ed] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2ee] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2ef] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2f0] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2f1] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2f2] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2f3] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2f4] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2f5] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2f6] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2f7] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2f8] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2f9] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2fa] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2fb] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2fc] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2fd] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2fe] = "periodicDataIdentifier_F200_F2FF"
UDS_RDBI.dataIdentifiers[0xf2ff] = "periodicDataIdentifier_F200_F2FF"
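# 0xf300-0xf3ff: range reserved by ISO 14229-1 for dynamically defined data
# identifiers (DynamicallyDefineDataIdentifier service).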
UDS_RDBI.dataIdentifiers[0xf300] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf301] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf302] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf303] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf304] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf305] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf306] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf307] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf308] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf309] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf30a] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf30b] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf30c] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf30d] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf30e] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf30f] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf310] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf311] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf312] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf313] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf314] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf315] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf316] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf317] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf318] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf319] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf31a] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf31b] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf31c] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf31d] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf31e] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf31f] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf320] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf321] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf322] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf323] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf324] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf325] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf326] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf327] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf328] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf329] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf32a] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf32b] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf32c] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf32d] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf32e] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf32f] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf330] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf331] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf332] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf333] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf334] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf335] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf336] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf337] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf338] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf339] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf33a] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf33b] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf33c] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf33d] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf33e] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf33f] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf340] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf341] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf342] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf343] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf344] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf345] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf346] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf347] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf348] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf349] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf34a] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf34b] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf34c] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf34d] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf34e] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf34f] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf350] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf351] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf352] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf353] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf354] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf355] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf356] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf357] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf358] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf359] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf35a] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf35b] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf35c] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf35d] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf35e] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf35f] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf360] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf361] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf362] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf363] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf364] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf365] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf366] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf367] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf368] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf369] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf36a] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf36b] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf36c] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf36d] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf36e] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf36f] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf370] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf371] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf372] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf373] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf374] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf375] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf376] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf377] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf378] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf379] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf37a] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf37b] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf37c] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf37d] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf37e] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf37f] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf380] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf381] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf382] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf383] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf384] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf385] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf386] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf387] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf388] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf389] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf38a] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf38b] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf38c] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf38d] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf38e] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf38f] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf390] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf391] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf392] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf393] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf394] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf395] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf396] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf397] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf398] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf399] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf39a] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf39b] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf39c] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf39d] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf39e] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf39f] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3a0] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3a1] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3a2] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3a3] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3a4] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3a5] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3a6] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3a7] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3a8] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3a9] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3aa] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3ab] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3ac] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3ad] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3ae] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3af] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3b0] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3b1] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3b2] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3b3] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3b4] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3b5] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3b6] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3b7] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3b8] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3b9] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3ba] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3bb] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3bc] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3bd] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3be] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3bf] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3c0] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3c1] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3c2] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3c3] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3c4] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3c5] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3c6] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3c7] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3c8] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3c9] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3ca] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3cb] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3cc] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3cd] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3ce] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3cf] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3d0] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3d1] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3d2] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3d3] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3d4] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3d5] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3d6] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3d7] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3d8] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3d9] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3da] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3db] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3dc] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3dd] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3de] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3df] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3e0] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3e1] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3e2] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3e3] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3e4] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3e5] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3e6] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3e7] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3e8] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3e9] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3ea] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3eb] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3ec] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3ed] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3ee] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3ef] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3f0] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3f1] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3f2] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3f3] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3f4] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3f5] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3f6] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3f7] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3f8] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3f9] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3fa] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3fb] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3fc] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3fd] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3fe] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
UDS_RDBI.dataIdentifiers[0xf3ff] = "dynamicallyDefinedDataIdentifier_F300_F3FF"
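# 0xf400-0xf4ff: data identifiers mirroring the emissions-related OBD PIDs of
# SAE J1979 (ISO 14229-1 maps DID 0xf400 + n to OBD PID n).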
for did in range(0xf400, 0xf500):
    UDS_RDBI.dataIdentifiers[did] = "OBDPids_F400_F4FF"
for did in range(0xf500, 0xf600):
    UDS_RDBI.dataIdentifiers[did] = "OBDPids_F500_F5FF"
for did in range(0xf600, 0xf700):
    UDS_RDBI.dataIdentifiers[did] = "OBDMonitorIds_F600_F6FF"
for did in range(0xf700, 0xf800):
    UDS_RDBI.dataIdentifiers[did] = "OBDMonitorIds_F700_F7FF"
for did in range(0xf800, 0xf900):
    UDS_RDBI.dataIdentifiers[did] = "OBDInfoTypes_F800_F8FF"
for did in range(0xf900, 0xfa00):
    UDS_RDBI.dataIdentifiers[did] = "tachographPIds_F900_F9FF"
for did in range(0xfa00, 0xfb00):
    UDS_RDBI.dataIdentifiers[did] = "safetySystemPIds_FA00_FAFF"
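# Diagnostic session types for the UDS DiagnosticSessionControl service (0x10), including BMW-specific modes.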
UDS_DSC.diagnosticSessionTypes[0x81] = "defaultMode-StandardDiagnosticMode-OBDIIMode" # noqa E501
UDS_DSC.diagnosticSessionTypes[0x82] = "periodicTransmissions"
UDS_DSC.diagnosticSessionTypes[0x83] = "BMW_NOTtoBeImplemented_endOfLineVehicleManufacturerMode" # noqa E501
UDS_DSC.diagnosticSessionTypes[0x84] = "endOfLineSystemSupplierMode"
UDS_DSC.diagnosticSessionTypes[0x85] = "ECUProgrammingMode"
UDS_DSC.diagnosticSessionTypes[0x86] = "ECUDevelopmentMode"
UDS_DSC.diagnosticSessionTypes[0x87] = "ECUAdjustmentMode"
UDS_DSC.diagnosticSessionTypes[0x88] = "ECUVariantCodingMode"
UDS_DSC.diagnosticSessionTypes[0x89] = "BMW_ECUsafetyMode"
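# InputOutputControlByIdentifier (0x2F) reuses the same data identifier table as ReadDataByIdentifier (0x22).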
UDS_IOCBI.dataIdentifiers = UDS_RDBI.dataIdentifiers
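# BMW-specific routine identifiers for the UDS RoutineControl service (0x31).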
UDS_RC.routineControlIdentifiers[0x0000] = "BMW_linearAddressRange"
UDS_RC.routineControlIdentifiers[0x0001] = "BMW_ROM_EPROM_internal"
UDS_RC.routineControlIdentifiers[0x0002] = "BMW_ROM_EPROM_external"
UDS_RC.routineControlIdentifiers[0x0003] = "BMW_NVRAM_characteristicZones_DTCmemory" # noqa E501
UDS_RC.routineControlIdentifiers[0x0004] = "BMW_RAM_internal_shortMOV"
UDS_RC.routineControlIdentifiers[0x0005] = "BMW_RAM_external_xDataMOV"
UDS_RC.routineControlIdentifiers[0x0006] = "BMW_flashEPROM_internal"
UDS_RC.routineControlIdentifiers[0x0007] = "BMW_UIFmemory"
UDS_RC.routineControlIdentifiers[0x0008] = "BMW_vehicleOrderDataMemory"
UDS_RC.routineControlIdentifiers[0x0009] = "BMW_flashEPROM_external"
UDS_RC.routineControlIdentifiers[0x000b] = "BMW_RAM_internal_longMOVatRegister"
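# Routine identifiers 0x0100-0x01FF are, per the range label, reserved for tachograph test IDs.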
for rid in range(0x0100, 0x016f):
    UDS_RC.routineControlIdentifiers[rid] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x016f] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0170] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0171] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0172] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0173] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0174] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0175] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0176] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0177] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0178] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0179] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x017a] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x017b] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x017c] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x017d] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x017e] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x017f] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0180] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0181] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0182] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0183] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0184] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0185] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0186] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0187] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0188] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0189] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x018a] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x018b] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x018c] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x018d] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x018e] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x018f] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0190] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0191] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0192] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0193] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0194] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0195] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0196] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0197] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0198] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0199] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x019a] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x019b] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x019c] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x019d] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x019e] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x019f] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01a0] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01a1] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01a2] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01a3] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01a4] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01a5] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01a6] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01a7] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01a8] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01a9] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01aa] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01ab] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01ac] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01ad] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01ae] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01af] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01b0] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01b1] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01b2] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01b3] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01b4] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01b5] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01b6] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01b7] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01b8] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01b9] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01ba] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01bb] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01bc] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01bd] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01be] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01bf] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01c0] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01c1] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01c2] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01c3] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01c4] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01c5] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01c6] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01c7] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01c8] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01c9] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01ca] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01cb] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01cc] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01cd] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01ce] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01cf] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01d0] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01d1] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01d2] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01d3] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01d4] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01d5] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01d6] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01d7] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01d8] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01d9] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01da] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01db] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01dc] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01dd] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01de] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01df] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01e0] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01e1] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01e2] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01e3] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01e4] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01e5] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01e6] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01e7] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01e8] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01e9] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01ea] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01eb] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01ec] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01ed] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01ee] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01ef] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01f0] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01f1] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01f2] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01f3] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01f4] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01f5] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01f6] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01f7] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01f8] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01f9] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01fa] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01fb] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01fc] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01fd] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01fe] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x01ff] = "tachographTestIds_0100_01FF"
UDS_RC.routineControlIdentifiers[0x0200] = "VCM_SVT"
UDS_RC.routineControlIdentifiers[0x0202] = "checkMemory"
UDS_RC.routineControlIdentifiers[0x0203] = "checkProgrammingPreCondition"
UDS_RC.routineControlIdentifiers[0x0204] = "readSWEProgrammingStatus"
UDS_RC.routineControlIdentifiers[0x0205] = "readSWEDevelopmentInfo"
UDS_RC.routineControlIdentifiers[0x0206] = "checkProgrammingPower"
UDS_RC.routineControlIdentifiers[0x0207] = "VCM_Generiere_SVT"
UDS_RC.routineControlIdentifiers[0x020b] = "Steuergerätetausch"
UDS_RC.routineControlIdentifiers[0x020c] = "KeyExchange"
UDS_RC.routineControlIdentifiers[0x020d] = "FingerprintExchange"
UDS_RC.routineControlIdentifiers[0x020e] = "InternalAuthentication"
UDS_RC.routineControlIdentifiers[0x020f] = "CyclicSignatureCheck"
UDS_RC.routineControlIdentifiers[0x0210] = "TeleServiceLogin"
UDS_RC.routineControlIdentifiers[0x0211] = "ExternalAuthentication"
UDS_RC.routineControlIdentifiers[0x0212] = "StoreTransportKeyList"
UDS_RC.routineControlIdentifiers[0x0213] = "InitSignalKeyDeployment"
UDS_RC.routineControlIdentifiers[0x0214] = "N10GetState"
UDS_RC.routineControlIdentifiers[0x0215] = "GetParameterN11"
UDS_RC.routineControlIdentifiers[0x0220] = "RequestDeleteSwPackage"
UDS_RC.routineControlIdentifiers[0x0230] = "ResetState"
UDS_RC.routineControlIdentifiers[0x0231] = "GetState"
UDS_RC.routineControlIdentifiers[0x0232] = "ResetStateFsCSM"
UDS_RC.routineControlIdentifiers[0x0233] = "GetParameterN11"
UDS_RC.routineControlIdentifiers[0x0234] = "ExternerInit"
UDS_RC.routineControlIdentifiers[0x02a5] = "RequestListEntry"
UDS_RC.routineControlIdentifiers[0x0303] = "DiagLoopbackStart"
UDS_RC.routineControlIdentifiers[0x0304] = "DTC"
UDS_RC.routineControlIdentifiers[0x0305] = "STEUERN_DM_FSS_MASTER"
UDS_RC.routineControlIdentifiers[0x0f01] = "codingChecksum"
UDS_RC.routineControlIdentifiers[0x0f02] = "clearMemory"
UDS_RC.routineControlIdentifiers[0x0f04] = "selfTest"
UDS_RC.routineControlIdentifiers[0x0f05] = "powerDown"
UDS_RC.routineControlIdentifiers[0x0f06] = "clearDTCSecondaryMemory"
UDS_RC.routineControlIdentifiers[0x0f07] = "requestForAuthentication"
UDS_RC.routineControlIdentifiers[0x0f08] = "releaseAuthentication"
UDS_RC.routineControlIdentifiers[0x0f09] = "checkSignature"
UDS_RC.routineControlIdentifiers[0x0f0a] = "checkProgrammingStatus"
UDS_RC.routineControlIdentifiers[0x0f0b] = "ExecuteDiagnosticService"
UDS_RC.routineControlIdentifiers[0x0f0c] = "SetEnergyMode" # or controlEnergySavingMode # noqa E501
UDS_RC.routineControlIdentifiers[0x0f0d] = "resetSystemFaultMessage"
UDS_RC.routineControlIdentifiers[0x0f0e] = "timeControlledPowerDown"
UDS_RC.routineControlIdentifiers[0x0f0f] = "disableCommunicationOverGateway"
UDS_RC.routineControlIdentifiers[0x0f1f] = "SwtRoutine"
UDS_RC.routineControlIdentifiers[0x1002] = "Individualdatenrettung"
UDS_RC.routineControlIdentifiers[0x1003] = "SetExtendedMode"
UDS_RC.routineControlIdentifiers[0x1007] = "MasterVIN"
UDS_RC.routineControlIdentifiers[0x100d] = "ActivateCodingMode"
UDS_RC.routineControlIdentifiers[0x100e] = "ActivateProgrammingMode"
UDS_RC.routineControlIdentifiers[0x100f] = "ActivateApplicationMode"
UDS_RC.routineControlIdentifiers[0x1010] = "SetDefaultBus"
UDS_RC.routineControlIdentifiers[0x1011] = "GetActualConfig"
UDS_RC.routineControlIdentifiers[0x1013] = "RequestListEntryGWTB"
UDS_RC.routineControlIdentifiers[0x1021] = "requestPreferredProtcol"
UDS_RC.routineControlIdentifiers[0x1022] = "checkConnection"
UDS_RC.routineControlIdentifiers[0x1024] = "ResetActivationlineLogical"
UDS_RC.routineControlIdentifiers[0x1042] = "EthernetARLTable"
UDS_RC.routineControlIdentifiers[0x1045] = "EthernetIPConfiguration"
UDS_RC.routineControlIdentifiers[0x104e] = "EthernetARLTableExtended"
UDS_RC.routineControlIdentifiers[0x4000] = "Diagnosemaster"
UDS_RC.routineControlIdentifiers[0x4001] = "SetGWRouting"
UDS_RC.routineControlIdentifiers[0x4002] = "HDDDownload"
UDS_RC.routineControlIdentifiers[0x4004] = "KeepBussesAlive"
UDS_RC.routineControlIdentifiers[0x4007] = "updateMode"
UDS_RC.routineControlIdentifiers[0x4008] = "httpUpdate"
UDS_RC.routineControlIdentifiers[0x7000] = "ProcessingApplicationData"
UDS_RC.routineControlIdentifiers[0xa07c] = "RequestDeactivateHddSafeMode"
UDS_RC.routineControlIdentifiers[0xa0b2] = "RequestSteuernApixReinitMode"
UDS_RC.routineControlIdentifiers[0xab8f] = "setEngineAngle"
# Routine IDs 0xe000-0xe1ff are all reserved for OBD test IDs.
for _rid in range(0xe000, 0xe200):
    UDS_RC.routineControlIdentifiers[_rid] = "OBDTestIDs"
UDS_RC.routineControlIdentifiers[0xf013] = "DeactivateSegeln"
UDS_RC.routineControlIdentifiers[0xf043] = "RequestDeactivateMontagemodus"
UDS_RC.routineControlIdentifiers[0xf720] = "ControlSniffingHuPort"
UDS_RC.routineControlIdentifiers[0xf759] = "ControlHeadUnitActivationLine"
UDS_RC.routineControlIdentifiers[0xf760] = "ResetHeadUnitActivationLine"
UDS_RC.routineControlIdentifiers[0xf761] = "ClearFilterCAN"
UDS_RC.routineControlIdentifiers[0xf762] = "SetFilterCAN"
UDS_RC.routineControlIdentifiers[0xf764] = "MessageLogging"
UDS_RC.routineControlIdentifiers[0xf765] = "ReceiveCANFrame"
UDS_RC.routineControlIdentifiers[0xf766] = "SendCANFrame"
UDS_RC.routineControlIdentifiers[0xf767] = "ReceiveFlexrayFrame"
UDS_RC.routineControlIdentifiers[0xf768] = "SendFlexrayFrame"
UDS_RC.routineControlIdentifiers[0xf769] = "SetFilterFlexray"
UDS_RC.routineControlIdentifiers[0xf770] = "ClearFilterFlexray"
UDS_RC.routineControlIdentifiers[0xf774] = "GetStatusLogging"
UDS_RC.routineControlIdentifiers[0xf776] = "MessageTunnelDeauthenticator"
UDS_RC.routineControlIdentifiers[0xf777] = "ControlTransDiagSend"
UDS_RC.routineControlIdentifiers[0xf778] = "ClearFilterAll"
UDS_RC.routineControlIdentifiers[0xf779] = "GetFilterCAN"
UDS_RC.routineControlIdentifiers[0xf77b] = "SteuernFlexrayAutoDetectDisable"
UDS_RC.routineControlIdentifiers[0xf77c] = "SteuernFlexrayPath"
UDS_RC.routineControlIdentifiers[0xf77d] = "SteuernResetLernFlexray"
UDS_RC.routineControlIdentifiers[0xf77f] = "SteuernLernFlexray"
UDS_RC.routineControlIdentifiers[0xf780] = "ClearFilterLIN"
UDS_RC.routineControlIdentifiers[0xf781] = "GetFilterLIN"
UDS_RC.routineControlIdentifiers[0xf782] = "SetFilterLIN"
UDS_RC.routineControlIdentifiers[0xff00] = "eraseMemory"
UDS_RC.routineControlIdentifiers[0xff01] = "checkProgrammingDependencies"
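# RequestDownload (UDS_RD) data format identifiers; the values below are
# mostly BMW-specific memory and transfer formats.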
UDS_RD.dataFormatIdentifiers[0x0001] = "BMW_ROM_EPROM_internal"
UDS_RD.dataFormatIdentifiers[0x0002] = "BMW_ROM_EPROM_external"
UDS_RD.dataFormatIdentifiers[0x0003] = "BMW_NVRAM_characteristicZones_DTCmemory" # noqa E501
UDS_RD.dataFormatIdentifiers[0x0004] = "BMW_RAM_internal_shortMOV"
UDS_RD.dataFormatIdentifiers[0x0005] = "BMW_RAM_external_xDataMOV"
UDS_RD.dataFormatIdentifiers[0x0006] = "BMW_flashEPROM_internal"
UDS_RD.dataFormatIdentifiers[0x0007] = "BMW_UIFmemory"
UDS_RD.dataFormatIdentifiers[0x0008] = "BMW_vehicleOrderDataMemory_onlyToBeUsedByDS2_ECUs" # noqa E501
UDS_RD.dataFormatIdentifiers[0x0009] = "BMW_flashEPROM_external"
UDS_RD.dataFormatIdentifiers[0x000b] = "BMW_RAM_internal_longMOVatRegister"
UDS_RD.dataFormatIdentifiers[0x0010] = "NRV and noEncryptingMethod"
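# ReadScalingDataByIdentifier (RSDBI) shares the ReadDataByIdentifier lookup table.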
UDS_RSDBI.dataIdentifiers = UDS_RDBI.dataIdentifiers
| 1 | 20,125 | Wondering where this breaks btw (although pretty understandable) | secdev-scapy | py |
@@ -64,6 +64,7 @@ public class Spark3BinPackStrategy extends BinPackStrategy {
Dataset<Row> scanDF = cloneSession.read().format("iceberg")
.option(SparkReadOptions.FILE_SCAN_TASK_SET_ID, groupID)
.option(SparkReadOptions.SPLIT_SIZE, splitSize(inputFileSize(filesToRewrite)))
+ .option(SparkReadOptions.LOOKBACK, 10)
.option(SparkReadOptions.FILE_OPEN_COST, "0")
.load(table.name());
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark.actions;
import java.util.List;
import java.util.Set;
import java.util.UUID;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.FileScanTask;
import org.apache.iceberg.Table;
import org.apache.iceberg.actions.BinPackStrategy;
import org.apache.iceberg.spark.FileRewriteCoordinator;
import org.apache.iceberg.spark.FileScanTaskSetManager;
import org.apache.iceberg.spark.SparkReadOptions;
import org.apache.iceberg.spark.SparkWriteOptions;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.internal.SQLConf;
public class Spark3BinPackStrategy extends BinPackStrategy {
private final Table table;
private final SparkSession spark;
private final FileScanTaskSetManager manager = FileScanTaskSetManager.get();
private final FileRewriteCoordinator rewriteCoordinator = FileRewriteCoordinator.get();
public Spark3BinPackStrategy(Table table, SparkSession spark) {
this.table = table;
this.spark = spark;
}
@Override
public Table table() {
return table;
}
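// Stages the given scan tasks under a random group ID, reads them back as a
// single packed Iceberg scan, appends the rewritten files, and always clears
// the staged tasks and rewrite state afterwards.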
@Override
public Set<DataFile> rewriteFiles(List<FileScanTask> filesToRewrite) {
String groupID = UUID.randomUUID().toString();
try {
manager.stageTasks(table, groupID, filesToRewrite);
// Disable Adaptive Query Execution as this may change the output partitioning of our write
SparkSession cloneSession = spark.cloneSession();
cloneSession.conf().set(SQLConf.ADAPTIVE_EXECUTION_ENABLED().key(), false);
Dataset<Row> scanDF = cloneSession.read().format("iceberg")
.option(SparkReadOptions.FILE_SCAN_TASK_SET_ID, groupID)
.option(SparkReadOptions.SPLIT_SIZE, splitSize(inputFileSize(filesToRewrite)))
.option(SparkReadOptions.FILE_OPEN_COST, "0")
.load(table.name());
// write the packed data into new files where each split becomes a new file
scanDF.write()
.format("iceberg")
.option(SparkWriteOptions.REWRITTEN_FILE_SCAN_TASK_SET_ID, groupID)
.option(SparkWriteOptions.TARGET_FILE_SIZE_BYTES, writeMaxFileSize())
.option(SparkWriteOptions.DISTRIBUTION_MODE, "none")
.mode("append")
.save(table.name());
return rewriteCoordinator.fetchNewDataFiles(table, groupID);
} finally {
manager.removeTasks(table, groupID);
rewriteCoordinator.clearRewrite(table, groupID);
}
}
}
| 1 | 43,717 | is this needed? 10 is already the default | apache-iceberg | java |
@@ -35,9 +35,13 @@ class TextPlot(ElementPlot):
data = dict(x=[element.x], y=[element.y])
self._categorize_data(data, ('x', 'y'), element.dimensions())
data['text'] = [element.text]
- style['text_align'] = element.halign
+ if 'text_align' not in style:
+ style['text_align'] = element.halign
baseline = 'middle' if element.valign == 'center' else element.valign
- style['text_baseline'] = baseline
+ if 'text_baseline' not in style:
+ style['text_baseline'] = baseline
+ if 'text_font_size' not in style:
+ style['text_font_size'] = '%dPt' % element.fontsize
if 'color' in style:
style['text_color'] = style.pop('color')
return (data, mapping, style) | 1 | from collections import defaultdict
import numpy as np
from bokeh.models import Span, Arrow
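# Use a TeeHead for the '-[' arrow style where available, falling back to an
# OpenHead on bokeh versions that do not provide TeeHead.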
try:
from bokeh.models.arrow_heads import TeeHead, NormalHead
arrow_start = {'<->': NormalHead, '<|-|>': NormalHead}
arrow_end = {'->': NormalHead, '-[': TeeHead, '-|>': NormalHead,
'-': None}
except ImportError:
from bokeh.models.arrow_heads import OpenHead, NormalHead
arrow_start = {'<->': NormalHead, '<|-|>': NormalHead}
arrow_end = {'->': NormalHead, '-[': OpenHead, '-|>': NormalHead,
'-': None}
from ...element import HLine
from ...core.util import datetime_types
from .element import (ElementPlot, CompositeElementPlot,
text_properties, line_properties)
from .util import date_to_integer
class TextPlot(ElementPlot):
style_opts = text_properties+['color']
_plot_methods = dict(single='text', batched='text')
def get_data(self, element, ranges, style):
mapping = dict(x='x', y='y', text='text')
if self.static_source:
return dict(x=[], y=[], text=[]), mapping, style
if self.invert_axes:
data = dict(x=[element.y], y=[element.x])
else:
data = dict(x=[element.x], y=[element.y])
self._categorize_data(data, ('x', 'y'), element.dimensions())
data['text'] = [element.text]
style['text_align'] = element.halign
baseline = 'middle' if element.valign == 'center' else element.valign
style['text_baseline'] = baseline
if 'color' in style:
style['text_color'] = style.pop('color')
return (data, mapping, style)
def get_batched_data(self, element, ranges=None):
data = defaultdict(list)
zorders = self._updated_zorders(element)
for (key, el), zorder in zip(element.data.items(), zorders):
style = self.lookup_options(element.last, 'style')
style = style.max_cycles(len(self.ordering))[zorder]
eldata, elmapping, style = self.get_data(el, ranges, style)
for k, eld in eldata.items():
data[k].extend(eld)
return data, elmapping, style
def get_extents(self, element, ranges=None):
return None, None, None, None
class LineAnnotationPlot(ElementPlot):
style_opts = line_properties
_plot_methods = dict(single='Span')
def get_data(self, element, ranges, style):
data, mapping = {}, {}
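# A bokeh Span with dimension='width' draws a horizontal line, so HLine maps
# to 'width' (and the mapping flips when the axes are inverted).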
dim = 'width' if isinstance(element, HLine) else 'height'
if self.invert_axes:
dim = 'width' if dim == 'height' else 'height'
mapping['dimension'] = dim
loc = element.data
if isinstance(loc, datetime_types):
loc = date_to_integer(loc)
mapping['location'] = loc
return (data, mapping, style)
def _init_glyph(self, plot, mapping, properties):
"""
Returns a Bokeh glyph object.
"""
box = Span(level='annotation', **mapping)
plot.renderers.append(box)
return None, box
def get_extents(self, element, ranges=None):
return None, None, None, None
class SplinePlot(ElementPlot):
"""
Draw the supplied Spline annotation (see Spline docstring).
Does not support matplotlib Path codes.
"""
style_opts = line_properties
_plot_methods = dict(single='bezier')
def get_data(self, element, ranges, style):
if self.invert_axes:
data_attrs = ['y0', 'x0', 'cy0', 'cx0', 'cy1', 'cx1', 'y1', 'x1']
else:
data_attrs = ['x0', 'y0', 'cx0', 'cy0', 'cx1', 'cy1', 'x1', 'y1']
verts = np.array(element.data[0])
inds = np.where(np.array(element.data[1])==1)[0]
data = {da: [] for da in data_attrs}
skipped = False
for vs in np.split(verts, inds[1:]):
if len(vs) != 4:
skipped = len(vs) > 1
continue
for x, y, xl, yl in zip(vs[:, 0], vs[:, 1], data_attrs[::2], data_attrs[1::2]):
data[xl].append(x)
data[yl].append(y)
if skipped:
            self.warning('Bokeh SplinePlot only supports cubic splines; '
                         'unsupported splines were skipped during plotting.')
data = {da: data[da] for da in data_attrs}
return (data, dict(zip(data_attrs, data_attrs)), style)
class ArrowPlot(CompositeElementPlot):
style_opts = (['arrow_%s' % p for p in line_properties+['size']] + text_properties)
_style_groups = {'arrow': 'arrow', 'label': 'text'}
def get_data(self, element, ranges, style):
plot = self.state
label_mapping = dict(x='x', y='y', text='text')
# Compute arrow
x1, y1 = element.x, element.y
axrange = plot.x_range if self.invert_axes else plot.y_range
span = (axrange.end - axrange.start) / 6.
if element.direction == '^':
x2, y2 = x1, y1-span
label_mapping['text_baseline'] = 'top'
elif element.direction == '<':
x2, y2 = x1+span, y1
label_mapping['text_align'] = 'left'
label_mapping['text_baseline'] = 'middle'
elif element.direction == '>':
x2, y2 = x1-span, y1
label_mapping['text_align'] = 'right'
label_mapping['text_baseline'] = 'middle'
else:
x2, y2 = x1, y1+span
label_mapping['text_baseline'] = 'bottom'
arrow_opts = {'x_end': x1, 'y_end': y1,
'x_start': x2, 'y_start': y2}
# Define arrowhead
arrow_opts['arrow_start'] = arrow_start.get(element.arrowstyle, None)
arrow_opts['arrow_end'] = arrow_end.get(element.arrowstyle, NormalHead)
# Compute label
if self.invert_axes:
label_data = dict(x=[y2], y=[x2])
else:
label_data = dict(x=[x2], y=[y2])
label_data['text'] = [element.text]
return ({'label': label_data},
{'arrow': arrow_opts, 'label': label_mapping}, style)
def _init_glyph(self, plot, mapping, properties, key):
"""
Returns a Bokeh glyph object.
"""
properties.pop('legend', None)
if key == 'arrow':
properties.pop('source')
arrow_end = mapping.pop('arrow_end')
arrow_start = mapping.pop('arrow_start')
start = arrow_start(**properties) if arrow_start else None
end = arrow_end(**properties) if arrow_end else None
glyph = Arrow(start=start, end=end, **dict(**mapping))
else:
properties = {p if p == 'source' else 'text_'+p: v
for p, v in properties.items()}
glyph, _ = super(ArrowPlot, self)._init_glyph(
plot, mapping, properties, 'text_1')
plot.renderers.append(glyph)
return None, glyph
def get_extents(self, element, ranges=None):
return None, None, None, None
| 1 | 20,336 | One day we can try to allow the user to specify font sizes in something other than points. For now this is fine though... | holoviz-holoviews | py |
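The diff above guards each element-derived text property so it no longer clobbers a user-supplied style option, and adds a point-based default font size (the reviewer notes point-only sizes as a future limitation). A minimal sketch of the same guard pattern, assuming only a dict-like style and an element carrying halign/valign/fontsize:

def merge_text_style(style, element):
    # setdefault writes only when the key is absent, mirroring the
    # "if 'text_align' not in style" guards added by the patch.
    style.setdefault('text_align', element.halign)
    baseline = 'middle' if element.valign == 'center' else element.valign
    style.setdefault('text_baseline', baseline)
    style.setdefault('text_font_size', '%dPt' % element.fontsize)
    return style

class _Element:                      # toy element, not the HoloViews class
    halign, valign, fontsize = 'center', 'center', 12

print(merge_text_style({'text_align': 'left'}, _Element()))
# {'text_align': 'left', 'text_baseline': 'middle', 'text_font_size': '12Pt'}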
@@ -29,7 +29,13 @@ namespace rtps {
WriterProxyData::WriterProxyData()
+#if HAVE_SECURITY
+ : security_attributes_(0)
+ , plugin_security_attributes_(0)
+ , m_userDefinedId(0)
+#else
: m_userDefinedId(0)
+#endif
, m_typeMaxSerialized(0)
, m_isAlive(true)
, m_topicKind(NO_KEY) | 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @file WriterProxyData.cpp
*
*/
#include <fastrtps/rtps/builtin/data/WriterProxyData.h>
#include <fastrtps/rtps/common/CDRMessage_t.h>
#include <fastrtps/log/Log.h>
namespace eprosima {
namespace fastrtps{
namespace rtps {
WriterProxyData::WriterProxyData()
: m_userDefinedId(0)
, m_typeMaxSerialized(0)
, m_isAlive(true)
, m_topicKind(NO_KEY)
, m_topicDiscoveryKind(NO_CHECK)
{
// TODO Auto-generated constructor stub
}
WriterProxyData::WriterProxyData(const WriterProxyData& writerInfo)
: m_guid(writerInfo.m_guid)
, m_unicastLocatorList(writerInfo.m_unicastLocatorList)
, m_multicastLocatorList(writerInfo.m_multicastLocatorList)
, m_key(writerInfo.m_key)
, m_RTPSParticipantKey(writerInfo.m_RTPSParticipantKey)
, m_typeName(writerInfo.m_typeName)
, m_topicName(writerInfo.m_topicName)
, m_userDefinedId(writerInfo.m_userDefinedId)
, m_typeMaxSerialized(writerInfo.m_typeMaxSerialized)
, m_isAlive(writerInfo.m_isAlive)
, m_topicKind(writerInfo.m_topicKind)
, persistence_guid_(writerInfo.persistence_guid_)
, m_topicDiscoveryKind(writerInfo.m_topicDiscoveryKind)
, m_type_id(writerInfo.m_type_id)
, m_type(writerInfo.m_type)
{
m_qos.setQos(writerInfo.m_qos, true);
}
WriterProxyData::~WriterProxyData() {
// TODO Auto-generated destructor stub
logInfo(RTPS_PROXY_DATA,this->m_guid);
}
WriterProxyData& WriterProxyData::operator=(const WriterProxyData& writerInfo)
{
m_guid = writerInfo.m_guid;
m_unicastLocatorList = writerInfo.m_unicastLocatorList;
m_multicastLocatorList = writerInfo.m_multicastLocatorList;
m_key = writerInfo.m_key;
m_RTPSParticipantKey = writerInfo.m_RTPSParticipantKey;
m_typeName = writerInfo.m_typeName;
m_topicName = writerInfo.m_topicName;
m_userDefinedId = writerInfo.m_userDefinedId;
m_typeMaxSerialized = writerInfo.m_typeMaxSerialized;
m_isAlive = writerInfo.m_isAlive;
m_topicKind = writerInfo.m_topicKind;
persistence_guid_ = writerInfo.persistence_guid_;
m_qos.setQos(writerInfo.m_qos, true);
m_topicDiscoveryKind = writerInfo.m_topicDiscoveryKind;
m_type_id = writerInfo.m_type_id;
m_type = writerInfo.m_type;
return *this;
}
ParameterList_t WriterProxyData::toParameterList()
{
ParameterList_t parameter_list;
for(LocatorListIterator lit = m_unicastLocatorList.begin();
lit!=m_unicastLocatorList.end();++lit)
{
ParameterLocator_t* p = new ParameterLocator_t(PID_UNICAST_LOCATOR,PARAMETER_LOCATOR_LENGTH,*lit);
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
for(LocatorListIterator lit = m_multicastLocatorList.begin();
lit!=m_multicastLocatorList.end();++lit)
{
ParameterLocator_t* p = new ParameterLocator_t(PID_MULTICAST_LOCATOR,PARAMETER_LOCATOR_LENGTH,*lit);
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
{
ParameterGuid_t* p = new ParameterGuid_t(PID_PARTICIPANT_GUID,PARAMETER_GUID_LENGTH,m_RTPSParticipantKey);
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
{
ParameterString_t * p = new ParameterString_t(PID_TOPIC_NAME, 0, m_topicName);
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
{
ParameterString_t * p = new ParameterString_t(PID_TYPE_NAME,0,m_typeName);
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
{
ParameterKey_t * p = new ParameterKey_t(PID_KEY_HASH,16,m_key);
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
{
ParameterGuid_t * p = new ParameterGuid_t(PID_ENDPOINT_GUID,16,m_guid);
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
{
ParameterPort_t* p = new ParameterPort_t(PID_TYPE_MAX_SIZE_SERIALIZED,4,m_typeMaxSerialized);
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
{
ParameterProtocolVersion_t* p = new ParameterProtocolVersion_t(PID_PROTOCOL_VERSION,4);
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
{
ParameterVendorId_t*p = new ParameterVendorId_t(PID_VENDORID,4);
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
if(persistence_guid_ != c_Guid_Unknown)
{
ParameterGuid_t * p = new ParameterGuid_t(PID_PERSISTENCE_GUID, 16, persistence_guid_);
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
if( m_qos.m_durability.sendAlways() || m_qos.m_durability.hasChanged)
{
DurabilityQosPolicy*p = new DurabilityQosPolicy();
*p = m_qos.m_durability;
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
if(m_qos.m_durabilityService.sendAlways() || m_qos.m_durabilityService.hasChanged)
{
DurabilityServiceQosPolicy*p = new DurabilityServiceQosPolicy();
*p = m_qos.m_durabilityService;
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
if(m_qos.m_deadline.sendAlways() || m_qos.m_deadline.hasChanged)
{
DeadlineQosPolicy*p = new DeadlineQosPolicy();
*p = m_qos.m_deadline;
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
if(m_qos.m_latencyBudget.sendAlways() || m_qos.m_latencyBudget.hasChanged)
{
LatencyBudgetQosPolicy*p = new LatencyBudgetQosPolicy();
*p = m_qos.m_latencyBudget;
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
if(m_qos.m_durability.sendAlways() || m_qos.m_liveliness.hasChanged)
{
LivelinessQosPolicy*p = new LivelinessQosPolicy();
*p = m_qos.m_liveliness;
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
if(m_qos.m_reliability.sendAlways() || m_qos.m_reliability.hasChanged)
{
ReliabilityQosPolicy*p = new ReliabilityQosPolicy();
*p = m_qos.m_reliability;
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
if(m_qos.m_lifespan.sendAlways() || m_qos.m_lifespan.hasChanged)
{
LifespanQosPolicy*p = new LifespanQosPolicy();
*p = m_qos.m_lifespan;
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
if( m_qos.m_userData.sendAlways() || m_qos.m_userData.hasChanged)
{
UserDataQosPolicy*p = new UserDataQosPolicy();
*p = m_qos.m_userData;
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
if(m_qos.m_timeBasedFilter.sendAlways() || m_qos.m_timeBasedFilter.hasChanged)
{
TimeBasedFilterQosPolicy*p = new TimeBasedFilterQosPolicy();
*p = m_qos.m_timeBasedFilter;
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
if(m_qos.m_ownership.sendAlways() || m_qos.m_ownership.hasChanged)
{
OwnershipQosPolicy*p = new OwnershipQosPolicy();
*p = m_qos.m_ownership;
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
if(m_qos.m_durability.sendAlways() || m_qos.m_ownershipStrength.hasChanged)
{
OwnershipStrengthQosPolicy*p = new OwnershipStrengthQosPolicy();
*p = m_qos.m_ownershipStrength;
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
if(m_qos.m_destinationOrder.sendAlways() || m_qos.m_destinationOrder.hasChanged)
{
DestinationOrderQosPolicy*p = new DestinationOrderQosPolicy();
*p = m_qos.m_destinationOrder;
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
if(m_qos.m_presentation.sendAlways() || m_qos.m_presentation.hasChanged)
{
PresentationQosPolicy*p = new PresentationQosPolicy();
*p = m_qos.m_presentation;
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
if(m_qos.m_partition.sendAlways() || m_qos.m_partition.hasChanged)
{
PartitionQosPolicy*p = new PartitionQosPolicy();
*p = m_qos.m_partition;
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
if(m_qos.m_topicData.sendAlways() || m_qos.m_topicData.hasChanged)
{
TopicDataQosPolicy*p = new TopicDataQosPolicy();
*p = m_qos.m_topicData;
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
if(m_qos.m_groupData.sendAlways() || m_qos.m_groupData.hasChanged)
{
GroupDataQosPolicy*p = new GroupDataQosPolicy();
*p = m_qos.m_groupData;
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
if (m_topicDiscoveryKind != NO_CHECK)
{
if (m_type_id.m_type_identifier->_d() != 0)
{
TypeIdV1 * p = new TypeIdV1();
*p = m_type_id;
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
if (m_type.m_type_object->_d() != 0)
{
TypeObjectV1 * p = new TypeObjectV1();
*p = m_type;
parameter_list.m_parameters.push_back((Parameter_t*)p);
}
}
logInfo(RTPS_PROXY_DATA," with " << parameter_list.m_parameters.size()<< " parameters");
return parameter_list;
}
bool WriterProxyData::readFromCDRMessage(CDRMessage_t* msg)
{
ParameterList_t parameter_list;
if(ParameterList::readParameterListfromCDRMsg(msg, ¶meter_list, NULL, true) > 0)
{
for(std::vector<Parameter_t*>::iterator it = parameter_list.m_parameters.begin();
it!=parameter_list.m_parameters.end();++it)
{
switch((*it)->Pid)
{
case PID_DURABILITY:
{
DurabilityQosPolicy * p = (DurabilityQosPolicy*)(*it);
m_qos.m_durability = *p;
break;
}
case PID_DURABILITY_SERVICE:
{
DurabilityServiceQosPolicy * p = (DurabilityServiceQosPolicy*)(*it);
m_qos.m_durabilityService = *p;
break;
}
case PID_DEADLINE:
{
DeadlineQosPolicy * p = (DeadlineQosPolicy*)(*it);
m_qos.m_deadline = *p;
break;
}
case PID_LATENCY_BUDGET:
{
LatencyBudgetQosPolicy * p = (LatencyBudgetQosPolicy*)(*it);
m_qos.m_latencyBudget = *p;
break;
}
case PID_LIVELINESS:
{
LivelinessQosPolicy * p = (LivelinessQosPolicy*)(*it);
m_qos.m_liveliness = *p;
break;
}
case PID_RELIABILITY:
{
ReliabilityQosPolicy * p = (ReliabilityQosPolicy*)(*it);
m_qos.m_reliability = *p;
break;
}
case PID_LIFESPAN:
{
LifespanQosPolicy * p = (LifespanQosPolicy*)(*it);
m_qos.m_lifespan = *p;
break;
}
case PID_USER_DATA:
{
UserDataQosPolicy * p = (UserDataQosPolicy*)(*it);
m_qos.m_userData = *p;
break;
}
case PID_TIME_BASED_FILTER:
{
TimeBasedFilterQosPolicy * p = (TimeBasedFilterQosPolicy*)(*it);
m_qos.m_timeBasedFilter = *p;
break;
}
case PID_OWNERSHIP:
{
OwnershipQosPolicy * p = (OwnershipQosPolicy*)(*it);
m_qos.m_ownership = *p;
break;
}
case PID_OWNERSHIP_STRENGTH:
{
OwnershipStrengthQosPolicy * p = (OwnershipStrengthQosPolicy*)(*it);
m_qos.m_ownershipStrength = *p;
break;
}
case PID_DESTINATION_ORDER:
{
DestinationOrderQosPolicy * p = (DestinationOrderQosPolicy*)(*it);
m_qos.m_destinationOrder = *p;
break;
}
case PID_PRESENTATION:
{
PresentationQosPolicy * p = (PresentationQosPolicy*)(*it);
m_qos.m_presentation = *p;
break;
}
case PID_PARTITION:
{
PartitionQosPolicy * p = (PartitionQosPolicy*)(*it);
m_qos.m_partition = *p;
break;
}
case PID_TOPIC_DATA:
{
TopicDataQosPolicy * p = (TopicDataQosPolicy*)(*it);
m_qos.m_topicData = *p;
break;
}
case PID_GROUP_DATA:
{
GroupDataQosPolicy * p = (GroupDataQosPolicy*)(*it);
m_qos.m_groupData = *p;
break;
}
case PID_TOPIC_NAME:
{
ParameterString_t*p = (ParameterString_t*)(*it);
m_topicName = std::string(p->getName());
break;
}
case PID_TYPE_NAME:
{
ParameterString_t*p = (ParameterString_t*)(*it);
m_typeName = std::string(p->getName());
break;
}
case PID_PARTICIPANT_GUID:
{
ParameterGuid_t * p = (ParameterGuid_t*)(*it);
for(uint8_t i = 0; i < 16; ++i)
{
if(i < 12)
m_RTPSParticipantKey.value[i] = p->guid.guidPrefix.value[i];
else
m_RTPSParticipantKey.value[i] = p->guid.entityId.value[i - 12];
}
break;
}
case PID_ENDPOINT_GUID:
{
ParameterGuid_t * p = (ParameterGuid_t*)(*it);
m_guid = p->guid;
for(uint8_t i=0;i<16;++i)
{
if(i<12)
m_key.value[i] = p->guid.guidPrefix.value[i];
else
m_key.value[i] = p->guid.entityId.value[i - 12];
}
break;
}
case PID_PERSISTENCE_GUID:
{
ParameterGuid_t * p = (ParameterGuid_t*)(*it);
persistence_guid_ = p->guid;
}
break;
case PID_UNICAST_LOCATOR:
{
ParameterLocator_t* p = (ParameterLocator_t*)(*it);
m_unicastLocatorList.push_back(p->locator);
break;
}
case PID_MULTICAST_LOCATOR:
{
ParameterLocator_t* p = (ParameterLocator_t*)(*it);
m_multicastLocatorList.push_back(p->locator);
break;
}
case PID_KEY_HASH:
{
ParameterKey_t*p=(ParameterKey_t*)(*it);
m_key = p->key;
iHandle2GUID(m_guid,m_key);
break;
}
case PID_TYPE_IDV1:
{
TypeIdV1 * p = (TypeIdV1*)(*it);
m_type_id = *p;
m_topicDiscoveryKind = MINIMAL;
if (m_type_id.m_type_identifier->_d() == EK_COMPLETE)
{
m_topicDiscoveryKind = COMPLETE;
}
break;
}
case PID_TYPE_OBJECTV1:
{
TypeObjectV1 * p = (TypeObjectV1*)(*it);
m_type = *p;
m_topicDiscoveryKind = MINIMAL;
if (m_type.m_type_object->_d() == EK_COMPLETE)
{
m_topicDiscoveryKind = COMPLETE;
}
break;
}
default:
{
//logInfo(RTPS_PROXY_DATA,"Parameter with ID: " << (uint16_t)(*it)->Pid <<" NOT CONSIDERED");
break;
}
}
if(m_guid.entityId.value[3] == 0x03)
m_topicKind = NO_KEY;
else if(m_guid.entityId.value[3] == 0x02)
m_topicKind = WITH_KEY;
}
return true;
}
return false;
}
void WriterProxyData::clear()
{
m_guid = c_Guid_Unknown;
m_unicastLocatorList.clear();
m_multicastLocatorList.clear();
m_key = InstanceHandle_t();
m_RTPSParticipantKey = InstanceHandle_t();
m_typeName = "";
m_topicName = "";
m_userDefinedId = 0;
m_qos = WriterQos();
m_typeMaxSerialized = 0;
m_isAlive = true;
m_topicKind = NO_KEY;
persistence_guid_ = c_Guid_Unknown;
}
void WriterProxyData::copy(WriterProxyData* wdata)
{
m_guid = wdata->m_guid;
m_unicastLocatorList = wdata->m_unicastLocatorList;
m_multicastLocatorList = wdata->m_multicastLocatorList;
m_key = wdata->m_key;
m_RTPSParticipantKey = wdata->m_RTPSParticipantKey;
m_typeName = wdata->m_typeName;
m_topicName = wdata->m_topicName;
m_userDefinedId = wdata->m_userDefinedId;
m_qos = wdata->m_qos;
m_typeMaxSerialized = wdata->m_typeMaxSerialized;
m_isAlive = wdata->m_isAlive;
m_topicKind = wdata->m_topicKind;
persistence_guid_ = wdata->persistence_guid_;
m_topicDiscoveryKind = wdata->m_topicDiscoveryKind;
if (m_topicDiscoveryKind != NO_CHECK)
{
m_type_id = wdata->m_type_id;
m_type = wdata->m_type;
}
}
void WriterProxyData::update(WriterProxyData* wdata)
{
m_unicastLocatorList = wdata->m_unicastLocatorList;
m_multicastLocatorList = wdata->m_multicastLocatorList;
m_qos.setQos(wdata->m_qos,false);
m_isAlive = wdata->m_isAlive;
}
RemoteWriterAttributes WriterProxyData::toRemoteWriterAttributes() const
{
RemoteWriterAttributes remoteAtt;
remoteAtt.guid = m_guid;
remoteAtt.livelinessLeaseDuration = m_qos.m_liveliness.lease_duration;
remoteAtt.ownershipStrength = (uint16_t)m_qos.m_ownershipStrength.value;
remoteAtt.endpoint.durabilityKind = m_qos.m_durability.durabilityKind();
remoteAtt.endpoint.endpointKind = WRITER;
remoteAtt.endpoint.topicKind = m_topicKind;
remoteAtt.endpoint.reliabilityKind = m_qos.m_reliability.kind == RELIABLE_RELIABILITY_QOS ? RELIABLE : BEST_EFFORT;
remoteAtt.endpoint.unicastLocatorList = this->m_unicastLocatorList;
remoteAtt.endpoint.multicastLocatorList = this->m_multicastLocatorList;
remoteAtt.endpoint.persistence_guid = (persistence_guid_ == c_Guid_Unknown) ? m_guid : persistence_guid_;
return remoteAtt;
}
}
} /* namespace rtps */
} /* namespace eprosima */
| 1 | 13,000 | duplicated in both block of preprocessor condition. Can you join in one? | eProsima-Fast-DDS | cpp |
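The reviewer's point above is that m_userDefinedId(0) appears in both arms of the #if HAVE_SECURITY initializer list and could be written once. C++ member-initializer syntax makes the join slightly awkward, so this is only a Python analogue of the deduplicated shape, with the build-time macro modeled as a flag:

HAVE_SECURITY = True  # stand-in for the compile-time macro

class WriterProxyData:
    def __init__(self):
        if HAVE_SECURITY:
            # members that exist only in security-enabled builds
            self.security_attributes = 0
            self.plugin_security_attributes = 0
        # shared members written exactly once, in either configuration
        self.m_userDefinedId = 0
        self.m_typeMaxSerialized = 0
        self.m_isAlive = True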
@@ -50,9 +50,10 @@ function getXPathArray(node, path) {
var element = {};
element.str = node.nodeName.toLowerCase();
// add the id and the count so we can construct robust versions of the xpath
- if(node.getAttribute && node.getAttribute('id') &&
- node.ownerDocument.querySelectorAll('#' + axe.utils.escapeSelector(node.id)).length === 1) {
-
+ if(
+ node.getAttribute && node.getAttribute('id') &&
+ node.ownerDocument.querySelectorAll('#' + axe.utils.escapeSelector(node.getAttribute('id'))).length === 1
+ ) {
element.id = node.getAttribute('id');
}
if(count > 1) { | 1 | /*global axe */
//jshint maxstatements: false, maxcomplexity: false
function getXPathArray(node, path) {
var sibling, count;
// Gets an XPath for an element which describes its hierarchical location.
if(!node) {
return [];
}
if (!path && node.nodeType === 9) {
// special case for when we are called and give the document itself as the starting node
path = [{
str: 'html'
}];
return path;
}
path = path || [];
if (node.parentNode && node.parentNode !== node) {
path = getXPathArray(node.parentNode, path);
}
if (node.previousSibling) {
count = 1;
sibling = node.previousSibling;
do {
if(sibling.nodeType === 1 && sibling.nodeName === node.nodeName) {
count++;
}
sibling = sibling.previousSibling;
}
while (sibling);
if(count === 1) {
count = null;
}
} else if (node.nextSibling) {
sibling = node.nextSibling;
do {
if(sibling.nodeType === 1 && sibling.nodeName === node.nodeName) {
count = 1;
sibling = null;
} else {
count = null;
sibling = sibling.previousSibling;
}
} while (sibling);
}
if(node.nodeType === 1) {
var element = {};
element.str = node.nodeName.toLowerCase();
// add the id and the count so we can construct robust versions of the xpath
if(node.getAttribute && node.getAttribute('id') &&
node.ownerDocument.querySelectorAll('#' + axe.utils.escapeSelector(node.id)).length === 1) {
element.id = node.getAttribute('id');
}
if(count > 1) {
element.count = count;
}
path.push(element);
}
return path;
}
// Robust is intended to allow xpaths to be robust to changes in the HTML structure of the page
// This means always use the id when present
// Non robust means always use the count (i.e. the exact position of the element)
// Ironically this is a bit of a misnomer because in very, very dynamic pages (e.g. where ids are generated on the fly)
// the non-robust XPaths will work whereas the robust ones will not work
function xpathToString(xpathArray) {
return xpathArray.reduce((str, elm) => {
if(elm.id) {
return `/${elm.str}[@id='${elm.id}']`;
} else {
return str + `/${elm.str}` + (elm.count > 0 ? `[${elm.count}]` : '');
}
}, '');
}
axe.utils.getXpath = function getXpath(node) {
var xpathArray = getXPathArray(node);
return xpathToString(xpathArray);
};
| 1 | 11,200 | Can we put the first part of the expression inline with the `if(` for consistency's sake? | dequelabs-axe-core | js |
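The diff above makes the uniqueness check read the id through getAttribute('id'), matching the assignment on the next line; node.id is not guaranteed to agree with the attribute in every case (for example, a form control named "id" shadows the id property of its <form> element). The xpathToString() reduce is also worth a worked example — an id step restarts the path at the uniquely-identified element, while positional steps append. A small Python transcription of that logic:

def xpath_to_string(parts):
    out = ''
    for p in parts:
        if p.get('id'):
            # robust step: restart the path at the uniquely-identified element
            out = "/%s[@id='%s']" % (p['str'], p['id'])
        else:
            count = p.get('count', 0)
            out += '/' + p['str'] + ('[%d]' % count if count > 0 else '')
    return out

print(xpath_to_string([{'str': 'html'}, {'str': 'body'},
                       {'str': 'div', 'id': 'main'}, {'str': 'p', 'count': 2}]))
# -> /div[@id='main']/p[2]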
@@ -26,9 +26,15 @@ import "github.com/yarpc/yarpc-go/transport"
type CallReqMeta interface {
Procedure(string) CallReqMeta
Headers(Headers) CallReqMeta
+ ShardKey(string) CallReqMeta
+ RoutingKey(string) CallReqMeta
+ RoutingDelegate(string) CallReqMeta
GetProcedure() string
GetHeaders() Headers
+ GetShardKey() string
+ GetRoutingKey() string
+ GetRoutingDelegate() string
}
// ReqMeta contains information about an incoming YARPC request. | 1 | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package yarpc
import "github.com/yarpc/yarpc-go/transport"
// CallReqMeta contains information about an outgoing YARPC request.
type CallReqMeta interface {
Procedure(string) CallReqMeta
Headers(Headers) CallReqMeta
GetProcedure() string
GetHeaders() Headers
}
// ReqMeta contains information about an incoming YARPC request.
type ReqMeta interface {
Caller() string
Encoding() transport.Encoding
Headers() Headers
Procedure() string
Service() string
}
// NewReqMeta constructs a CallReqMeta with the given Context.
func NewReqMeta() CallReqMeta {
return &callReqMeta{}
}
type callReqMeta struct {
procedure string
headers Headers
}
func (r *callReqMeta) Procedure(p string) CallReqMeta {
r.procedure = p
return r
}
func (r *callReqMeta) Headers(h Headers) CallReqMeta {
r.headers = h
return r
}
func (r *callReqMeta) GetProcedure() string {
return r.procedure
}
func (r *callReqMeta) GetHeaders() Headers {
return r.headers
}
| 1 | 10,572 | Do servers have no need of accessing this information? | yarpc-yarpc-go | go |
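The diff above extends the fluent CallReqMeta builder with shard-key and routing fields, each setter returning the interface so calls chain; the reviewer asks whether the server-side ReqMeta should expose them as well. A minimal sketch of the chaining-setter-plus-getter shape in Python:

class CallReqMeta:
    def __init__(self):
        self._procedure = None
        self._shard_key = None

    def procedure(self, p):   # each setter returns self so calls chain
        self._procedure = p
        return self

    def shard_key(self, k):
        self._shard_key = k
        return self

    def get_shard_key(self):
        return self._shard_key

meta = CallReqMeta().procedure('get-user').shard_key('user:42')
print(meta.get_shard_key())  # user:42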
@@ -51,7 +51,7 @@
// @vue/component
var commonFormattersMixin = {
methods: {
- formatTimeAgo: countlyCommon.formatTimeAgo,
+ formatTimeAgo: countlyCommon.formatTimeAgoText,
formatNumber: countlyCommon.formatNumber,
getShortNumber: countlyCommon.getShortNumber
} | 1 | /* global countlyCommon, jQuery, Vue, Vuex, T, countlyView, Promise, VueCompositionAPI, app, countlyGlobal */
(function(countlyVue, $) {
// @vue/component
var autoRefreshMixin = {
mounted: function() {
var self = this;
this.$root.$on("cly-refresh", function() {
self.refresh();
});
},
methods: {
refresh: function() {}
},
beforeDestroy: function() {
this.$root.$off();
}
};
var _i18n = function() {
return jQuery.i18n.prop.apply(null, arguments);
};
var _$ = {
ajax: function(request, options) {
options = options || {};
var ajaxP = new Promise(function(resolve, reject) {
$.ajax(request).done(resolve).fail(reject);
});
if (!options.disableAutoCatch) {
return ajaxP.catch(function(err) {
// eslint-disable-next-line no-console
console.log("AJAX Promise error:", err);
});
}
return ajaxP;
}
};
// @vue/component
var i18nMixin = {
methods: {
i18n: _i18n,
i18nM: function(key) {
return jQuery.i18n.map[key];
}
}
};
// @vue/component
var commonFormattersMixin = {
methods: {
formatTimeAgo: countlyCommon.formatTimeAgo,
formatNumber: countlyCommon.formatNumber,
getShortNumber: countlyCommon.getShortNumber
}
};
// @vue/component
var refreshOnParentActiveMixin = {
watch: {
isParentActive: function(newState) {
if (newState) {
this.refresh();
}
}
},
methods: {
refresh: function() {}
}
};
var _mixins = {
'autoRefresh': autoRefreshMixin,
'refreshOnParentActive': refreshOnParentActiveMixin,
'i18n': i18nMixin,
'commonFormatters': commonFormattersMixin
};
var _globalVuexStore = new Vuex.Store({
modules: {
countlyCommon: {
namespaced: true,
state: {
period: countlyCommon.getPeriod(),
periodLabel: countlyCommon.getDateRangeForCalendar(),
activeApp: null
},
getters: {
period: function(state) {
return state.period;
},
periodLabel: function(state) {
return state.periodLabel;
},
getActiveApp: function(state) {
return state.activeApp;
}
},
mutations: {
setPeriod: function(state, period) {
state.period = period;
},
setPeriodLabel: function(state, periodLabel) {
state.periodLabel = periodLabel;
},
setActiveApp: function(state, activeApp) {
state.activeApp = activeApp;
}
},
actions: {
updatePeriod: function(context, obj) {
context.commit("setPeriod", obj.period);
context.commit("setPeriodLabel", obj.label);
},
updateActiveApp: function(context, id) {
var appObj = countlyGlobal.apps[id];
if (appObj) {
context.commit("setActiveApp", Object.freeze(JSON.parse(JSON.stringify(appObj))));
}
}
}
}
}
});
$(document).ready(function() {
app.addAppSwitchCallback(function(appId) {
_globalVuexStore.dispatch("countlyCommon/updateActiveApp", appId);
});
});
var _uniqueCopiedStoreId = 0;
var _vuex = {
getGlobalStore: function() {
return _globalVuexStore;
},
registerGlobally: function(wrapper, copy, force) {
var store = _globalVuexStore;
var name = wrapper.name;
if (copy) {
name += "_" + _uniqueCopiedStoreId;
_uniqueCopiedStoreId += 1;
}
if (!store.hasModule(name) || force) {
store.registerModule(name, wrapper.module);
}
return name;
},
unregister: function(name) {
_globalVuexStore.unregisterModule(name);
}
};
var BackboneRouteAdapter = function() {};
Vue.prototype.$route = new BackboneRouteAdapter();
var DummyCompAPI = VueCompositionAPI.defineComponent({
name: "DummyCompAPI",
template: '<div></div>',
setup: function() {}
});
var TemplateLoader = function(templates) {
this.templates = templates;
this.elementsToBeRendered = [];
};
TemplateLoader.prototype.load = function() {
var self = this;
var getDeferred = function(fName, elId) {
if (!elId) {
return T.get(fName, function(src) {
self.elementsToBeRendered.push(src);
});
}
else {
return T.get(fName, function(src) {
self.elementsToBeRendered.push("<script type='text/x-template' id='" + elId + "'>" + src + "</script>");
});
}
};
if (this.templates) {
var templatesDeferred = [];
this.templates.forEach(function(item) {
if (typeof item === "string") {
templatesDeferred.push(getDeferred(item));
return;
}
for (var name in item.mapping) {
var fileName = item.mapping[name];
var elementId = item.namespace + "-" + name;
templatesDeferred.push(getDeferred(fileName, elementId));
}
});
return $.when.apply(null, templatesDeferred);
}
return true;
};
TemplateLoader.prototype.mount = function(parentSelector) {
parentSelector = parentSelector || "#vue-templates";
this.elementsToBeRendered.forEach(function(el) {
var jqEl = $(el);
var elId = jqEl.get(0).id;
if ($(parentSelector).find("#" + elId).length === 0) {
$(parentSelector).append(jqEl);
}
else {
// eslint-disable-next-line no-console
console.log("Duplicate component templates are not allowed. Please check the template with \"" + elId + "\" id.");
}
});
};
TemplateLoader.prototype.destroy = function() {
this.elementsToBeRendered = [];
};
var VuexLoader = function(vuex) {
this.vuex = vuex;
this.loadedModuleIds = [];
};
VuexLoader.prototype.load = function() {
var self = this;
this.vuex.forEach(function(item) {
var module = item.clyModel.getVuexModule();
_vuex.registerGlobally(module);
self.loadedModuleIds.push(module.name);
});
};
VuexLoader.prototype.destroy = function() {
this.loadedModuleIds.forEach(function(mid) {
_vuex.unregister(mid);
});
this.loadedModuleIds = [];
};
var countlyVueWrapperView = countlyView.extend({
constructor: function(opts) {
this.component = opts.component;
this.defaultArgs = opts.defaultArgs;
this.vuex = opts.vuex;
this.templates = opts.templates;
this.templateLoader = new TemplateLoader(this.templates);
this.vuexLoader = new VuexLoader(this.vuex);
},
beforeRender: function() {
return this.templateLoader.load();
},
renderCommon: function(isRefresh) {
if (!isRefresh) {
$(this.el).html("<div><div class='vue-wrapper'></div><div id='vue-templates'></div></div>");
$("body").addClass("cly-vue-theme-clydef");
this.templateLoader.mount();
}
},
refresh: function() {
var self = this;
if (self.vm) {
self.vm.$emit("cly-refresh");
}
},
afterRender: function() {
var el = $(this.el).find('.vue-wrapper').get(0),
self = this;
if (self.vuex) {
self.vuexLoader.load();
}
/*
                Some 3rd party components, such as ECharts, use the Composition API.
                It is not clear why, but when a view with those components is destroyed,
                they leave some memory leaks. Instantiating DummyCompAPI triggers memory cleanups.
*/
self.vm = new Vue({
el: el,
store: _vuex.getGlobalStore(),
components: {
DummyCompAPI: DummyCompAPI,
MainView: self.component
},
template: '<div>\
<MainView></MainView>\
<DummyCompAPI></DummyCompAPI>\
</div>',
beforeCreate: function() {
this.$route.params = self.params;
}
});
self.vm.$on("cly-date-change", function() {
self.vm.$emit("cly-refresh");
});
},
destroy: function() {
var self = this;
this.templateLoader.destroy();
if (self.vm) {
$("body").removeClass("cly-vue-theme-clydef");
self.vm.$destroy();
self.vm.$off();
$(self.vm.$el).remove();
self.vm = null;
}
this.vuexLoader.destroy();
}
});
var _uniqueComponentId = 0;
var countlyBaseComponent = Vue.extend({
computed: {
componentId: function() {
return "cly-cmp-" + _uniqueComponentId;
}
},
beforeCreate: function() {
this.ucid = _uniqueComponentId.toString();
_uniqueComponentId += 1;
}
});
var countlyBaseView = countlyBaseComponent.extend(
// @vue/component
{
mixins: [
_mixins.autoRefresh,
_mixins.i18n,
_mixins.commonFormatters
],
props: {
name: { type: String, default: null},
id: { type: String, default: null }
},
computed: {
isParentActive: function() {
return this.$parent.isActive !== false;
},
vName: function() {
return this.name;
},
vId: function() {
return this.id;
}
}
}
);
var BaseContentMixin = countlyBaseComponent.extend(
// @vue/component
{
inheritAttrs: false,
mixins: [
_mixins.i18n
],
props: {
name: { type: String, default: null},
id: { type: String, default: null },
alwaysMounted: { type: Boolean, default: true },
alwaysActive: { type: Boolean, default: false },
role: { type: String, default: "default" }
},
data: function() {
return {
isContent: true
};
},
computed: {
isActive: function() {
return this.alwaysActive || (this.role === "default" && this.$parent.activeContentId === this.id);
},
tName: function() {
return this.name;
},
tId: function() {
return this.id;
},
elementId: function() {
return this.componentId + "-" + this.id;
}
}
}
);
var templateUtil = {
stage: function(fileName) {
return {
fileName: fileName
};
},
load: function(fileName) {
return new Promise(function(resolve) {
T.get(fileName, function(src) {
resolve(src);
});
/*
// eslint-disable-next-line no-console
console.log("Async component template load error:", err);
resolve(opts.component);
*/
});
}
};
var asyncCreate = function(base) {
return function(opts, baseOverride) {
var finalBase = baseOverride || base;
if (typeof opts.template === "string") {
return finalBase.extend(opts);
}
return function() {
return templateUtil.load(opts.template.fileName).then(function(template) {
opts.template = template;
return finalBase.extend(opts);
});
};
};
};
_mixins.BaseContent = BaseContentMixin;
var _components = {
BaseComponent: countlyBaseComponent,
create: asyncCreate(countlyBaseComponent)
};
var _views = {
BackboneWrapper: countlyVueWrapperView,
BaseView: countlyBaseView,
create: asyncCreate(countlyBaseView)
};
var rootElements = {
i18n: _i18n,
$: _$,
mixins: _mixins,
views: _views,
components: _components,
vuex: _vuex,
T: templateUtil.stage
};
for (var key in rootElements) {
countlyVue[key] = rootElements[key];
}
window.CV = countlyVue;
}(window.countlyVue = window.countlyVue || {}, jQuery));
| 1 | 14,171 | It feels like this will break some stuff, but let it break. We need "text" version anyway. | Countly-countly-server | js |
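The one-line diff above rebinds the mixin's formatTimeAgo to countlyCommon.formatTimeAgoText; per the review, a plain-text variant is wanted even if some callers of the old output break. Purely as an illustration — Countly's actual thresholds and wording live in countlyCommon, not here — a minimal plain-text time-ago formatter looks like:

import time

def format_time_ago_text(ts, now=None):
    delta = max(0, int((now if now is not None else time.time()) - ts))
    for size, unit in ((60, 'second'), (60, 'minute'), (24, 'hour')):
        if delta < size:
            return '%d %s%s ago' % (delta, unit, '' if delta == 1 else 's')
        delta //= size
    return '%d day%s ago' % (delta, '' if delta == 1 else 's')

print(format_time_ago_text(time.time() - 7200))  # 2 hours ago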
@@ -717,7 +717,7 @@ static ItemExpr *intersectColumns(const RETDesc &leftTable,
ItemExpr *leftExpr = leftTable.getValueId(i).getItemExpr();
ItemExpr *rightExpr = rightTable.getValueId(i).getItemExpr();
BiRelat *compare = new (bindWA->wHeap())
- BiRelat(ITM_EQUAL, leftExpr, rightExpr);
+ BiRelat(ITM_EQUAL, leftExpr, rightExpr, TRUE);
if (predicate)
predicate = new (bindWA->wHeap()) BiLogic(ITM_AND, predicate, compare);
else | 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
******************************************************************************
*
* File: BindRelExpr.C
* Description: Relational expressions (both physical and logical operators)
* Methods related to the SQL binder
*
* Created: 5/17/94
* Language: C++
*
*
*
* It is the secret sympathy,
* The silver link, the silken tie,
* Which heart to heart, and mind to mind,
* In body and in soul can bind.
* -- Sir Walter Scott,
* "The Lay of the Last Minstrel"
*
******************************************************************************
*/
#define SQLPARSERGLOBALS_FLAGS // must precede all #include's
#define SQLPARSERGLOBALS_NADEFAULTS
#include "Platform.h"
#include "NAWinNT.h"
#include "Sqlcomp.h"
#include "AllItemExpr.h"
#include "AllRelExpr.h"
#include "BindWA.h"
#include "ComOperators.h"
#include "ComTransInfo.h"
#include "ComLocationNames.h"
#include "ControlDB.h"
#include "Debug.h"
#include "ex_error.h"
#include "GroupAttr.h"
#include "ParNameLocList.h"
#include "parser.h"
#include "Rel3GL.h"
#include "RelDCL.h"
#include "RelPackedRows.h"
#include "RelSequence.h"
#include "ShowSchema.h" // GetControlDefaults class
#include "StmtDDLAddConstraintCheck.h"
#include "StmtDDLCreateView.h"
#include "ElemDDLColRefArray.h"
#include "ElemDDLSaltOptions.h"
#include "desc.h"
#include "UdrErrors.h"
#include "SequenceGeneratorAttributes.h"
#include "wstr.h"
#include "Inlining.h"
#include "Triggers.h"
#include "TriggerDB.h"
#include "MVInfo.h"
#include "Refresh.h"
#include "ChangesTable.h"
#include "MvRefreshBuilder.h"
#include "OptHints.h"
#include "CmpStatement.h"
#include "OptimizerSimulator.h"
#include "charinfo.h"
#include "UdfDllInteraction.h"
#include "SqlParserGlobals.h" // must be last #include
#include "ItmFlowControlFunction.h"
#include "ComSchemaName.h" // for ComSchemaName
#include "ItemSample.h"
#include "NAExecTrans.h"
#include "HDFSHook.h"
#include "CmpSeabaseDDL.h"
#include "ComUser.h"
#include "ComSqlId.h"
#include "PrivMgrCommands.h"
#include "PrivMgrComponentPrivileges.h"
#include "PrivMgrDefs.h"
#include "PrivMgrMD.h"
#define SLASH_C '/'
NAWchar *SQLTEXTW();
// -----------------------------------------------------------------------
// external declarations
// -----------------------------------------------------------------------
//
// -----------------------------------------------------------------------
// static functions
// -----------------------------------------------------------------------
#ifdef NDEBUG
THREAD_P NABoolean GU_DEBUG = FALSE;
#else
THREAD_P NABoolean GU_DEBUG;
#endif
static void GU_DEBUG_Display(BindWA *bindWA, GenericUpdate *gu,
const char *text,
RelExpr *reDown = NULL,
NABoolean preEndl = FALSE,
NABoolean postEndl = FALSE)
{
#ifndef NDEBUG
if (!GU_DEBUG)
return;
// LCOV_EXCL_START - dpm
if (preEndl) cerr << endl;
cerr << "---" << endl;
if (gu->getTableDesc()) {
NAString tmp;
ValueIdList vtmp(gu->getTableDesc()->getColumnList());
vtmp.unparse(tmp);
cerr << gu->getUpdTableNameText() << " this>td(" << text << ") "
<< gu->getTableDesc()->getCorrNameObj().getExposedNameAsAnsiString()
<< " " << tmp << endl;
}
RETDesc *rd = gu->getRETDesc();
if (rd) {
cerr << gu->getUpdTableNameText() << " this>grd(" << text << ") " << flush;
rd->display();
}
if (reDown) RETDesc::displayDown(reDown);
if (bindWA->getCurrentScope()->getRETDesc() &&
bindWA->getCurrentScope()->getRETDesc() != rd) {
cerr << gu->getUpdTableNameText() << " bwa>cs>grd(" << text << ") " <<flush;
bindWA->getCurrentScope()->getRETDesc()->display();
}
// LCOV_EXCL_STOP
if (postEndl) cerr << endl;
#endif
} // GU_DEBUG_Display()
#pragma nowarn(770) // warning elimination
static RETDesc *bindRowValues(BindWA *bindWA,
ItemExpr *exprTree,
ValueIdList &vidList,
RelExpr *parent,
NABoolean inTrueRoot)
{
// Before we convert the row value expressions into a ValueIdList, save the
// original value expression root nodes in an ItemExprList.
//
ItemExprList exprList(exprTree, bindWA->wHeap());
//
// Bind the row value expressions and create a ValueIdList.
//
exprTree->convertToValueIdList(vidList, bindWA, ITM_ITEM_LIST, parent);
if (bindWA->errStatus()) return NULL;
// Set up context flags.
// We are in a subquery if the previous scope's flag is set, note.
//
BindScope *currScope = bindWA->getCurrentScope();
BindScope *prevScope = bindWA->getPreviousScope(currScope);
NABoolean inSelectList = currScope->context()->inSelectList();
NABoolean inInsert = currScope->context()->inInsert();
NABoolean inSubquery = FALSE;
if (prevScope)
inSubquery = prevScope->context()->inSubquery();
// See if UDF_SUBQ_IN_AGGS_AND_GBYS is enabled. It is enabled if the
// default is ON, or if the default is SYSTEM and ALLOW_UDF is ON.
NABoolean udfSubqInAggGrby_Enabled = FALSE;
DefaultToken udfSubqTok = CmpCommon::getDefault(UDF_SUBQ_IN_AGGS_AND_GBYS);
if ((udfSubqTok == DF_ON) ||
(udfSubqTok == DF_SYSTEM))
udfSubqInAggGrby_Enabled = TRUE;
// See if ALLOW_MULTIDEGREE_SUBQ_IN_SELECTLIST is enabled. It is
// enabled if the default is ON, or if the default is SYSTEM and
// ALLOW_UDF is ON.
NABoolean allowMultiDegSubqInSelect_Enabled = FALSE;
DefaultToken allowMultiDegreeTok =
CmpCommon::getDefault(ALLOW_MULTIDEGREE_SUBQ_IN_SELECTLIST);
if ((allowMultiDegreeTok == DF_ON) ||
(allowMultiDegreeTok == DF_SYSTEM))
allowMultiDegSubqInSelect_Enabled = TRUE;
//
// Create the result table.
// If a row value expression is not a column reference and does not have
// a rename AS clause, the column is an unnamed expression.
//
RETDesc *resultTable = new (bindWA->wHeap()) RETDesc(bindWA);
CollIndex j = 0;
for (CollIndex i = 0; i < exprList.entries(); i++, j++)
{
ItemExpr *itemExpr = (ItemExpr *) exprList[i];
ValueId valId = itemExpr->getValueId();
ValueId boundValId = vidList[j];
CMPASSERT(boundValId != NULL_VALUE_ID);
if (inSelectList && inTrueRoot &&
(boundValId.getType().getTypeQualifier() == NA_UNKNOWN_TYPE)&&
(boundValId.getItemExpr()->getOperatorType() == ITM_CONSTANT))
{
ConstValue * constItemExpr = (ConstValue*) boundValId.getItemExpr();
if (constItemExpr->isNull())
boundValId.coerceType(NA_NUMERIC_TYPE) ;
}
switch (itemExpr->getOperatorType())
{
case ITM_REFERENCE: {
ColReference *colRef = (ColReference *) itemExpr;
const ColRefName &colRefName = colRef->getColRefNameObj();
CMPASSERT(valId != NULL_VALUE_ID || colRefName.isStar());
if (colRefName.isStar()) {
const ColumnDescList *star = colRef->getStarExpansion();
CMPASSERT(star != NULL);
const ColumnDescList &starExpansion = *star;
CMPASSERT(starExpansion.entries() > 0); // ColRef::bind chked this alrdy
CMPASSERT(inSelectList);
resultTable->addColumns(bindWA, starExpansion);
j += starExpansion.entries() - 1;
} // isStar
else {
// Do another xcnm lookup so the column we add to our resultTable
// will have its CorrName object correct
// (e.g., in "SELECT TL.B,* FROM TA TL,TA TR ORDER BY B;"
// colref TL.B will resolve to TL.B, not CAT.SCH.TL.B)
// and its heading (Genesis 10-980126-5495).
BindScope *bindScope;
ColumnNameMap *xcnmEntry = bindWA->findColumn(colRefName, bindScope);
if (NOT xcnmEntry) // ## I don't recall when this case occurs...
resultTable->addColumn(bindWA,
colRefName,
boundValId,
colRef->getTargetColumnClass());
else
resultTable->addColumn(bindWA,
xcnmEntry->getColRefNameObj(),
boundValId,
colRef->getTargetColumnClass(), // MV --
xcnmEntry->getColumnDesc()->getHeading());
}
break;
}
case ITM_RENAME_COL:
{
RenameCol *renameCol = (RenameCol *) itemExpr;
const ColRefName &colRefName = *renameCol->getNewColRefName();
CMPASSERT(NOT colRefName.isStar());
const char * heading = NULL;
// if this rename was for a BLOB/CLOB column from JDBC, return
// the heading of the child base column. This is needed for JDBC
// as it uses the heading to figure out if the column is a LOB
// column.
if (CmpCommon::getDefault(JDBC_PROCESS) == DF_ON)
{
ItemExpr * childExpr = itemExpr->child(0)->castToItemExpr();
if (childExpr->getOperatorType() == ITM_BASECOLUMN)
{
heading = ((BaseColumn *)childExpr)->getNAColumn()->getHeading();
if (heading)
{
if ((strcmp(heading, "JDBC_BLOB_COLUMN -") != 0) &&
(strcmp(heading, "JDBC_CLOB_COLUMN -") != 0))
heading = NULL;
}
}
}
// No heading is passed here (whole point of SQL derived-column is rename)
// unless it is a jdbc blob/clob heading.
resultTable->addColumn(bindWA,
colRefName,
boundValId,
renameCol->getTargetColumnClass(),
heading);
break;
}
case ITM_ROW_SUBQUERY:
case ITM_USER_DEF_FUNCTION: {
// Deal with multi Valued User Defined Functions or Subqueries with
// degree > 1.
//
// In order to have the correct degree during the bind phase,
// since we don't have all the information until after the transform
// phase, we need to put entries into the RETDesc early.
//
// Say you have a query like this:
// select mvf(a,b) from t1;
// and assume mvf outputs 2 values.
//
// at bind time, the select list will only have 1 entry in it, namely
// the ITM_USER_DEF_FUNCTION.
// Since we do degree checking at bind time, we need to know now that
// mvf() actually produces 2 values.
//
// So what we do here, is that we substitute the original
// ITM_USER_DEF_FUNCTION with ValueIdProxies. One for each output of
// the original function. The selectList of the RelRoot as well as the
// retDESC are updated with the additional elements.
//
// Similarly if we have a subquery like this:
//
// select (select max(a),max(b) from t2), a from t1;
//
// we will wrap the subquery in a ValeIdProxy representing the
// subquery from a transformation point of view, but representing
// max(a) from an output point of view. A second ValueIdProxy will be
// added for max(b), so the select list of the outer query would look
// like this:
//
// [ ValueIdProxy(Subq:max(a)), ValueIdProxy(Subq:max(b)), a ]
//
// instead of just
//
// [ Subq, a ]
//
// like we are used to.
//
// At transform time the valueIdProxies, will disappear and we will
// transform the UDF/Subquery carried inside the valueIdProxy
// marked to be transformed. Some might hang around until Normalization.
// Only the ValueIdProxy representing the first output will be marked
// to be transformed, so we only transform the UDF/Subquery once.
//
// Similarly, we update the outer query's retDESC.
NABoolean isSubquery =
(itemExpr->getOperatorType() == ITM_ROW_SUBQUERY) ?
TRUE : FALSE;
NAColumnArray outCols;
ValueIdList outColVids;
CollIndex currIndex = j;
if (isSubquery)
{
Subquery * subq = (Subquery *) itemExpr;
const RETDesc *retDesc = subq->getSubquery()->getRETDesc();
if( retDesc )
{
retDesc->getColumnList()->getValueIdList(outColVids);
}
}
else
{
UDFunction * udf = (UDFunction *) itemExpr;
CMPASSERT(udf->getRoutineDesc());
const RoutineDesc *rDesc = udf->getRoutineDesc();
// Get the outputs of this UDF, these are as defined in metadata
// including names etc.
outCols = rDesc->getEffectiveNARoutine()->getOutParams();
outColVids = rDesc->getOutputColumnList();
}
if ( (outColVids.entries() == 1) ||
( isSubquery &&
(!allowMultiDegSubqInSelect_Enabled)
))
{
// Do exactly what we used to do if the degree is 1.
// or we have disallowed subqueries of degree > 1.
if (isSubquery)
{
// ## Here we ought to manufacture a unique name per Ansi 7.9 SR 9c.
ColRefName colRefName;
resultTable->addColumn(bindWA, colRefName, boundValId);
}
else
{
NAColumn *col = outCols[0];
const char * heading = col->getHeading();
ColRefName colRefName( col->getColName());
ColumnClass colClass( col->getColumnClass());
resultTable->addColumn(bindWA,
colRefName,
boundValId,
colClass,
heading);
}
break;
}
// Wrap all the outputs with a ValueIdProxy
// so that we can deal with multiple outputs
// If we didn't have a RETDesc or a RoutineDesc, outColVids
// will be empty and we don't do anything.
// Also we do not need to worry about recursing through the
// RETDesc entries as the call to convertToValueIdList() above
// did that already.
for (CollIndex idx = 0; idx < outColVids.entries(); idx++)
{
NAColumn *col;
NABoolean isRealOrRenameColumn =
(outColVids[idx].getItemExpr()->getOperatorType() ==
ITM_BASECOLUMN) ||
(outColVids[idx].getItemExpr()->getOperatorType() ==
ITM_RENAME_COL) ||
!isSubquery ? TRUE : FALSE;
if (isSubquery)
{
col = ((NAColumn *) outColVids[idx].getItemExpr());
}
else
{
col = ((NAColumn *) outCols[idx]);
}
const char * heading = isRealOrRenameColumn ?
col->getHeading() : "";
ColRefName colRefName( isRealOrRenameColumn ?
col->getColName() : "");
ColumnClass colClass( isRealOrRenameColumn ?
col->getColumnClass() : USER_COLUMN);
// We are wrapping the MVF/Subquery and its additional outputs
// with a ValueIdProxy. This way we don't end up flattening or
// expanding the outputs of the MVF multiple times.
// The valueId of the RoutineParam corresponding to the
// metadata column is used for the output valueId.
// So if you had a query like this:
//
// select swap2(a,b) from t1;
//
// and swap2() returns 2 outputs (basically the inputs swapped)
//
// The new select list for the query would be:
//
// 1: ValueIdProxy with the derivedNode being swap2, and output
// valueId containing the first output parameter of swap2.
// Also the transformDerivedFrom flag would be set
// 2: ValueIdProxy with the derivedNode being swap2, and output
// valueId containing the second output parameter of swap2.
//
// These ValueIdProxy nodes will go away at transform time..
ValueIdProxy *proxyOutput = new (CmpCommon::statementHeap())
ValueIdProxy( boundValId,
outColVids[idx],
idx);
// The type of the proxy is the same as the output valueId associated
// with it.
proxyOutput = (ValueIdProxy *) proxyOutput->bindNode(bindWA);
if (bindWA->errStatus()) return NULL;
// Make sure we transform the MVF
if (idx == 0) proxyOutput->setTransformChild(TRUE);
if (!isSubquery || isRealOrRenameColumn)
{
resultTable->addColumn(bindWA,
colRefName,
proxyOutput->getValueId(),
colClass,
heading);
}
else
{
resultTable->addColumn(bindWA, colRefName,
proxyOutput->getValueId());
}
if (idx == 0)
{
vidList.removeAt(currIndex); // we need to delete the old valueId
}
else
j++; // The first entry simply replaces the original
// Update the list with the new value.
// insertAt has the nice feature that it will push
// the residual elements to the right, so we do not need to
// manage the valueIds we haven't processed yet as long as we
// update the index (j++ above) correctly.
vidList.insertAt(currIndex++,proxyOutput->getValueId());
}
break;
}
default:
{
// ## Here we ought to manufacture a unique name per Ansi 7.9 SR 9c.
ColRefName colRefName;
resultTable->addColumn(bindWA, colRefName, boundValId);
break;
}
} // switch
} // for
// need this for static cursor declaration
cmpCurrentContext->saveRetrievedCols_ = resultTable->getDegree();
// Before we can return the result table, we need to check for the possible
// syntax error below, in which we can't use the definition of "inSubquery"
// that we calculate above. Our example case is, if we're directly below
// a GroupByAgg, then we need to look at the scope *before* the GroupByAgg
// to determine if we satisfy the error condition below. This is a problem
// with how our plan trees don't sync completely with SQL syntax.
// Here's the error case (Genesis 10-980518-0765):
//
// >> select (select distinct 1,2 from T1 t) from T1;
//
// First of all, yes, it's a really stupid query. Oh well! :-)
//
// It's pretty clear that the "1,2" is part of a "select list inside the
// subquery of a select list." However, the parser creates a GroupByAgg
// for the distinct keyword (sigh), which means that we have an
// additional scope between the scope of the SQLRecord (1,2) and the
// scope of the "TRUE" parent, the inner-select. This additional scope
// is for the GroupByAgg. So in the case of a GroupByAgg (and possibly
// another case will arise later ...?), we need to look at the
// GroupByAgg's parent to determine if we satisfy this error condition.
//
// To recap: To handle this one (stupid) case we've added a ton of
// comments and code here and in GroupByAgg::bindNode(), plus created
// the new functions/members BindWA::getSubqueryScope(), and
// BindContext::lookAboveToDecideSubquery_/(). Wonderful!
//
if (prevScope) {
BindScope *subQScope = bindWA->getSubqueryScope(currScope);
//
// subQScope should be non-NULL when prevScope is non-NULL
//
CMPASSERT(subQScope);
NABoolean inSubqueryInSelectList = subQScope->context()->inSubquery() &&
subQScope->context()->inSelectList();
NABoolean inSubqueryInGroupByClause = subQScope->context()->inSubquery() &&
subQScope->context()->inGroupByClause() &&
(CmpCommon::getDefault(UDF_SUBQ_IN_AGGS_AND_GBYS) == DF_ON);
//10-060602-6930 Begin
//Added a check to not enter this condition when we are in bindView scope
if (inSelectList &&
(inSubqueryInSelectList ||
inSubqueryInGroupByClause) &&
!bindWA->inViewExpansion()) {
//10-060602-6930 End
// We now can check for the syntax error that we've done so much work
// above (and in GroupByAgg::bindNode(), BindWA.h & BindWA.cpp)
// to detect:
if ((j > 1) &&
(!allowMultiDegSubqInSelect_Enabled) ) {
// 4019 The select list of a subquery in a select list must be scalar
*CmpCommon::diags() << DgSqlCode(-4019);
bindWA->setErrStatus();
return NULL;
}
}
} // prevScope
return resultTable;
} // bindRowValues()
#pragma warn(770) // warning elimination
// Bind a constraint (MP Check Constraint).
// Returns NULL if error in constraint *OR* we can safely ignore the constraint
// (e.g., a NOT NULL NONDROPPABLE constraint); caller must check bindWA errsts.
//
static ItemExpr* bindCheckConstraint(
BindWA *bindWA,
CheckConstraint *constraint,
const NATable *naTable,
NABoolean catmanCollectUsages = FALSE,
ItemExpr *viewCheckPred = NULL)
{
ItemExpr *constraintPred = NULL;
if (viewCheckPred) {
// view WITH CHECK OPTION: the view's where-clause was already parsed
// in bindView
CMPASSERT(constraint->getConstraintText().isNull()); // sanity check
constraintPred = viewCheckPred;
}
else {
Parser parser(bindWA->currentCmpContext());
constraintPred = parser.getItemExprTree(constraint->getConstraintText().data(),
constraint->getConstraintText().length(),
CharInfo::UTF8 // ComGetNameInterfaceCharSet()
);
}
if (constraintPred) {
ParNameLocList *saveNameLocList = bindWA->getNameLocListPtr();
if (!catmanCollectUsages ||
!bindWA->getUsageParseNodePtr() ||
bindWA->getUsageParseNodePtr()->getOperatorType() == DDL_CREATE_VIEW)
bindWA->setNameLocListPtr(NULL);
CMPASSERT(!bindWA->getCurrentScope()->context()->inCheckConstraint());
bindWA->getCurrentScope()->context()->inCheckConstraint() = constraint;
constraintPred->bindNode(bindWA);
bindWA->setNameLocListPtr(saveNameLocList);
bindWA->getCurrentScope()->context()->inCheckConstraint() = NULL;
if (bindWA->errStatus()) {
delete constraintPred;
constraintPred = NULL;
}
}
// A NOT NULL constraint on a single column which never allows nulls
// (has no null indicator bytes)
// -- i.e., the common case of a column declared NOT NULL NONDROPPABLE --
// does not need to be separately enforced as a constraint, because
// Executor will raise a numeric-overflow error if someone tries to
// put a NULL into such a column.
//
// So we don't need to put this constraint into the list, but we do need
// to save its name, for run-time error diags.
//
// ##To be done:
// ## GenRelUpdate DP2Insert/Update: for each col in newRecExpr(),
// ## if getNotNullViolationCode(), then
// ## save the SqlCode and the getNotNullConstraintName()...asAnsiString()
// ## and some column identifier (pos or offset) in some per-query struct
// ## Executor: if error 8411, if truly a NULL violation, look up that column
// ## in the nnconstraint struct and populate diags with the info there.
//
if (constraintPred) {
ItemExprList nncols(bindWA->wHeap());
constraintPred->getColumnsIfThisIsISNOTNULL(nncols);
for (CollIndex i = 0; i < nncols.entries(); i++) {
NAColumn *nacol = nncols[i]->getValueId().getNAColumn();
if (!nacol->getType()->supportsSQLnullPhysical()) {
nacol->setNotNullNondroppable(constraint);
//
// DO *NOT* do: delete constraintPred;
// -- it deletes a whole tree of stuff referenced elsewhere!
//
constraintPred = NULL;
} else {
// Leaving the column's type's supportsSQLnullPhysical() as is (TRUE),
// set its supportsSQLnullLogical() to FALSE,
// for the Transform phase.
nacol->mutateType()->setNullable(TRUE/*supports physical nulls*/,
FALSE/*but not logical nulls */);
}
}
}
else {
*CmpCommon::diags() << DgSqlCode(-4025)
<< DgConstraintName(ToAnsiIdentifier(constraint->getConstraintName().getObjectName()))
<< DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
}
return constraintPred;
} // bindCheckConstraint()
// LCOV_EXCL_START - cnu
static ItemExpr *intersectColumns(const RETDesc &leftTable,
const RETDesc &rightTable,
BindWA* bindWA)
{
ItemExpr *predicate = NULL;
for (CollIndex i = 0; i < leftTable.getDegree(); i++) {
ItemExpr *leftExpr = leftTable.getValueId(i).getItemExpr();
ItemExpr *rightExpr = rightTable.getValueId(i).getItemExpr();
BiRelat *compare = new (bindWA->wHeap())
BiRelat(ITM_EQUAL, leftExpr, rightExpr);
if (predicate)
predicate = new (bindWA->wHeap()) BiLogic(ITM_AND, predicate, compare);
else
predicate = compare;
}
// Binding this predicate must be done in caller's context/scope, not here...
return predicate;
} // intersectColumns()
// LCOV_EXCL_STOP
static ItemExpr *joinCommonColumns(const RelExpr *const leftRelExpr,
const RelExpr *const rightRelExpr,
BindWA* bindWA)
{
const RETDesc &leftTable = *leftRelExpr->getRETDesc();
const RETDesc &rightTable = *rightRelExpr->getRETDesc();
//
// Find the common column names between two tables and create a predicate
// that joins the columns. For example, if tables T1 and T2 have common
// column names A and B, return the predicate T1.A = T2.A AND T1.B = T2.B.
// The checking for ambiguous common columns will be done when they are
// are coalesced for the output list.
//
ItemExpr *predicate = NULL;
for (CollIndex i = 0; i < leftTable.getDegree(); i++) {
ColRefName simpleColRefName(leftTable.getColRefNameObj(i).getColName()); //
if (NOT simpleColRefName.isEmpty()) { //
ColumnNameMap *commonCol = rightTable.findColumn(simpleColRefName); //
if (commonCol) { //
ItemExpr *leftExpr = leftTable.getValueId(i).getItemExpr();
ItemExpr *rightExpr = commonCol->getValueId().getItemExpr(); //
bindWA->markAsReferencedColumn(leftExpr);
bindWA->markAsReferencedColumn(rightExpr);
BiRelat *compare = new (bindWA->wHeap())
BiRelat(ITM_EQUAL, leftExpr, rightExpr);
if (predicate)
predicate = new(bindWA->wHeap()) BiLogic(ITM_AND, predicate, compare);
else
predicate = compare;
}
}
}
// Binding this predicate is being done in caller, Join::bindNode()
return predicate;
} // joinCommonColumns()
// Functions findNonCommonColumns() and coalesceCommonColumns()
//
// These create the column descriptors for the result of a natural join.
// A natural join is equivalent to
//
// SELECT SLCC, SLT1, SLT2 FROM T1, T2
//
// where SLCC represents the list of coalesced common columns of T1 and T2,
// SLT1 represents the list of non-common columns of T1, and
// SLT2 represents the list of non-common columns of T2.
//
// A coalesced common column C is equivalent to
//
// COALESCE (T1.C, T2.C) AS C -- i.e. there is no table name; CorrName is ""
//
// where COALESCE (T1.C, T2.C) is equivalent to
//
// CASE WHEN T1.C IS NOT NULL THEN T1.C ELSE T2.C END
//
// Function findNonCommonColumns(), on the first call, coalesces common
// columns into the resultTable, and collects non-common columns.
// On the second call it continues to collect non-common columns.
//
// Function coalesceCommonColumns() adds SLCC, SLT1, SLT2 to the
// resultTable in the proper order.
//
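// For example, with hypothetical tables T1(A,B,X) and T2(A,B,Y),
//
//   SELECT * FROM T1 NATURAL JOIN T2
//
// yields SLCC = {A,B} (each coalesced as described above), SLT1 = {X},
// SLT2 = {Y}, so the result columns are A, B, X, Y in that order.
//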
static void findNonCommonColumns(BindWA *bindWA,
OperatorTypeEnum joinType,
const RETDesc &sourceTable,
const RETDesc &targetTable,
RETDesc &resultTable,
ColumnDescList &nonCommonCols)
{
// Used for ANSI 6.4 SR 3aii below.
CorrName implemDependCorr(bindWA->fabricateUniqueName(), TRUE);
//
for (CollIndex i = 0; i < sourceTable.getDegree(); i++) {
const ColRefName &sourceColRefName = sourceTable.getColRefNameObj(i);
ValueId sourceId = sourceTable.getValueId(i);
ColRefName simpleColRefName(sourceColRefName.getColName());
//
// If a column is an unnamed expression, it is a non-common column.
//
if (simpleColRefName.isEmpty())
nonCommonCols.insert(new (bindWA->wHeap())
ColumnDesc(sourceColRefName, sourceId, NULL, bindWA->wHeap()));
else {
ColumnNameMap *commonCol = targetTable.findColumn(simpleColRefName);
//
// If the named column does not have a corresponding column in the
// target table, it is a non-common column.
//
if (NOT commonCol)
nonCommonCols.insert(new (bindWA->wHeap())
ColumnDesc(sourceColRefName, sourceId, NULL, bindWA->wHeap()));
//
// If the target table has more than one corresponding column, error.
//
else if (commonCol->isDuplicate()) {
NAString fmtdList(bindWA->wHeap());
LIST(TableNameMap*) xtnmList(bindWA->wHeap());
targetTable.getTableList(xtnmList, &fmtdList); // Tables in the RETDesc
*CmpCommon::diags() << DgSqlCode(-4004)
<< DgColumnName(simpleColRefName.getColName())
<< DgTableName(commonCol->getColRefNameObj().getCorrNameObj().
getExposedNameAsAnsiString())
<< DgString0(fmtdList)
<< DgString1(bindWA->getDefaultSchema().getSchemaNameAsAnsiString());
bindWA->setErrStatus();
return;
}
else if (joinType != ITM_NO_OP) {
//
// Coalesce the common columns and add them to the result table.
//
ValueId resultId;
switch(joinType) {
case REL_JOIN:
case REL_LEFT_JOIN:
resultId = sourceId;
break;
case REL_RIGHT_JOIN:
resultId = commonCol->getValueId();
break;
default: {
ItemExpr *sourceExpr = sourceId.getItemExpr();
ItemExpr *targetExpr = commonCol->getValueId().getItemExpr();
UnLogic *test = new (bindWA->wHeap())
UnLogic(ITM_IS_NULL, sourceExpr);
ItemExpr *coalesce = new (bindWA->wHeap())
Case(NULL, new (bindWA->wHeap())
IfThenElse(test,
targetExpr,
sourceExpr));
coalesce = coalesce->bindNode(bindWA)->castToItemExpr();
if (bindWA->errStatus()) {
delete test;
delete coalesce;
return;
}
resultId = coalesce->getValueId();
break;
} // default case (braces required since vars are initialized here)
} // switch
//
// ANSI 6.4 SR 3aii:
// We've fabricated a unique implementation-dependent CorrName
// outside the loop; the common columns have this basically
// invisible CorrName, the point of which seems to be that
// select * from
// ta natural join tb
// join -- not natural!
// (ta tx natural join tb ty)
// on 1=1;
// should not generate an ambiguous column reference error
// from the star-expansion. So according to ANSI,
// the two natural joins produce, respectively,
// fab1.slcc, ta.slt1, tb.slt2
// fab2.slcc, tx.slt1, ty.slt2
// so the join produces
// fab1.slcc, ta.slt1, tb.slt2, fab2.slcc, tx.slt1, ty.slt2
// i.e. the two SLCC's are unambiguous.
//
ColRefName implemDepend(simpleColRefName.getColName(),implemDependCorr);
resultTable.addColumn(bindWA, implemDepend, resultId);
} // coalesce SLCC into resultTable
} // named column
} // for
} // findNonCommonColumns()
// Comments for this function can be found above the preceding function.
static void coalesceCommonColumns(BindWA *bindWA,
OperatorTypeEnum joinType,
const RETDesc &leftTable,
const RETDesc &rightTable,
RETDesc &resultTable)
{
ColumnDescList nonCommonCols(bindWA->wHeap());
// non-common columns of the left table
//
// Coalesce the common column names of the left and right tables and add
// them to the result table.
// Collect the non-common column names from the left.
//
findNonCommonColumns(bindWA,
joinType,
leftTable,
rightTable,
resultTable,
nonCommonCols);
if (bindWA->errStatus()) return;
//
// Collect the non-common column names from the right.
//
RETDesc irrelevantOnThisCall;
findNonCommonColumns(bindWA,
ITM_NO_OP, // do not add SLCC to resultTable
rightTable,
leftTable,
irrelevantOnThisCall,
nonCommonCols);
if (bindWA->errStatus()) return;
//
// Add the non-common columns from the left and right to the result table.
//
resultTable.addColumns(bindWA, nonCommonCols);
nonCommonCols.clearAndDestroy();
//
// Add the system columns from the left and right to the result table.
//
resultTable.addColumns(bindWA, *leftTable.getSystemColumnList(), SYSTEM_COLUMN);
resultTable.addColumns(bindWA, *rightTable.getSystemColumnList(), SYSTEM_COLUMN);
} // coalesceCommonColumns()
// For Catalog Manager, this function:
// 1) Fixes up the name location list to help with computing of the view text,
// check constraint search condition text, etc.
// 2) Collects the table (base table, view, etc.) usages information for
// view definitions, check constraint definitions, etc.
//
// ** Some of this could be implemented, perhaps more simply,
// ** using BindWA::viewCount() and BindWA::tableViewUsageList().
//
static void BindUtil_CollectTableUsageInfo(BindWA *bindWA,
const CorrName& corrName)
{
// Task (1)
//
ParNameLocList *pNameLocList = bindWA->getNameLocListPtr();
if (pNameLocList)
{
ParNameLoc * pNameLoc
= pNameLocList->getNameLocPtr(corrName.getNamePosition());
if (pNameLoc)
{
if (NOT pNameLoc->getExpandedName(FALSE).isNull())
CMPASSERT(pNameLoc->getExpandedName() ==
corrName.getQualifiedNameObj().getQualifiedNameAsAnsiString());
pNameLoc->setExpandedName(
corrName.getQualifiedNameObj().getQualifiedNameAsAnsiString());
}
//
// Task (2)
//
ExprNode *pUsageParseNode = bindWA->getUsageParseNodePtr();
if (pUsageParseNode)
{
if (pUsageParseNode->getOperatorType() == DDL_CREATE_VIEW)
{
StmtDDLCreateView &cvpn = *pUsageParseNode->castToElemDDLNode()
->castToStmtDDLCreateView();
ParTableUsageList &vtul = cvpn.getViewUsages().getViewTableUsageList();
vtul.insert(corrName.getExtendedQualNameObj());
}
else if (pUsageParseNode->getOperatorType()
== DDL_ALTER_TABLE_ADD_CONSTRAINT_CHECK)
{
StmtDDLAddConstraintCheck &node = *pUsageParseNode->castToElemDDLNode()
->castToStmtDDLAddConstraintCheck();
ParTableUsageList &tul = node.getTableUsageList();
tul.insert(corrName.getQualifiedNameObj());
}
}
} // if (pNameLocList)
} // BindUtil_CollectTableUsageInfo()
void castComputedColumnsToAnsiTypes(BindWA *bindWA,
RETDesc *rd,
ValueIdList &compExpr)
{
const ColumnDescList &cols = *rd->getColumnList();
CollIndex i = cols.entries();
CMPASSERT(i == compExpr.entries());
while (i--) {
ColumnDesc *col = cols[i];
if (col->getValueId().getType().getTypeQualifier() == NA_ROWSET_TYPE) {
return;
}
NAType *naType = &(NAType&)col->getValueId().getType();
//
// Note: the unsupported and DATETIME cases are mutually exclusive with the LARGEDEC case below.
//
if (!naType->isSupportedType()) {
// Unsupported types are displayed as strings of '#' to their display length
ItemExpr *theRepeat =
new (bindWA->wHeap()) Repeat(new (bindWA->wHeap()) SystemLiteral("#"),
new (bindWA->wHeap()) SystemLiteral(
naType->getDisplayLength(
naType->getFSDatatype(),
0,
naType->getPrecision(),
naType->getScale(),
0)));
theRepeat = theRepeat->bindNode(bindWA);
col->setValueId(theRepeat->getValueId());
compExpr[i] = theRepeat->getValueId();
}
else if ((CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) &&
(NOT bindWA->inViewDefinition()) &&
(NOT bindWA->inMVDefinition()) &&
(NOT bindWA->inCTAS()) &&
(naType->getTypeQualifier()== NA_DATETIME_TYPE &&
((const DatetimeType *)naType)->getSubtype() ==
DatetimeType::SUBTYPE_SQLDate) &&
(! CmpCommon::context()->getSqlmxRegress()) &&
(strcmp(ActiveSchemaDB()->getDefaults().getValue(OUTPUT_DATE_FORMAT),
"ANSI") != 0))
{ // Special1 DATE, return as YY/MM/DD
ItemExpr * newChild =
new (bindWA->wHeap())
Format(col->getValueId().getItemExpr(), "YY/MM/DD", FALSE);
newChild = newChild->bindNode(bindWA);
col->setValueId(newChild->getValueId());
compExpr[i] = newChild->getValueId();
}
if ((naType->getFSDatatype() == REC_BIN64_UNSIGNED) &&
(CmpCommon::getDefault(TRAF_LARGEINT_UNSIGNED_IO) == DF_OFF) &&
(NOT bindWA->inCTAS()) &&
(NOT bindWA->inViewDefinition()))
{
NumericType *nTyp = (NumericType *)naType;
ItemExpr * cast = new (bindWA->wHeap())
Cast(col->getValueId().getItemExpr(),
new (bindWA->wHeap())
SQLBigNum(MAX_HARDWARE_SUPPORTED_UNSIGNED_NUMERIC_PRECISION,
nTyp->getScale(),
FALSE,
FALSE,
naType->supportsSQLnull(),
NULL));
cast = cast->bindNode(bindWA);
if (bindWA->errStatus())
return;
col->setValueId(cast->getValueId());
compExpr[i] = cast->getValueId();
naType = (NAType*)&cast->getValueId().getType();
}
if ((naType->getFSDatatype() == REC_BOOLEAN) &&
(CmpCommon::getDefault(TRAF_BOOLEAN_IO) == DF_OFF) &&
(NOT bindWA->inCTAS()) &&
(NOT bindWA->inViewDefinition()))
{
NumericType *nTyp = (NumericType *)naType;
ItemExpr * cast = new (bindWA->wHeap())
Cast(col->getValueId().getItemExpr(),
new (bindWA->wHeap())
SQLChar(SQL_BOOLEAN_DISPLAY_SIZE, naType->supportsSQLnull()));
cast = cast->bindNode(bindWA);
if (bindWA->errStatus())
return;
col->setValueId(cast->getValueId());
compExpr[i] = cast->getValueId();
naType = (NAType*)&cast->getValueId().getType();
}
// if OFF, return tinyint as smallint.
// This is needed until all callers/drivers have full support to
// handle IO of tinyint datatypes.
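    // For example, a column declared TINYINT is returned as SMALLINT;
    // a scaled one-byte numeric (hypothetically NUMERIC(2,1) stored as
    // REC_BIN8_SIGNED) is returned as a two-byte NUMERIC with the same
    // precision, scale and sign.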
if (((naType->getFSDatatype() == REC_BIN8_SIGNED) ||
(naType->getFSDatatype() == REC_BIN8_UNSIGNED)) &&
(NOT bindWA->inCTAS()) &&
(NOT bindWA->inViewDefinition()) &&
((CmpCommon::getDefault(TRAF_TINYINT_SUPPORT) == DF_OFF) ||
(CmpCommon::getDefault(TRAF_TINYINT_RETURN_VALUES) == DF_OFF)))
{
NumericType *srcNum = (NumericType*)naType;
NumericType * newType;
if (srcNum->getScale() == 0)
newType = new (bindWA->wHeap())
SQLSmall(NOT srcNum->isUnsigned(),
naType->supportsSQLnull());
else
newType = new (bindWA->wHeap())
SQLNumeric(sizeof(short), srcNum->getPrecision(),
srcNum->getScale(),
NOT srcNum->isUnsigned(),
naType->supportsSQLnull());
ItemExpr * cast = new (bindWA->wHeap())
Cast(col->getValueId().getItemExpr(), newType);
cast = cast->bindNode(bindWA);
if (bindWA->errStatus())
return;
col->setValueId(cast->getValueId());
compExpr[i] = cast->getValueId();
}
else if (naType->getTypeQualifier() == NA_NUMERIC_TYPE &&
!((NumericType &)col->getValueId().getType()).binaryPrecision()) {
NumericType *nTyp = (NumericType *)naType;
ItemExpr * ie = col->getValueId().getItemExpr();
NAType *newTyp = NULL;
Lng32 newPrec;
Lng32 newScale;
Lng32 oflow = -1;
Lng32 bignumOflow = -1;
NABoolean bignumIO = FALSE;
if (CmpCommon::getDefault(BIGNUM_IO) == DF_ON)
        bignumIO = TRUE; // explicitly set to ON
      else if (CmpCommon::getDefault(BIGNUM_IO) == DF_OFF)
        bignumIO = FALSE; // explicitly set to OFF
else if (CmpCommon::getDefault(BIGNUM_IO) == DF_SYSTEM)
{
if ((nTyp->isBigNum()) &&
(((SQLBigNum*)nTyp)->isARealBigNum()))
bignumIO = TRUE;
}
if (CmpCommon::getDefaultNumeric(MAX_NUMERIC_PRECISION_ALLOWED) ==
MAX_HARDWARE_SUPPORTED_SIGNED_NUMERIC_PRECISION)
bignumIO = FALSE;
if (bignumIO)
bignumOflow = nTyp->getPrecision() -
(Lng32)CmpCommon::getDefaultNumeric(MAX_NUMERIC_PRECISION_ALLOWED);
else
{
if (nTyp->isSigned())
oflow = nTyp->getPrecision() - MAX_HARDWARE_SUPPORTED_SIGNED_NUMERIC_PRECISION;
else
oflow = nTyp->getPrecision() - MAX_HARDWARE_SUPPORTED_UNSIGNED_NUMERIC_PRECISION;
}
if ((bignumOflow > 0) || (oflow > 0))
{
if (bignumOflow > 0) {
newPrec =
(Lng32)CmpCommon::getDefaultNumeric(MAX_NUMERIC_PRECISION_ALLOWED);
Lng32 orgMagnitude = nTyp->getPrecision() - nTyp->getScale();
// set the newScale
// IF there is overflow in magnitude set the scale to 0.
        // ELSE set it to accommodate the magnitude part and truncate the scale
newScale = (orgMagnitude >= newPrec) ? 0 : newPrec - orgMagnitude ;
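          // Worked example (assuming MAX_NUMERIC_PRECISION_ALLOWED = 18):
          // for a hypothetical NUMERIC(30,10), newPrec = 18 and
          // orgMagnitude = 30 - 10 = 20; since 20 >= 18, newScale becomes 0
          // and the value is returned as a BigNum with precision 18, scale 0.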
if (newScale > newPrec)
{
*CmpCommon::diags() << DgSqlCode(-3015)
<< DgInt0(newScale) << DgInt1(newPrec);
bindWA->setErrStatus();
return;
}
newTyp = new (bindWA->wHeap())
SQLBigNum(newPrec,
newScale,
((SQLBigNum &)col->getValueId().getType()).isARealBigNum(),
nTyp->isSigned(),
nTyp->supportsSQLnull(),
NULL);
}
else if (oflow > 0) {
// If it's not a computed expr, but a column w/ a legal type, re-loop
if (col->getValueId().getNAColumn(TRUE/*don't assert*/)) {
//CMPASSERT(!nTyp->isInternalType());
//continue;
}
OperatorTypeEnum op = ie->origOpType();
CMPASSERT(op != NO_OPERATOR_TYPE && // Init'd correctly?
op != ITM_RENAME_COL && // Expect these to have
op != ITM_REFERENCE); // been bound, vanished.
ItemExpr *ie2 = ie;
while (op == ITM_INSTANTIATE_NULL)
{
ie2 = ie2->child(0).getPtr();
op = ie2->origOpType();
}
// ANSI 6.5 SR 7 - 9: aggregates must be exact if column is exact.
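        // For example, SUM over a hypothetical NUMERIC(18,2) column whose
        // result precision overflows keeps scale 2 (SUM takes the scale of
        // its argument, per SR 9 below) instead of having the scale
        // truncated to 0.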
newPrec = MAX_NUMERIC_PRECISION;
Lng32 orgMagnitude = (nTyp->getMagnitude() + 9) / 10;
// set the newScale
// IF there is overflow in magnitude set the scale to 0.
        // ELSE set it to accommodate the magnitude part and truncate the scale
newScale = (orgMagnitude >= newPrec) ? 0 : newPrec - orgMagnitude ;
// Based on the CQD set the scale to MIN value.
// CQD specifies the MIN scale that has to be preserved in case
// of overflow.
NADefaults &defs = ActiveSchemaDB()->getDefaults();
Lng32 minScale = defs.getAsLong(PRESERVE_MIN_SCALE);
newScale = MAXOF(minScale, newScale);
if (op == ITM_SUM || op == ITM_AVG) {
// AVG = DIVIDE( SUM(), COUNT() )
ItemExpr *tmp = (op == ITM_SUM) ?
ie2 : ie2->child(0).getPtr();
//
// Now that we support OLAP functions, this may be
// a pointer to an ITM_NOTCOVERED node. If so, we
// need to check its child(0) node rather than
// the ITM_NOTCOVERED node.
//
if (tmp->getOperatorType() == ITM_NOTCOVERED )
tmp = (Aggregate *)(ItemExpr *)tmp->child(0);
CMPASSERT(tmp->isAnAggregate());
Aggregate *sum = (Aggregate *)tmp;
ItemExpr *arg = (sum->getOriginalChild()) ?
sum->getOriginalChild() : sum->child(0).getPtr();
if (arg->getValueId() == NULL_VALUE_ID)
arg = sum->child(0).getPtr();
CMPASSERT(arg->getValueId() != NULL_VALUE_ID);
Lng32 needScale = arg->getValueId().getType().getScale();
if (needScale > newPrec)
needScale = newPrec;
if (newScale < needScale || op == ITM_SUM) // ANSI 6.5 SR 9 b + c
newScale = needScale;
}
if (newScale == 0)
newTyp = new (bindWA->wHeap())
SQLLargeInt(TRUE, // hardware only supports signed
nTyp->supportsSQLnull());
else
newTyp = new (bindWA->wHeap())
SQLNumeric(sizeof(Int64),
newPrec,
newScale,
nTyp->isSigned(),
nTyp->supportsSQLnull());
} // overflow
ItemExpr *cast = new (bindWA->wHeap())
Cast(ie, newTyp, ITM_CAST, TRUE/*checkForTrunc*/);
cast = cast->bindNode(bindWA);
if (bindWA->errStatus()) return;
if (!col->getColRefNameObj().getColName().isNull()) {
// We get here via CREATE VIEW v AS SELECT (expr op expr) AS nam ...;
// ColumnDesc::setValueId() makes the RETDesc's XCNM inconsistent --
// but this is ok because name lookup over this XCNM doesn't happen
// after the point we've gotten to here --
// a) if caller is StmtDDLCreateView::bindNode via RelRoot::bindNode,
// there's no further lookup at all;
// b) if caller is bindView(), then thanks to the way RenameTable
// and RETDesc work, the inconsistent XCNM is not consulted
// so we don't have to worry about this issue ... (for now anyhow!)
}
col->setValueId(cast->getValueId());
compExpr[i] = cast->getValueId();
} // overflow (bignum or regular)
} // numeric
} // loop over cols in RETDesc
} // castComputedColumnsToAnsiTypes()
desc_struct *generateSpecialDesc(const CorrName& corrName)
{
desc_struct * desc = NULL;
if (corrName.getSpecialType() == ExtendedQualName::VIRTUAL_TABLE)
{
if (corrName.getQualifiedNameObj().getObjectName() == ExplainFunc::getVirtualTableNameStr())
{
ExplainFunc ef;
desc = ef.createVirtualTableDesc();
}
else if (corrName.getQualifiedNameObj().getObjectName() == StatisticsFunc::getVirtualTableNameStr())
{
StatisticsFunc sf;
desc = sf.createVirtualTableDesc();
}
else if (corrName.getQualifiedNameObj().getObjectName() == ExeUtilRegionStats::getVirtualTableNameStr())
{
ExeUtilRegionStats eudss;
desc = eudss.createVirtualTableDesc();
}
}
return desc;
} // generateSpecialDesc()
// -----------------------------------------------------------------------
// member functions for class BindWA
// -----------------------------------------------------------------------
// LCOV_EXCL_START - cnu
/*
static NABoolean checkForReservedObjectName(QualifiedName &inName)
{
if ((inName.getCatalogName() == "NEO") &&
(inName.getSchemaName() == "PUBLIC_ACCESS_SCHEMA") &&
(inName.getObjectName() == "_MAINTAIN_CONTROL_INFO_"))
{
return TRUE;
}
return FALSE;
}
*/
// LCOV_EXCL_STOP
NARoutine *BindWA::getNARoutine ( const QualifiedName &name )
{
NARoutineDBKey key(name, wHeap());
NARoutine * naRoutine = getSchemaDB()->getNARoutineDB()->get(this, &key);
if (!naRoutine)
{
desc_struct *udfMetadata = NULL;
CmpSeabaseDDL cmpSBD(STMTHEAP);
udfMetadata = cmpSBD.getSeabaseRoutineDesc(
name.getCatalogName(),
name.getSchemaName(),
name.getObjectName());
if (!udfMetadata)
return NULL;
NAHeap *routineHeap;
if (getSchemaDB()->getNARoutineDB()->cachingMetaData())
{
const Lng32 size = 16 * 1024; // The initial size
routineHeap = new CTXTHEAP NAHeap("NARoutine Heap", (NAHeap *)CTXTHEAP,
size);
routineHeap->setJmpBuf(CmpInternalErrorJmpBufPtr);
}
else
routineHeap=CmpCommon::statementHeap();
Int32 errors=0;
naRoutine = new (routineHeap)
NARoutine(name,
udfMetadata,
this,
errors,
routineHeap);
if ( NULL == naRoutine || errors != 0)
{
setErrStatus();
return NULL;
}
// Add NARoutine to the NARoutineDB cache.
if (getSchemaDB()->getNARoutineDB()->cachingMetaData())
getSchemaDB()->getNARoutineDB()->put(naRoutine);
}
return naRoutine;
}
NATable *BindWA::getNATable(CorrName& corrName,
NABoolean catmanCollectTableUsages, // default TRUE
desc_struct *inTableDescStruct) // default NULL
{
BindWA *bindWA = this; // for coding convenience
NATable * table = NULL;
// Search in volatile schema first. If not found, search in regular cat/sch.
NABoolean volatileTableFound = FALSE;
NAString userName;
if ((CmpCommon::context()->sqlSession()->volatileSchemaInUse()) &&
(! inTableDescStruct) &&
(corrName.getSpecialType() != ExtendedQualName::VIRTUAL_TABLE))
{
CorrName newCorrName =
CmpCommon::context()->sqlSession()->getVolatileCorrName
(corrName);
if (bindWA->errStatus())
return NULL;
//get NATable from cache
table = bindWA->getSchemaDB()->getNATableDB()->
get(newCorrName, bindWA, inTableDescStruct);
if (!table)
{
// now search in regular cat/sch.
// clear diags area.
CmpCommon::diags()->clear();
bindWA->resetErrStatus();
}
else
{
NABoolean isValid =
CmpCommon::context()->sqlSession()->validateVolatileCorrName
(corrName);
// if this table is found in volatile schema, then
// make sure it is a volatile table.
if ((isValid) &&
(NOT table->isVolatileTable()))
{
*CmpCommon::diags() << DgSqlCode(-4190) <<
DgTableName(table->getTableName().
getQualifiedNameAsAnsiString(TRUE));
bindWA->setErrStatus();
return NULL;
}
if (isValid)
{
newCorrName.setIsVolatile(TRUE);
corrName = newCorrName;
}
else
{
// table was found in the volatile schema but it is
// not a valid volatile name.
// Look for it in regular schema.
table = NULL;
CmpCommon::diags()->clear();
bindWA->resetErrStatus();
// remember that volatile table was found so we
// can generate a better error message later.
volatileTableFound = TRUE;
}
}
}
if (! table)
{
// Expand the table (base table, view, etc.) name with
// the default catalog and schema parts if the specified
// table name does not include these parts.
// This method will also first apply any prototype value (from a host var)
// into the corrName's qualifiedName.
//
NABoolean catNameSpecified =
(NOT corrName.getQualifiedNameObj().getCatalogName().isNull());
NABoolean schNameSpecified =
(NOT corrName.getQualifiedNameObj().getSchemaName().isNull());
// try PUBLIC SCHEMA only when no schema was specified
// and CQD PUBLIC_SCHEMA_NAME is specified
NAString publicSchema = "";
CmpCommon::getDefault(PUBLIC_SCHEMA_NAME, publicSchema, FALSE);
ComSchemaName pubSchema(publicSchema);
NAString pubSchemaIntName = "";
if ( !schNameSpecified && !pubSchema.getSchemaNamePart().isEmpty() )
{
pubSchemaIntName = pubSchema.getSchemaNamePart().getInternalName();
}
corrName.applyDefaults(bindWA, bindWA->getDefaultSchema());
if (bindWA->errStatus())
return NULL; // prototype value parse error
// override schema
if ( ( overrideSchemaEnabled() )
// not volatile table
&& ( ! volatileTableFound )
)
{
doOverrideSchema(corrName);
}
// if DEFAULT_SCHEMA_ACCESS_ONLY, can only access default and public schemas
if (corrName.getSpecialType()==ExtendedQualName::NORMAL_TABLE)
// NORMAL_TABLE also covers synonym, view and MV
{
if (violateAccessDefaultSchemaOnly(corrName.getQualifiedNameObj()))
return NULL;
}
// make sure that schema name is not a VOLATILE SCHEMA
if ((! bindWA->inDDL()) ||
((bindWA->inViewDefinition()) ||
(bindWA->inMVDefinition())))
{
if (! CmpCommon::context()->sqlSession()->validateVolatileQualifiedSchemaName
(corrName.getQualifiedNameObj()))
{
bindWA->setErrStatus();
return NULL;
}
}
//get NATable (from cache or from metadata)
table = bindWA->getSchemaDB()->getNATableDB()->
get(corrName, bindWA, inTableDescStruct);
//try the public schema if not found
if ( !table && !pubSchemaIntName.isNull() )
{
CorrName pCorrName(corrName);
pCorrName.getQualifiedNameObj().setSchemaName(pubSchemaIntName);
if ( !pubSchema.getCatalogNamePart().isEmpty() )
{
pCorrName.getQualifiedNameObj().setCatalogName(
pubSchema.getCatalogNamePart().getInternalName());
}
bindWA->resetErrStatus();
table = bindWA->getSchemaDB()->getNATableDB()->
get(pCorrName, bindWA, inTableDescStruct);
if ( !bindWA->errStatus() && table )
{ // if found in public schema, do not show previous error
// and replace corrName
CmpCommon::diags()->clear();
corrName.getQualifiedNameObj().setCatalogName(
pCorrName.getQualifiedNameObj().getCatalogName());
corrName.getQualifiedNameObj().setSchemaName(
pCorrName.getQualifiedNameObj().getSchemaName());
}
}
  // Moved to here, after the public schema try, because
  // BindUtil_CollectTableUsageInfo saves table info for MV definitions, etc.
// Conditionally (usually) do stuff for Catalog Manager (static func above).
if (catmanCollectTableUsages)
if (corrName.getSpecialType() != ExtendedQualName::TRIGTEMP_TABLE)
BindUtil_CollectTableUsageInfo(bindWA, corrName);
if (!table)
{
if (volatileTableFound)
{
if ((CmpCommon::diags()->mainSQLCODE() == -1003) &&
(NOT catNameSpecified))
{
// the name is in true USER_NAME.VOL_TAB_NAME form
// where the USER_NAME doesn't match current name.
// Clear errors and return an appropriate message.
CmpCommon::diags()->clear();
CmpCommon::context()->sqlSession()->validateVolatileCorrName
(corrName);
bindWA->setErrStatus();
}
}
return NULL;
}
}
// if a volatile table is found, make sure that volatile schema is in
// use and volatile tables are allowed.
if ((table) && (table->isVolatileTable()))
{
// set volatile table indication in table's tablename
((QualifiedName&)(table->getTableName())).setIsVolatile(TRUE);
}
// For now, don't allow access through the Trafodion external name created for
// native HIVE or HBASE objects unless the allowExternalTables flag is set.
// allowExternalTables is set for drop table and SHOWDDL statements.
  // TBD - may want to merge the Trafodion version with the native version.
if ((table) && table->isExternalTable() && (! bindWA->allowExternalTables()))
{
*CmpCommon::diags() << DgSqlCode(-4258)
<< DgTableName(table->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return NULL;
}
// If the table is an external table and has an associated native table,
// check to see if the external table structure still matches the native table.
// If not, return an error
if ((table) && table->isExternalTable())
{
NAString adjustedName =ComConvertTrafNameToNativeName
(table->getTableName().getCatalogName(),
table->getTableName().getUnqualifiedSchemaNameAsAnsiString(),
table->getTableName().getUnqualifiedObjectNameAsAnsiString());
    // Get a description of the associated native table
Int32 numNameParts = 3;
QualifiedName adjustedQualName(adjustedName,numNameParts,STMTHEAP, bindWA);
CorrName externalCorrName(adjustedQualName, STMTHEAP);
NATable *nativeNATable = bindWA->getSchemaDB()->getNATableDB()->
get(externalCorrName, bindWA, inTableDescStruct);
// Compare column lists
// TBD - return what mismatches
if ( nativeNATable && !(table->getNAColumnArray() == nativeNATable->getNAColumnArray()) &&
(NOT bindWA->externalTableDrop()))
{
*CmpCommon::diags() << DgSqlCode(-3078)
<< DgString0(adjustedName)
<< DgTableName(table->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
nativeNATable->setRemoveFromCacheBNC(TRUE);
return NULL;
}
}
HostVar *proto = corrName.getPrototype();
if (proto && proto->isPrototypeValid())
corrName.getPrototype()->bindNode(bindWA);
// This test is not "inAnyConstraint()" because we DO want to increment
// the count for View With Check Option constraints.
if (!getCurrentScope()->context()->inTableCheckConstraint() &&
!getCurrentScope()->context()->inRIConstraint())
table->incrReferenceCount();
if (table)
OSIM_captureTableOrView(table);
return table;
} // BindWA::getNATable()
static TableDesc *createTableDesc2(BindWA *bindWA,
const NATable *naTable,
CorrName &corrName, Hint *hint)
{
// Allocate a base table descriptor.
//
TableDesc *tdesc = new (bindWA->wHeap()) TableDesc(bindWA, naTable, corrName);
// Insert the table name into the XTNM.
//
bindWA->getCurrentScope()->getXTNM()->insertNames(bindWA, corrName);
if (bindWA->errStatus()) return NULL;
// For each NAColumn, allocate a BaseColumn, bind the BaseColumn, and
// add the ValueId to the TableDesc.
//
CollIndex i = 0;
for (i = 0; i < naTable->getColumnCount(); i++) {
BaseColumn *baseCol = new (bindWA->wHeap()) BaseColumn(tdesc, i);
baseCol->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
ValueId valId = baseCol->getValueId();
tdesc->addToColumnList(valId);
}
// set primary key for this table
tdesc->setPrimaryKeyColumns();
// For each index, create an IndexDesc.
//
NAString indexChoice;
NADefaults &defs = ActiveSchemaDB()->getDefaults();
defs.getValue(HIDE_INDEXES,indexChoice);
for (i = 0; i < naTable->getIndexList().entries(); i++)
{
NAFileSet *nfs=naTable->getIndexList()[i];
IndexDesc *idesc = new (bindWA->wHeap())
IndexDesc(tdesc, nfs, bindWA->currentCmpContext());
if (naTable->getClusteringIndex()->getFileSetName() ==
idesc->getIndexName()) {
tdesc->setClusteringIndex(idesc);
idesc->markAsClusteringIndex();
}
if(indexChoice.compareTo("NONE") ==0
OR indexChoice.compareTo("VERTICAL") ==0
OR (indexChoice.compareTo("KEYINDEXES") ==0 AND
tdesc->isKeyIndex(idesc))
OR naTable->getClusteringIndex()->getFileSetName() ==
nfs->getFileSetName())
{
tdesc->addIndex(idesc);
// implementation of optimizer hints
if (hint AND hint->hasIndexHint
(idesc->getNAFileSet()->getExtFileSetName()))
{
tdesc->addHintIndex(idesc);
}
if (idesc->isUniqueIndex() )
tdesc->addUniqueIndex(idesc);
}
else
{
delete idesc;
}
}
// For each vertical partition, create an IndexDesc.
// Add this VP to the list of VPs for the TableDesc.
for (i = 0; i < naTable->getVerticalPartitionList().entries(); i++) {
if(indexChoice.compareTo("NONE") ==0
OR indexChoice.compareTo("INDEXES")==0
OR indexChoice.compareTo("KEYINDEXES")==0)
{
IndexDesc *idesc = new (bindWA->wHeap())
IndexDesc(tdesc, naTable->getVerticalPartitionList()[i],
bindWA->currentCmpContext());
tdesc->addVerticalPartition(idesc);
}
}
// Allocate a RETDesc, attach it to the BindScope.
//
bindWA->getCurrentScope()->setRETDesc(new (bindWA->wHeap())
RETDesc(bindWA, tdesc));
// Do not include tables-referenced-in-a-constraint (when/if we allow them)
// in the view-contains-table list; if we did include them, then
// TableViewUsageList::getViewsOnTable() would give wrong results
  // where it is used to prevent the Halloween problem.
//
// If we end up needing this extra info, I advise either a separate list,
// or a new field in TableViewUsage indicating usage type (containment
// versus reference), enhancing method getViewsOnTable() accordingly.
//
if (!bindWA->getCurrentScope()->context()->inAnyConstraint())
bindWA->tableViewUsageList().insert(new (bindWA->wHeap())
TableViewUsage(
tdesc->getCorrNameObj().getQualifiedNameObj(),
tdesc->getCorrNameObj().getSpecialType(),
naTable->getViewText() != NULL,
bindWA->viewCount()));
return tdesc;
} // static createTableDesc2()
TableDesc *BindWA::createTableDesc(const NATable *naTable,
CorrName &corrName,
NABoolean catmanCollectUsages, Hint *hint)
{
BindWA *bindWA = this; // for coding convenience
TableDesc *tdesc = createTableDesc2(bindWA, naTable, corrName, hint);
if (bindWA->errStatus()) return NULL;
// Now bind any table check constraints and attach them to our new tdesc.
// These constraints must be processed for UPDATE and INSERT.
// DELETEs must clear them; see Delete::bindNode.
//
// For SELECTs, NOT NULL constraints are marked on the NAColumn::allowsNulls
// allowing more elaborate Transformations. For SELECTs, other types of
// constraints are not currently used, but could be in future,
// to optimize by providing additional predicate/selectivity info.
//
// ## We ought to write some regression test cases like
// INSERT INTO T (SELECT * FROM S) -- T's constraints yes, S irrelevant
// INSERT INTO T VALUES ((SELECT A FROM S WHERE..),..)
// INSERT INTO V3 ... -- underlying basetbl's constrts yes
// -- V3 atop VA atop T: let the views be
// -- WITH CHECK OPTION, then viewpred-constrt yes
//
const CheckConstraintList &ccl = naTable->getCheckConstraints();
if (ccl.entries()) {
// Table check constraint text is stored in the metadata tables
// with the underlying table/view name (e.g. "CHECK (C.S.T.COL > 0)"),
// whereas any correlation name in a query
// (e.g. "SELECT * FROM C.S.T FOO WHERE COL < 10")
// is irrelevant to the persistent constraint text --
// when binding the check constraint, we want to find column C.S.T.COL,
// while the TableDesc/RETDesc just built only exposes the column
// under names COL and FOO.COL.
//
// So, if we have a correlation name, we must:
// - rename our TableDesc (rename FOO to C.S.T)
// - create a temporary table name scope for C.S.T that will hide FOO
// - construct a temporary RETDesc with names COL, T.COL, S.T.COL, C.S.T.COL
// but the same ValueId's they had before
//
// Then we bind the constraints using that RETDesc for name lookups.
//
// Then for the non-empty correlation, reset/undo the temporary stuff.
RETDesc *savedRETDesc = NULL;
NABoolean corrNameIsNonEmpty = !corrName.getCorrNameAsString().isNull();
CorrName synonymReferenceCorrName;
if(naTable->getIsSynonymTranslationDone()){
QualifiedName baseQualifiedName(naTable->getSynonymReferenceName(),3);
synonymReferenceCorrName=baseQualifiedName;
}
if ((corrNameIsNonEmpty) || (naTable->getIsSynonymTranslationDone())) {
CorrName baseCorrName;
baseCorrName = (naTable->getIsSynonymTranslationDone()) ? synonymReferenceCorrName : naTable->getTableName();
tdesc->setCorrName(baseCorrName);
bindWA->getCurrentScope()->xtnmStack()->createXTNM();
bindWA->getCurrentScope()->getXTNM()->insertNames(bindWA, baseCorrName);
if (bindWA->errStatus()) return NULL;
savedRETDesc = bindWA->getCurrentScope()->getRETDesc();
bindWA->getCurrentScope()->setRETDesc(new (bindWA->wHeap())
RETDesc(bindWA, tdesc));
if (bindWA->errStatus()) return NULL;
}
for (CollIndex i = 0; i < ccl.entries(); i++) {
ItemExpr *constraintPred =
bindCheckConstraint(bindWA, ccl[i], naTable, catmanCollectUsages);
if (constraintPred)
tdesc->addCheckConstraint(bindWA, naTable, ccl[i], constraintPred);
else if (bindWA->errStatus())
break;
}
if ((corrNameIsNonEmpty) || (naTable->getIsSynonymTranslationDone())){ // reset temporaries
tdesc->setCorrName(corrName);
delete bindWA->getCurrentScope()->getRETDesc();
bindWA->getCurrentScope()->setRETDesc(savedRETDesc);
bindWA->getCurrentScope()->xtnmStack()->removeXTNM();
}
} // check constraint processing required
// if the table contains computed columns, bind the expressions to compute the columns
for (CollIndex c = 0; c < naTable->getColumnCount(); c++) {
NAColumn *nac = tdesc->getNATable()->getNAColumnArray()[c];
if (nac->isComputedColumn()) {
ItemExpr *computedColumnExpr = NULL;
Parser parser(bindWA->currentCmpContext());
// parse the text stored in the NAColumn
computedColumnExpr = parser.getItemExprTree(
nac->getComputedColumnExprString(),
str_len(nac->getComputedColumnExprString()),
CharInfo::UTF8);
if (computedColumnExpr) {
ParNameLocList *saveNameLocList = bindWA->getNameLocListPtr();
bindWA->setNameLocListPtr(NULL);
bindWA->getCurrentScope()->context()->inComputedColumnExpr() = TRUE;
computedColumnExpr = computedColumnExpr->bindNode(bindWA);
bindWA->setNameLocListPtr(saveNameLocList);
bindWA->getCurrentScope()->context()->inComputedColumnExpr() = FALSE;
if (bindWA->errStatus()) {
delete computedColumnExpr;
computedColumnExpr = NULL;
return NULL;
}
else {
// Store the expression tree in the base column
((BaseColumn *) tdesc->getColumnList()[c].getItemExpr())->
setComputedColumnExpr(computedColumnExpr->getValueId());
}
}
}
}
return tdesc;
} // BindWA::createTableDesc()
// QSTUFF - helper for BindWA::bindView.
static void propagateDeleteAndStream(RelExpr *re, GroupAttributes *ga)
{
if (ga->isEmbeddedUpdateOrDelete())
re->getGroupAttr()->setEmbeddedIUD(
ga->getEmbeddedIUD());
if (ga->isStream())
re->getGroupAttr()->setStream(TRUE);
if (ga->isSkipInitialScan())
re->getGroupAttr()->setSkipInitialScan(TRUE);
Int32 arity = re->getArity();
for (Int32 i = 0; i < arity; i++) {
if (re->child(i))
propagateDeleteAndStream(re->child(i), ga);
}
}
RelExpr *BindWA::bindView(const CorrName &viewName,
const NATable *naTable,
const StmtLevelAccessOptions &accessOptions,
ItemExpr *predicate,
GroupAttributes *groupAttrs,
NABoolean catmanCollectUsages)
{
BindWA *bindWA = this; // for coding convenience
CMPASSERT(viewName.getQualifiedNameObj() == naTable->getTableName());
NABoolean inViewExpansion = bindWA->setInViewExpansion(TRUE); // QSTUFF
  // set a flag for override_schema
//if (overrideSchemaEnabled())
bindWA->getCurrentScope()->setInViewExpansion(TRUE);
if (!bindWA->getCurrentScope()->context()->inAnyConstraint())
bindWA->tableViewUsageList().insert(new (bindWA->wHeap())
TableViewUsage(
viewName.getQualifiedNameObj(),
viewName.getSpecialType(),
TRUE/*isView*/,
bindWA->viewCount()));
// save the current parserflags setting
ULng32 savedParserFlags = Get_SqlParser_Flags (0xFFFFFFFF);
// allow funny characters in the tablenames used in the select list.
  // This enables views to be created on 'internal' secret tables
  // so they can be accessed.
  // At view creation time, the caller still needs to set this
  // parser flag from the SQL interface (mxci, etc.), otherwise the view
  // creation will fail. Since parser flags can only be set by super
  // users, a view with special table names could only have been created
  // by a super user.
Set_SqlParser_Flags(ALLOW_FUNNY_IDENTIFIER);
// Parse the view text.
//
// isolation level and order by are allowed in create view, if
// the corresponding cqds are set.
// These cqds are only valid during 'create view' time. Once the views
// are created, we don't need to look at them.
// During view expansion when we reach this method, turn the cqds on if
// they are not already on, so parser doesn't return an error.
// Reset them back, if they were set here.
NABoolean allowIsolationLevelWasSet = FALSE;
NABoolean allowOrderByWasSet = FALSE;
if (CmpCommon::getDefault(ALLOW_ISOLATION_LEVEL_IN_CREATE_VIEW) == DF_OFF)
{
allowIsolationLevelWasSet = TRUE;
NAString op("ON");
ActiveSchemaDB()->getDefaults().validateAndInsert
("ALLOW_ISOLATION_LEVEL_IN_CREATE_VIEW", op, FALSE);
}
if (CmpCommon::getDefault(ALLOW_ORDER_BY_IN_CREATE_VIEW) == DF_OFF)
{
allowOrderByWasSet = TRUE;
NAString op("ON");
ActiveSchemaDB()->getDefaults().validateAndInsert
("ALLOW_ORDER_BY_IN_CREATE_VIEW", op, FALSE);
}
Parser parser(bindWA->currentCmpContext());
ExprNode *viewTree = parser.parseDML(naTable->getViewText(),
naTable->getViewLen(),
naTable->getViewTextCharSet());
// Restore parser flags settings to what they originally were
Set_SqlParser_Flags (savedParserFlags);
if (allowIsolationLevelWasSet)
{
NAString op("OFF");
ActiveSchemaDB()->getDefaults().validateAndInsert
("ALLOW_ISOLATION_LEVEL_IN_CREATE_VIEW", op, FALSE);
}
if (allowOrderByWasSet)
{
NAString op("OFF");
ActiveSchemaDB()->getDefaults().validateAndInsert
("ALLOW_ORDER_BY_IN_CREATE_VIEW", op, FALSE);
}
if (NOT viewTree) {
bindWA->setErrStatus();
return NULL;
}
// Remove the StmtQuery node.
// Clear the root flag in the RelRoot node since this not the topmost
// RelRoot in the query tree.
//
CMPASSERT(viewTree->getOperatorType() == STM_QUERY);
RelExpr *queryTree = viewTree->castToStatementExpr()->getQueryExpression();
CMPASSERT(queryTree->getOperatorType() == REL_ROOT);
((RelRoot *)queryTree)->setRootFlag(FALSE);
CMPASSERT(queryTree->getChild(0)->getOperatorType() == REL_DDL);
StmtDDLCreateView *createViewTree = ((DDLExpr *)(queryTree->getChild(0)))->
getDDLNode()->castToStmtDDLNode()->castToStmtDDLCreateView();
CMPASSERT(createViewTree);
queryTree = createViewTree->getQueryExpression();
CMPASSERT(queryTree->getOperatorType() == REL_ROOT);
((RelRoot *)queryTree)->setRootFlag(FALSE);
RelRoot *viewRoot = (RelRoot *)queryTree; // save for add'l binding below
ParNameLocList *saveNameLocList = bindWA->getNameLocListPtr();
// This was put here for Genesis 10-980217-0467.
// Now with the fix for 10-980408-5149, we even more strongly need to bypass
// or ignore any accessOpts from the view, for a consistent access model.
if ((CmpCommon::getDefault(ALLOW_ISOLATION_LEVEL_IN_CREATE_VIEW) == DF_OFF) ||
(viewRoot->accessOptions().accessType() == ACCESS_TYPE_NOT_SPECIFIED_))
{
    // if the cqd is set and view options were explicitly specified,
    // then do not overwrite them with accessOptions.
viewRoot->accessOptions() = accessOptions;
}
// Set the WCO context (Genesis 10-971112-7028 + 10-990518-8420):
// If this view is WITH CHECK OPTION, then all views below it acquire
// check-option-ness, per Ansi 11.19 GR 9-11a
// (we implement only CASCADED -- see further notes later on in this func);
// if some view above this one is WCO, then this view effectively is too,
// regardless of its getViewCheck() value.
// Genesis 10-990518-8420 fix in particular:
// with-check-option views of the form
// SELECT..FROM(SELECT..WHERE p1)REN WHERE p2
// were emitting a bind error on pred p1, and ignoring pred p2!
//
NABoolean topmostViewWithCheckOption = FALSE;
if (naTable->getViewCheck() &&
bindWA->getCurrentScope()->context()->inUpdateOrInsert() &&
!bindWA->inViewWithCheckOption()) {
topmostViewWithCheckOption = TRUE;
bindWA->inViewWithCheckOption() = naTable;
}
// QSTUFF
// Give the new query tree the pubsub group attrs before
// binding, so that binder checks are applied to the new tree.
if ((groupAttrs) &&
(groupAttrs->isEmbeddedUpdateOrDelete() || groupAttrs->isStream()))
propagateDeleteAndStream(queryTree,groupAttrs);
// ************ THE FIRST OF TWO BINDNODE'S ************
// Bind the basic queryTree first (before Rename), for stoi_ security stuff.
// Cascade the WCO-ness down to RelExpr::bindSelf which captures predicates.
// On this bind, unconditionally we never collect usages.
//
bindWA->viewCount()++;
bindWA->setNameLocListPtr(NULL); // do not collect usages for catman
queryTree = queryTree->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
bindWA->setNameLocListPtr(saveNameLocList);
bindWA->viewCount()--;
if (bindWA->errStatus())
return NULL;
// if RelRoot has an order by, insert a Logical Sort node below it
// and move the order by expr from view root to this sort node.
// The view root node is eliminated during transformation/normalization
// and the sortlogical node provides a place to 'hold' the order by expr.
// During transformation, this sort key is moved from the sortlogical node
// to the root node of the query, if there is no explicit order by
// specified as part of the query.
// SortLogical node is a shortlived node and is eliminated during
// the normalization phase.
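  // For example, for a hypothetical view created (with
  // ALLOW_ORDER_BY_IN_CREATE_VIEW ON) as
  //   CREATE VIEW V AS SELECT A, B FROM T ORDER BY A;
  // the SortLogical node holds "ORDER BY A" until transformation decides
  // whether the enclosing query supplies its own order.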
if (viewRoot->hasOrderBy())
{
RelExpr * sortNode = new (bindWA->wHeap())
SortLogical(queryTree->child(0)->castToRelExpr(),
viewRoot->reqdOrder(),
bindWA->wHeap());
sortNode = sortNode->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
viewRoot->removeOrderByTree();
viewRoot->reqdOrder().clear();
viewRoot->setChild(0, sortNode);
}
// Insert a RenameTable node above the view tree.
//
const NAColumnArray &columns = naTable->getNAColumnArray();
ItemExpr *columnList = new (bindWA->wHeap())
RenameCol(NULL, new (bindWA->wHeap())
ColRefName(columns[0]->getColName(), bindWA->wHeap()));
//
CollIndex i = 1;
for (i = 1; i < naTable->getColumnCount(); i++)
columnList = new (bindWA->wHeap())
ItemList(columnList, new (bindWA->wHeap())
RenameCol(NULL, new (bindWA->wHeap())
ColRefName(columns[i]->getColName(), bindWA->wHeap())));
//
queryTree = new (bindWA->wHeap())
RenameTable(TRUE/*copy tableName as is*/,
queryTree->castToRelExpr(),
viewName,
columnList,
bindWA->wHeap(),
TRUE/*isView*/);
if (predicate) queryTree->addSelPredTree(predicate);
((RenameTable *) queryTree)->setViewNATable(naTable);
// this query used this view
appendViewName
(viewName.getQualifiedNameObj().getQualifiedNameAsAnsiString().data());
  // set a flag for override_schema
// with the call to bindNode below, only the Rename node will be bound.
// Since the view has already been expanded we reset the viewExpansion flag here.
//if (overrideSchemaEnabled())
bindWA->getCurrentScope()->setInViewExpansion(inViewExpansion);
// ************ THE SECOND OF TWO BINDNODE'S ************
// Bind the view tree whose top is this new RenameTable.
// If we are the topmost WCO, then do NOT cascade the incoming predicate!
// Collect usages only if CatMan caller requested it.
//
if (topmostViewWithCheckOption) bindWA->inViewWithCheckOption() = NULL;
if (!catmanCollectUsages) bindWA->setNameLocListPtr(NULL);
queryTree = queryTree->bindNode(bindWA);
bindWA->setNameLocListPtr(saveNameLocList);
if (bindWA->errStatus()) return NULL;
((RenameTable *) queryTree)->setViewNATable(NULL);
// Genesis 10-980126-5495:
// Now that we have the RenameTable's RETDesc, set its view column headings.
// We know that the NATable and the RenameTable column lists are in lockstep.
//
const ColumnDescList &columnsRET = *queryTree->getRETDesc()->getColumnList();
CMPASSERT(columns.entries() == naTable->getColumnCount() &&
columns.entries() == columnsRET.entries());
for (i = 0; i < naTable->getColumnCount(); i++)
{
columnsRET[i]->setHeading(columns[i]->getHeading());
}
// If it's a view that is WITH CHECK OPTION, and this is an UPDATE/INSERT,
// bind/transform/normalize the view predicate and place it as a constraint
// on the base table's TableDesc. This is equivalent to the default kind
// of check clause, WITH CASCADED CHECK OPTION, which is all we need provide
// up through Intermediate-Level SQL'92.
//
// (ANSI says that all CHECK OPTION views must be updatable (11.19 SR12)
// which means it references exactly one updatable view or, at bottom,
// exactly one base table (7.9 SR12).
// MP guarantees that all CHECK OPTION views must be protection views, and
// all pviews reference exactly one base table.)
//
// Notice that since (Genesis 10-990518-8420) we now bind and collect the
// view preds in bindSelf -- i.e. pushed down below here --
// only this topmost WCO can set up the constraint(s).
// Thus we have lost the nice, but not mandated by Ansi, ability to specify
// which cascaded-down-to view causes which exact pred violation --
// i.e. error EXE_CHECK_OPTION_VIOLATION_CASCADED (8104)
// no longer appears, only EXE_CHECK_OPTION_VIOLATION (8105).
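  // For example, given a hypothetical view
  //   CREATE VIEW V AS SELECT * FROM T WHERE C > 0 WITH CHECK OPTION;
  // an INSERT INTO V that supplies C <= 0 fails with
  // EXE_CHECK_OPTION_VIOLATION (8105), enforced through the constraint
  // added to the base table's TableDesc below.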
if (topmostViewWithCheckOption) {
CheckConstraint *constraint = NULL;
ItemExpr *viewCheckPred = NULL;
if (bindWA->predsOfViewWithCheckOption().entries()) {
constraint = new (bindWA->wHeap())
CheckConstraint(viewName.getQualifiedNameObj(), // this view name
naTable->getTableName(), // no parsing needed
bindWA->wHeap());
viewCheckPred = bindWA->predsOfViewWithCheckOption().rebuildExprTree();
}
// if at least one predicate exists in the view or what underlies it
if (constraint) {
RelExpr *underlyingTableOrView = viewRoot->child(0);
RETDesc *saveRETDesc = bindWA->getCurrentScope()->getRETDesc();
RETDesc *underlyingRETDesc = underlyingTableOrView->getRETDesc();
bindWA->getCurrentScope()->setRETDesc(underlyingRETDesc);
CMPASSERT(underlyingTableOrView);
CMPASSERT(underlyingTableOrView->getOperatorType() == REL_RENAME_TABLE ||
underlyingTableOrView->getOperatorType() == REL_SCAN);
ItemExpr *constraintPred =
bindCheckConstraint(bindWA,
constraint,
naTable,
catmanCollectUsages,
viewCheckPred);
if (constraintPred)
queryTree->getScanNode()->getTableDesc()->addCheckConstraint(
bindWA,
naTable, // topmost WCO view
constraint, // this view name
constraintPred);
bindWA->getCurrentScope()->setRETDesc(saveRETDesc);
} // at least one predicate exists
bindWA->inViewWithCheckOption() = NULL;
bindWA->predsOfViewWithCheckOption().clear();
} // topmost WCO view
// QSTUFF
bindWA->setInViewExpansion(inViewExpansion);
bindWA->getUpdateToScanValueIds().clear();
// QSTUFF
return queryTree;
} // BindWA::bindView()
// -----------------------------------------------------------------------
// member functions for class RelExpr
// -----------------------------------------------------------------------
void RelExpr::bindChildren(BindWA *bindWA)
{
// Increment the trigger recursion counter.
if (getInliningInfo().isTriggerRoot())
getInliningInfo().getTriggerObject()->incRecursionCounter();
// TSJ's flow their data from left child to right child;
// some can also share binding scope column info from left to right.
Int32 arity = getArity();
for (Int32 i = 0; i < arity; i++) {
if (child(i)) {
// If doing a non-first child and the operator is
// NOT one in which values/names can flow from one scope
      // to the sibling scope, then we must clear the current RETDesc
// (so as to disallow the illegal query in the Binder internals document,
// section 1.5.3, also in TEST028).
//
if (i && !getOperator().match(REL_ANY_TSJ))
bindWA->getCurrentScope()->setRETDesc(NULL);
child(i) = child(i)->bindNode(bindWA);
if (bindWA->errStatus()) return;
}
}
synthPropForBindChecks(); // QSTUFF
// Decrement the trigger recursion counter.
if (getInliningInfo().isTriggerRoot())
getInliningInfo().getTriggerObject()->decRecursionCounter();
} // RelExpr::bindChildren()
void RelExpr::synthPropForBindChecks() // QSTUFF
{
// synthesis of delete and stream properties to
// allow for binder checks. We assume that all
// operators are rejected when binding the respective node
// -- except UNIONS -- in which more than one child has
// has any of those attributes. If both attributes are
// specified both must be specified for the same
// result-set/base table.
for (Int32 j = 0; j < getArity(); j++) {
if (child(j)) {
if (child(j)->getGroupAttr()->isStream())
{
getGroupAttr()->setStream(TRUE);
if (child(j)->getGroupAttr()->isSkipInitialScan())
getGroupAttr()->setSkipInitialScan(TRUE);
}
if (child(j)->getGroupAttr()->isEmbeddedUpdateOrDelete() ||
child(j)->getGroupAttr()->isEmbeddedInsert())
getGroupAttr()->setEmbeddedIUD(
child(j)->getGroupAttr()->getEmbeddedIUD());
if (child(j)->getGroupAttr()->reorderNeeded())
getGroupAttr()->setReorderNeeded(TRUE);
}
}
}
RelExpr *RelExpr::bindSelf(BindWA *bindWA)
{
// create the group attributes
//
if (NOT getGroupAttr())
setGroupAttr(new (bindWA->wHeap()) GroupAttributes);
//
// Detach the item expression tree for the predicate, bind it, convert it to
// a ValueIdSet, and attach it to the RelExpr node.
//
ItemExpr *predTree = removeSelPredTree();
if (predTree) {
bindWA->getCurrentScope()->context()->inWhereClause() = TRUE;
predTree->convertToValueIdSet(selectionPred(), bindWA, ITM_AND);
bindWA->getCurrentScope()->context()->inWhereClause() = FALSE;
if (bindWA->errStatus()) return this;
// If this is an embedded insert, then subquery predicates are not
// allowed.
// For example: To handle this query and issue an error stating
// subqueries are not allowed in embedded inserts
//
// select a from (insert into t901t01 values(22,22,222))t(a,b,c)
// where t.a IN (select m from t901t03 where t901t03.m = 77);
if (getGroupAttr()->isEmbeddedInsert())
{
if (!selectionPred().isEmpty() && selectionPred().containsSubquery())
{
*CmpCommon::diags() << DgSqlCode(-4337);
bindWA->setErrStatus();
return this;
}
}
// Genesis 10-990518-8420.
if (bindWA->inViewWithCheckOption())
bindWA->predsOfViewWithCheckOption() += selectionPred();
}
// ++MV
// Bind the uniqueColumnsTree expression.
//
ItemExpr *uniqueColumnsTree = removeUniqueColumnsTree();
if (uniqueColumnsTree)
{
uniqueColumnsTree->
convertToValueIdSet(getUniqueColumns(), bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) return this;
}
// --MV
// set flag here if an Insert/Update/Delete operation is below this node
if( bindWA->isBindingIUD() )
{
setSeenIUD();
}
//
// This mechanism is used to set InliningInfo flags on an entire subtree.
getInliningInfo().setFlags(bindWA->getInliningInfoFlagsToSetRecursivly());
//
// Add the values in the Outer References Set as the input values
// that must be supplied to this RelExpr.
//
getGroupAttr()->addCharacteristicInputs(bindWA->getCurrentScope()->getOuterRefs());
markAsBound();
return this;
} // RelExpr::bindSelf()
RelExpr *RelExpr::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
return bindSelf(bindWA);
}
RETDesc *RelExpr::getRETDesc() const
{
if (RETDesc_)
return RETDesc_;
if (getArity() == 1)
return child(0)->getRETDesc();
else
return NULL;
}
// When there is a view atop a view atop a ... atop a single base table,
// this will follow the chain of RenameTable-RelRoot-... down till it finds
// the bottom, the single base table's Scan node.
//
// This method does check to ensure exactly one single base table.
//
Scan *RelExpr::getScanNode(NABoolean assertExactlyOneScanNode) const
{
RelExpr *result = (RelExpr *)this; // cast away constness, big whoop
while (result) {
if ((result->getOperatorType() == REL_SCAN) ||
(result->getOperatorType() == REL_HBASE_ACCESS))
break;
if (result->getArity() > 1) {
if (assertExactlyOneScanNode)
{
CMPASSERT(result->getArity() <= 1);
}
else return NULL;
}
result = result->child(0);
}
if (assertExactlyOneScanNode) { CMPASSERT(result); }
return (Scan *)result;
}
Scan *RelExpr::getLeftmostScanNode() const
{
RelExpr *result = (RelExpr *)this; // cast away constness, big whoop
while (result) {
if (result->getOperatorType() == REL_SCAN) break;
result = result->child(0);
}
return (Scan *)result;
}
// QSTUFF
// We use this method for finding the scan node of an updatable view.
// This may either be a base table scan or a RenameTable node inserted
// by a previous index expansion.
RelExpr *RelExpr::getViewScanNode(NABoolean isTopLevelUpdateInView) const
{
RelExpr *result = (RelExpr *)this; // cast away constness, big whoop
while (result) {
if (result->getOperatorType() == REL_SCAN) break;
if (result->getOperatorType() == REL_RENAME_TABLE &&
((RenameTable *)result)->isView()) break;
result = result->child(0);
}
return result;
}
// -----------------------------------------------------------------------
// getFirstIUDNode
//
// Return the first node that is an insert, update, or delete.
// Only search down left side from the starting point (currentNode)
//
// If an IUD node is not found, return NULL
// -----------------------------------------------------------------------
GenericUpdate * Join::getFirstIUDNode(RelExpr *currentNode)
{
while(currentNode)
{
if( currentNode->getOperator().match(REL_ANY_GEN_UPDATE))
{
break;
}
currentNode = currentNode->child(0);
}
return (GenericUpdate*)currentNode;
}
// -----------------------------------------------------------------------
// member functions for class Join
//
// When we implement "JOIN USING (column list)", we need to: ##
// - disallow both NATURAL and USING in the same query (syntax err in Parser?)
// - ensure that the named USING cols are indeed common cols
// - coalesce common cols for USING just as we do for NATURAL,
// including ensuring that common cols are marked as referenced
// (as done in joinCommonColumns)
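//   For example, a hypothetical T1 JOIN T2 USING (A) would have to verify
//   that A is a common column of T1 and T2, then coalesce and mark it
//   exactly as NATURAL JOIN does for all common columns.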
// -----------------------------------------------------------------------
RelExpr *Join::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Do not support for general NEO users.
if ( (getOperatorType() == REL_FULL_JOIN) &&
(CmpCommon::getDefault(COMP_BOOL_192) == DF_ON) ) {
RelExpr *leftJoin = this;
leftJoin->setOperatorType(REL_LEFT_JOIN);
RelExpr *antiJoin = leftJoin->copyTree(bindWA->wHeap());
antiJoin->setOperatorType(REL_RIGHT_JOIN);
NAString leftName("ALJ", bindWA->wHeap());
// Make it unique.
//
leftName += bindWA->fabricateUniqueName();
RelExpr *rename = new (bindWA->wHeap())
RenameTable(antiJoin, leftName);
RelExpr *unionAll = new (bindWA->wHeap()) Union(leftJoin, rename);
unionAll->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// Make sure there is at least one null instantiated
// value that is suitable for use as a filter.
// To be suitable, it must be null instantiated and
    // its child must not be nullable. We want to filter
    // the NULLs that are a result of null instantiation, not
// original null values.
//
ItemExpr *cval = new (bindWA->wHeap()) SystemLiteral(1);
cval->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// Null instantiate the value.
//
ValueId niCval = cval->getValueId().nullInstantiate(bindWA, TRUE);
// Add it to the RETDesc of the Join.
//
ColRefName cvalName("", bindWA->wHeap());
antiJoin->getRETDesc()->addColumn(bindWA, cvalName , niCval, USER_COLUMN);
// Add it to the list of null instantiated outputs.
//
((Join *)antiJoin)->nullInstantiatedOutput().insert(niCval);
ItemExpr *nullCheck = niCval.getItemExpr();
CMPASSERT(nullCheck);
ItemExpr *filter = new (bindWA->wHeap())
UnLogic(ITM_IS_NULL, nullCheck );
filter->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// Add filter to Join
//
antiJoin->selectionPred() += filter->getValueId();
return unionAll;
}
Join *saveInJ = bindWA->getCurrentScope()->context()->inJoin();
bindWA->getCurrentScope()->context()->inJoin() = this;
NABoolean savedPrivSetting = FALSE;
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
// MV logging push-down
if( getInliningInfo().isDrivingMvLogInsert() )
{
GenericUpdate *rightSideIUD = getFirstIUDNode(this->child(1));
if( NULL != rightSideIUD )
{
TableDesc *tdesc = rightSideIUD->getTableDesc();
CMPASSERT(tdesc);
const NATable *table = tdesc->getNATable();
// only for MV logs
if( ExtendedQualName::IUD_LOG_TABLE == table->getSpecialType() )
{
updateTableDesc_ = tdesc;
updateSelectValueIdMap_ = new (bindWA->wHeap())
ValueIdMap(rightSideIUD->updateToSelectMap());
}
}
}
// Controlled availability of Full Outer Join support
  // The COMP_BOOL_199 must be removed when full outer join
  // becomes generally available.
  // Full outer joins are not currently supported,
  // but can be enabled by setting COMP_BOOL_199 to ON.
if ((getOperatorType() == REL_FULL_JOIN &&
(CmpCommon::getDefault(COMP_BOOL_199) == DF_OFF))
|| //OR
(getOperatorType() == REL_UNION_JOIN )){
// 3022 Feature not yet supported
*CmpCommon::diags() << DgSqlCode(-3022)
<< DgString0(
(getOperatorType() == REL_FULL_JOIN) ?
"FULL OUTER JOIN" : "UNION JOIN");
bindWA->setErrStatus();
return this;
}
//
// Bind the ON clause of the join.
//
RelExpr *leftRelExpr = child(0).getPtr();
RelExpr *rightRelExpr = child(1).getPtr();
RETDesc *leftTable = child(0)->getRETDesc();
RETDesc *rightTable = child(1)->getRETDesc();
ItemExpr *joinPredx;
if (isNaturalJoin()) {
// Since the common column references need to fetch histograms, the where
// flag is set here so that when we call markAsReferencedColumn()
// in the joinCommonColumns() method it will mark the common
// columns as referenced by looking at the inWhereClause_ flag.
NABoolean orig = bindWA->getCurrentScope()->context()->inWhereClause();
bindWA->getCurrentScope()->context()->inWhereClause() = TRUE;
joinPredx = joinCommonColumns(leftRelExpr, rightRelExpr, bindWA);
bindWA->getCurrentScope()->context()->inWhereClause() = orig;
}
else
joinPredx = removeJoinPredTree();
if (joinPredx) {
ItemExpr *saveInJP = bindWA->getCurrentScope()->context()->inJoinPred();
bindWA->getCurrentScope()->context()->inJoinPred() = joinPredx;
RETDesc preJoinResult;
preJoinResult.addColumns(bindWA, *leftTable);
preJoinResult.addColumns(bindWA, *rightTable);
bindWA->getCurrentScope()->setRETDesc(&preJoinResult);
joinPredx->convertToValueIdSet(joinPred(), bindWA, ITM_AND);
bindWA->getCurrentScope()->context()->inJoinPred() = saveInJP;
if (bindWA->errStatus()) return this;
}
//
// Create the output list.
// The TRUE's in the nullInstantiate() force a Cast expression to be set up,
// as required by the Normalizer.
//
NABoolean newTables = TRUE;
ValueIdList &nullOutputList = nullInstantiatedOutput();
ValueIdList &nullOutputForRightJoinList = nullInstantiatedForRightJoinOutput();
switch(getOperatorType()) {
case REL_LEFT_JOIN:
leftTable = new (bindWA->wHeap()) RETDesc(bindWA, *leftTable);
rightTable = rightTable->nullInstantiate(bindWA, TRUE, nullOutputList);
break;
case REL_RIGHT_JOIN:
leftTable = leftTable->nullInstantiate(bindWA, TRUE, nullOutputList);
rightTable = new (bindWA->wHeap()) RETDesc(bindWA, *rightTable);
break;
case REL_FULL_JOIN:
case REL_UNION_JOIN:
{
leftTable = leftTable->nullInstantiate(bindWA, TRUE, nullOutputForRightJoinList);
rightTable = rightTable->nullInstantiate(bindWA, TRUE, nullOutputList);
// COMP_BOOL_198 = 'ON' enables the FullOuter transformation
// to inner, left, or right joins.
if (CmpCommon::getDefault(COMP_BOOL_198) == DF_OFF) //don't enable FOJ transformation
{
ItemExpr * instNull = NULL;
CollIndex index = 0;
// disable the FOJ Transformation.
for (index = 0; index < nullInstantiatedOutput().entries(); index++)
{
instNull = nullInstantiatedOutput()[index].getItemExpr();
CMPASSERT(instNull->getOperatorType() == ITM_INSTANTIATE_NULL);
((InstantiateNull *)instNull)->NoCheckforLeftToInnerJoin = TRUE;
} // endfor
instNull = NULL;
for (index = 0;
index < nullInstantiatedForRightJoinOutput().entries(); index++)
{
instNull = nullInstantiatedForRightJoinOutput()[index].getItemExpr();
CMPASSERT(instNull->getOperatorType() == ITM_INSTANTIATE_NULL);
((InstantiateNull *)instNull)->NoCheckforLeftToInnerJoin = TRUE;
} // endfor
} // COMP_BOOL_198 == DF_OFF (FOJ transformation disabled)
break;
}
case REL_JOIN:
default:
newTables = FALSE;
break;
}
RETDesc *resultTable = new (bindWA->wHeap()) RETDesc(bindWA);
Int32 rowSet = (child(0)->getOperatorType() == REL_RENAME_TABLE) &&
(child(0)->child(0)->getOperatorType() == REL_UNPACKROWS) &&
(child(1)->getOperatorType() == REL_ROOT);
if (NOT isNaturalJoin()) {
if ((!rowSet) &&
(getOperatorType() != REL_TSJ_FLOW)) {
resultTable->addColumns(bindWA, *leftTable);
}
// ++MV -- bug fixing for semi-joins
if (!isSemiJoin())
{
resultTable->addColumns(bindWA, *rightTable);
}
// --MV -- bug fixing for semi-joins
} else {
coalesceCommonColumns(bindWA,
getOperatorType(),
*leftTable,
*rightTable,
*resultTable);
if (bindWA->errStatus()) return this;
}
setRETDesc(resultTable);
bindWA->getCurrentScope()->setRETDesc(resultTable);
// QSTUFF
NAString fmtdList(bindWA->wHeap());
LIST(TableNameMap*) xtnmList(bindWA->wHeap());
bindWA->getTablesInScope(xtnmList, &fmtdList);
if ((child(0)->getGroupAttr()->isStream()) &&
(child(1)->getGroupAttr()->isStream())){
bindWA->getTablesInScope(xtnmList, &fmtdList);
*CmpCommon::diags() << DgSqlCode(-4158)
<< DgString0(fmtdList);
bindWA->setErrStatus();
return this;
}
// Disallowing joins for EMBEDDED...INSERT
//
if (getGroupAttr()->isEmbeddedInsert() &&
!isTSJForWrite() // the tsjForWrite flag is set for
// those joins which are created by
// the Binder during inlining (eg. IndexMaintenance).
// Here we only want to disable user-specified joins
// and not joins introduced as part of inlining.
){
*CmpCommon::diags() << DgSqlCode(-4336)
<< DgString0(fmtdList)
<< DgString1(getGroupAttr()->getOperationWithinGroup());
bindWA->setErrStatus();
return this;
}
if ( ((child(0)->getGroupAttr()->isEmbeddedUpdateOrDelete()) &&
(child(1)->getGroupAttr()->isEmbeddedUpdateOrDelete())) ||
((child(0)->getGroupAttr()->isEmbeddedInsert()) &&
(child(1)->getGroupAttr()->isEmbeddedInsert())) ||
(bindWA->isEmbeddedIUDStatement()) ) {
NAString type0,type1;
if (child(0)->getGroupAttr()->isEmbeddedUpdate())
type0 = "UPDATE";
else
{
if (child(0)->getGroupAttr()->isEmbeddedInsert())
type0 = "INSERT";
else
type0 = "DELETE";
}
if (child(1)->getGroupAttr()->isEmbeddedUpdate())
type1 = "UPDATE";
else
{
if (child(1)->getGroupAttr()->isEmbeddedInsert())
type1 = "INSERT";
else
type1 = "DELETE";
}
*CmpCommon::diags() << DgSqlCode(-4175)
<< DgString0(fmtdList)
<< DgString1(type0)
<< DgString2(type1);
bindWA->setErrStatus();
return this;
}
if ((child(0)->getGroupAttr()->isEmbeddedUpdateOrDelete() ||
child(0)->getGroupAttr()->isStream()) &&
(child(1)->getGroupAttr()->isEmbeddedUpdateOrDelete() ||
child(1)->getGroupAttr()->isStream())){
*CmpCommon::diags() << DgSqlCode(-4176)
<< DgString0(fmtdList)
<< (getGroupAttr()->isEmbeddedUpdate() ?
DgString1("UPDATE"):DgString1("DELETE"));
bindWA->setErrStatus();
return this;
}
if (getOperatorType() == REL_LEFT_JOIN){
if (child(1)->getGroupAttr()->isEmbeddedUpdateOrDelete()){
*CmpCommon::diags() << DgSqlCode(-4156)
<< DgString0(fmtdList)
<< (child(1)->getGroupAttr()->isEmbeddedUpdate() ?
DgString1("UPDATE"):DgString1("DELETE"));
bindWA->setErrStatus();
return this;
}
if (child(1)->getGroupAttr()->isStream()){
*CmpCommon::diags() << DgSqlCode(-4157)
<< DgString0(fmtdList);
bindWA->setErrStatus();
return this;
}
}
if (getOperatorType() == REL_RIGHT_JOIN){
if (child(0)->getGroupAttr()->isEmbeddedUpdateOrDelete()){
*CmpCommon::diags() << DgSqlCode(-4164)
<< DgString0(fmtdList)
<< (child(0)->getGroupAttr()->isEmbeddedUpdate() ?
DgString1("UPDATE"):DgString1("DELETE"));
bindWA->setErrStatus();
return this;
}
if (child(0)->getGroupAttr()->isStream()){
*CmpCommon::diags() << DgSqlCode(-4165)
<< DgString0(fmtdList);
bindWA->setErrStatus();
return this;
}
}
// We need to move stream and nested updates to the
// left to ensure correct execution. This causes the statement
// to be rejected if the user specified join_order_by_user and
// the query must be reordered.
if (child(1)->getGroupAttr()->isStream() ||
child(1)->getGroupAttr()->isEmbeddedUpdateOrDelete()){
getGroupAttr()->setReorderNeeded(TRUE);
}
// QSTUFF
if (newTables) {
delete leftTable;
delete rightTable;
}
bindWA->getCurrentScope()->context()->inJoin() = saveInJ;
if (getOperatorType() == REL_TSJ){
// Using rowsets in a predicate with embedded update/delete results
// in a NestedJoin subtree after Normalization. This NestedJoin subtree
// has embedded update/delete as the right child, which is not allowed
// during optimization. Here we try to disallow this usage at Binding
// when a REL_TSJ subtree has rowsets as the left child and embedded
// update/delete as the right child. An error message [4213] is signaled.
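// (Illustrative sketch only; the statement shape and names are hypothetical:
//   SELECT * FROM (DELETE FROM t WHERE ...) AS d WHERE d.x IN (:hostArray)
// would place the rowset unpacking on the left and the embedded delete
// on the right of the REL_TSJ, which is the shape rejected here.)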
if (rowSet && getGroupAttr()->isEmbeddedUpdateOrDelete()){
*CmpCommon::diags() << DgSqlCode(-4213);
bindWA->setErrStatus();
return this;
}
}
// transfer rowsetRowCountArraySize from HostArrayWA to this node.
if (bindWA->getHostArraysArea() && isRowsetIterator())
setRowsetRowCountArraySize(bindWA->getHostArraysArea()->getRowsetRowCountArraySize());
// Bind the base class.
//
return bindSelf(bindWA);
} // Join::bindNode()
//++MV
// This function builds the ValueIdMap that is used for translating the required
// sort key to the right child sort key and backwards
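// (For example: if a left join null-instantiates T2.B, the map pairs
// InstantiateNull(T2.B) with T2.B, so a required order on the join's
// output can be translated to the right child's sort key and back.
// T2.B is a placeholder name.)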
void Join::BuildRightChildMapForLeftJoin()
{
ValueIdMap &map = rightChildMapForLeftJoin();
for (CollIndex j = 0; j < nullInstantiatedOutput().entries(); j++)
{
ValueId instNullId, rightChildId;
instNullId = nullInstantiatedOutput_[j];
assert(instNullId.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL);
// Access the operand of the InstantiateNull
rightChildId = (((InstantiateNull *)(instNullId.getItemExpr()))->getExpr()->getValueId());
map.addMapEntry(instNullId, rightChildId);
}
}
//--MV
//++MV
// This function builds the ValueIdMap that is used for translating the
// required sort key to the left child sort key and backwards.
void Join::BuildLeftChildMapForRightJoin()
{
ValueIdMap &map = leftChildMapForRightJoin();
for (CollIndex j = 0; j < nullInstantiatedForRightJoinOutput().entries(); j++)
{
ValueId instNullId, leftChildId;
instNullId = nullInstantiatedForRightJoinOutput_[j];
assert(instNullId.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL);
// Access the operand of the InstantiateNull
leftChildId = (((InstantiateNull *)(instNullId.getItemExpr()))->getExpr()->getValueId());
map.addMapEntry(instNullId, leftChildId);
}
}
//--MV
// -----------------------------------------------------------------------
// member functions for class Intersect
// -----------------------------------------------------------------------
// LCOV_EXCL_START - cnu
RelExpr *Intersect::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
// Check that there are an equal number of select items on both sides.
//
const RETDesc &leftTable = *child(0)->getRETDesc();
const RETDesc &rightTable = *child(1)->getRETDesc();
if (leftTable.getDegree() != rightTable.getDegree()) {
// 4014 The operands of an intersect must be of equal degree.
*CmpCommon::diags() << DgSqlCode(-4014);
bindWA->setErrStatus();
return this;
}
// Join the columns of both sides. This is wrong semantics though! ##
//
*CmpCommon::diags() << DgSqlCode(-3022) // ## INTERSECT not yet supported
<< DgString0("INTERSECT"); // ##
bindWA->setErrStatus(); // ##
if (bindWA->errStatus()) return NULL; // ##
//
ItemExpr *predicate = intersectColumns(leftTable, rightTable, bindWA);
RelExpr *join = new (bindWA->wHeap())
Join(child(0)->castToRelExpr(),
child(1)->castToRelExpr(),
REL_JOIN,
predicate);
// Bind the join.
//
join = join->bindNode(bindWA)->castToRelExpr();
if (bindWA->errStatus()) return join;
// Change the output of the join to just the left side.
//
delete join->getRETDesc();
join->setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, leftTable));
bindWA->getCurrentScope()->setRETDesc(join->getRETDesc());
// QSTUFF
NAString fmtdList1(bindWA->wHeap());
LIST(TableNameMap*) xtnmList1(bindWA->wHeap());
NAString fmtdList2(bindWA->wHeap());
LIST(TableNameMap*) xtnmList2(bindWA->wHeap());
leftTable.getTableList(xtnmList1, &fmtdList1);
rightTable.getTableList(xtnmList2, &fmtdList2);
if (child(0)->getGroupAttr()->isStream() &&
child(1)->getGroupAttr()->isStream()){
*CmpCommon::diags() << DgSqlCode(-4159)
<< DgString0(fmtdList1) << DgString1(fmtdList2);
bindWA->setErrStatus();
return this;
}
// Needs to be removed when supporting get_next for INTERSECT
if (getGroupAttr()->isEmbeddedUpdateOrDelete()) {
*CmpCommon::diags() << DgSqlCode(-4160)
<< DgString0(fmtdList1)
<< DgString1(fmtdList2)
<< (child(0)->getGroupAttr()->isEmbeddedUpdate() ?
DgString2("UPDATE"):DgString2("DELETE"))
<< (child(1)->getGroupAttr()->isEmbeddedUpdate() ?
DgString3("UPDATE"):DgString3("DELETE"));
bindWA->setErrStatus();
return this;
}
// QSTUFF
return join;
} // Intersect::bindNode()
// LCOV_EXCL_STOP
// -----------------------------------------------------------------------
// member functions for class Union
// -----------------------------------------------------------------------
RelExpr *Union::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
//
// Bind the conditional expression.
//
ItemExpr *condExprTree = removeCondExprTree();
if (condExprTree)
{
condExprTree->convertToValueIdList(condExpr(), bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) {
return NULL;
}
}
//
// Bind the triggered action exception expression.
//
ItemExpr *trigExprTree = removeTrigExceptExprTree();
if (trigExprTree)
{
// the assumption in the binder (in Union::addValueIdUnion) is that
// unionMap_ count is always less than or equal to one but triggers
// code might increment this number during binding because of
// recursive triggers or triggers that are used more than once
// in the statement. This check fixes the unionMap_ for triggers.
if ((unionMap_ != NULL) && (unionMap_->count_ > 1))
{
unionMap_->count_--;
unionMap_ = new (CmpCommon::statementHeap()) UnionMap;
}
trigExprTree->convertToValueIdList(trigExceptExpr(), bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) {
return NULL;
}
}
AssignmentStArea *assignArea = NULL;
// We store a pointer to this Union node in the assignment statements area.
// This is needed for the compound statements project, in particular when we
// have assignment statements within an IF statement.
if (getUnionForIF()) {
assignArea = bindWA->getAssignmentStArea();
setPreviousIF(assignArea->getCurrentIF());
assignArea->setCurrentIF(this);
}
//
// Bind the child nodes.
//
bindWA->getCurrentScope()->context()->inUnion() = TRUE;
currentChild() = 0;
child(0) = child(0)->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// If we have assignment statements in compound statements, we need to get rid
// of the value ids generated while binding the first child. Also, we create a
// list of the value ids of the variables that are on the left side of a SET
// statement.
if (getUnionForIF() && leftList() && assignArea) {
assignArea->removeLastValueIds(leftList(), this);
}
if (getCondUnary()) {
CollIndex leftDegree = child(0)->getRETDesc()->getDegree();
ItemExpr *tupleExpr = new (bindWA->wHeap()) ConstValue();
for (CollIndex i=0; i+1<leftDegree; i++) {
ItemExpr *con = new (bindWA->wHeap()) ConstValue();
ItemList *list = new (bindWA->wHeap()) ItemList(con, tupleExpr);
tupleExpr = list;
}
RelExpr *tuple = new (bindWA->wHeap()) Tuple(tupleExpr);
// create the selection predicate (1=0) for the Tuple node
ItemExpr *predicate = new (bindWA->wHeap())
BiRelat(ITM_EQUAL,
new (bindWA->wHeap()) ConstValue(1),
new (bindWA->wHeap()) ConstValue(0));
tuple->addSelPredTree(predicate);
RelExpr *tupleRoot = new (bindWA->wHeap()) RelRoot(tuple);
setChild (1, tupleRoot);
}
if (child(1)) {
if (!(child(1)->getOperator().match(REL_ANY_TSJ))) {
bindWA->getCurrentScope()->setRETDesc(NULL);
}
currentChild() = 1;
child(1) = child(1)->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// If we have assignment statements in compound statements,
// we need to get rid of the value ids generated while binding
// the second child.
if (getUnionForIF() && rightList() && assignArea) {
assignArea->removeLastValueIds(rightList(), this);
}
}
// check for & warn against UNIONs that have inconsistent access/lock modes.
// flag "select * from t1 union select * from t2 for <access> mode"
// with a warning that t1 and t2 may have inconsistent access/lock modes.
checkAccessLockModes();
// Copies the leftList and rightList of this conditional union to the
// appropriate list of the conditional union node pointed to by the
// previousIF argument.
Union * previousIF = getPreviousIF();
if (previousIF && getUnionForIF()) {
copyLeftRightListsToPreviousIF(previousIF, bindWA);
}
synthPropForBindChecks();
// QSTUFF
bindWA->getCurrentScope()->context()->inUnion() = FALSE;
//
// Check that there are an equal number of select items on both sides.
//
const RETDesc &leftTable = *child(0)->getRETDesc();
const RETDesc &rightTable = *child(1)->getRETDesc();
RETDesc *resultTable = NULL;
RelRoot * root = bindWA->getTopRoot() ;
if (root) {
if (getGroupAttr()->isStream() && root->hasOrderBy()){
NAString fmtdList1(bindWA->wHeap());
LIST(TableNameMap*) xtnmList1(bindWA->wHeap());
NAString fmtdList2(bindWA->wHeap());
LIST(TableNameMap*) xtnmList2(bindWA->wHeap());
leftTable.getTableList(xtnmList1, &fmtdList1);
rightTable.getTableList(xtnmList2, &fmtdList2);
*CmpCommon::diags() << DgSqlCode(-4166)
<< DgString0(fmtdList1)
<< DgString1(fmtdList2) ;
bindWA->setErrStatus();
return this;
}
}
if (leftTable.getDegree() != rightTable.getDegree()) {
#ifndef NDEBUG
dumpChildrensRETDescs(leftTable, rightTable);
#endif
if ( (!getUnionForIF()) &&
(!getCondUnary()) //for triggers
) {
// 4126 The row-value-ctors of a VALUES must be of equal degree.
// 4066 The operands of a union must be of equal degree.
// This is not necessary if we are in an assignment stmt.
Lng32 sqlcode = bindWA->getCurrentScope()->context()->inTupleList() ?
-4126 : -4066;
*CmpCommon::diags() << DgSqlCode(sqlcode);
bindWA->setErrStatus();
return this;
}
}
//
// For each select item on both sides, create a ValueIdUnion and insert its
// ValueId into the select list for the union.
//
// We check to see if there were assignments on either side
if ( !getUnionForIF() ) {
resultTable = new (bindWA->wHeap()) RETDesc(bindWA);
for (CollIndex i = 0; i < leftTable.getDegree(); i++) {
ValueIdUnion *vidUnion = new (bindWA->wHeap())
ValueIdUnion(leftTable.getValueId(i),
rightTable.getValueId(i),
NULL_VALUE_ID,
#pragma nowarn(1506) // warning elimination
getUnionFlags());
#pragma warn(1506) // warning elimination
vidUnion->setIsTrueUnion(TRUE);
vidUnion->bindNode(bindWA);
if (bindWA->errStatus()) {
delete vidUnion;
delete resultTable;
return this;
}
ValueId valId = vidUnion->getValueId();
addValueIdUnion(valId, bindWA->wHeap());
resultTable->addColumn(bindWA, leftTable.getColRefNameObj(i), valId);
}
}
else {
// Case in which we have assignment statements below this node.
// We have to carefully match the value ids in the IF and ELSE parts.
// For instance, if SET :a = ... occurs in both branches or only in one.
if (getUnionForIF() && assignArea) {
resultTable = createReturnTable(assignArea, bindWA);
}
}
setRETDesc(resultTable);
bindWA->getCurrentScope()->setRETDesc(resultTable);
//
// Bind the base class.
//
// We are done binding this node. The current IF node is now the closest
// IF node that is also an ancestor of this node
if (getUnionForIF() && assignArea) {
assignArea->setCurrentIF(getPreviousIF());
}
// QSTUFF
// this is not a hard restriction. Once the get_next protocol supports unions
// similar to the split-top operator, this check can be removed.
if (getGroupAttr()->isEmbeddedUpdateOrDelete() ||
(getGroupAttr()->isEmbeddedInsert() && !isSystemGenerated_) ||
(bindWA->isEmbeddedIUDStatement())) {
if (getUnionForIF()) {
*CmpCommon::diags() << DgSqlCode(-4210);
bindWA->setErrStatus();
return this;
}
NAString fmtdList1(bindWA->wHeap());
LIST(TableNameMap*) xtnmList1(bindWA->wHeap());
NAString fmtdList2(bindWA->wHeap());
LIST(TableNameMap*) xtnmList2(bindWA->wHeap());
leftTable.getTableList(xtnmList1, &fmtdList1);
rightTable.getTableList(xtnmList2, &fmtdList2);
// Fix for Solution 10-070117-1834.
// The error message for -4161 assumed that both sides
// of the UNION are embedded operations. For a
// query such as
// select * from (delete from t709t1) as x union all (select * from t709t1)
// the right side of the UNION is not an embedded operation.
// Hence, the text for 4161 was changed to a more generic one so
// that all cases are covered in this one text message.
*CmpCommon::diags() << DgSqlCode(-4161)
<< DgString0(fmtdList1)
<< DgString1(fmtdList2);
bindWA->setErrStatus();
return this;
}
// QSTUFF
// ++MV
// Bind the alternateRightChildOrderExprTree expression.
//
ItemExpr *alternateRightChildOrderExprTree = removeAlternateRightChildOrderExprTree();
if (alternateRightChildOrderExprTree)
{
alternateRightChildOrderExprTree->
convertToValueIdList(alternateRightChildOrderExpr(), bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) {
return NULL;
}
}
// --MV
// Bind the base class.
//
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus()) {
delete resultTable;
return boundExpr;
}
return boundExpr;
} // Union::bindNode()
// check for & warn against UNIONs that have inconsistent access/lock modes
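// (Illustrative sketch; table names and access clauses are placeholders:
//   SELECT * FROM t1 FOR BROWSE ACCESS
//   UNION ALL
//   SELECT * FROM t2 FOR REPEATABLE ACCESS
// would draw warning 3192 because the two scans carry different
// DP2 lock flags.)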
void Union::checkAccessLockModes()
{
Scan *left = child(0)->getAnyScanNode();
Scan *right = child(1)->getAnyScanNode();
if (!left || !right) return; // no-op.
// UNION is user-specified as opposed to system-generated (eg, by
// triggers/RI in GenericUpdate::inlinePipelineActions, etc)
if (isSystemGenerated_) {
return;
}
Lng32 lockFlagSession = CmpCommon::transMode()->getDP2LockFlags().getValue();
StmtLevelAccessOptions optionsLeft = left->accessOptions();
StmtLevelAccessOptions optionsRight = right->accessOptions();
Lng32 lockFlagLeft = lockFlagSession;
Lng32 lockFlagRight = lockFlagSession;
if (optionsLeft.userSpecified()) {
lockFlagLeft = optionsLeft.getDP2LockFlags().getValue();
}
if (optionsRight.userSpecified()) {
lockFlagRight = optionsRight.getDP2LockFlags().getValue();
}
if (lockFlagLeft != lockFlagRight) {
*CmpCommon::diags()
<< DgSqlCode(3192)
<< DgString0(left->getTableName().getQualifiedNameAsString())
<< DgString1(right->getTableName().getQualifiedNameAsString());
}
} // Union::checkAccessLockModes()
void Union::copyLeftRightListsToPreviousIF(Union * previousIF, BindWA * bindWA)
{
AssignmentStHostVars *thisLeftList = leftList();
AssignmentStHostVars *thisRightList = rightList();
// If the previous IF node does not have a left list, we copy the left and right
// lists to that left list
if (previousIF->currentChild() == 0 && !(previousIF->leftList())) {
AssignmentStHostVars *leftListOfPreviousIF = previousIF->getCurrentList(bindWA);
// Copy the leftList of this node to the left list of the previous IF
leftListOfPreviousIF->addAllToListInIF(thisLeftList) ;
// Copy the rightList of this node to the left list of the previous IF
leftListOfPreviousIF->addAllToListInIF(thisRightList) ;
}
// If the previous IF node does not have a right list, we copy the left and right
// lists to that right list.
if (previousIF->currentChild() == 1 && !(previousIF->rightList())) {
AssignmentStHostVars *rightListOfPreviousIF = previousIF->getCurrentList(bindWA);
// Copy the leftList of this node to the right list of the previous IF
rightListOfPreviousIF->addAllToListInIF(thisLeftList) ;
// Copy the rightList of this node to the right list of the previous IF
rightListOfPreviousIF->addAllToListInIF(thisRightList) ;
}
} // Union::copyLeftRightListsToPreviousIF
// -----------------------------------------------------------------------
// MV --
// A debugging method for dumping the columns in the RETDesc of both
// children when they do not match.
void Union::dumpChildrensRETDescs(const RETDesc& leftTable,
const RETDesc& rightTable)
{
#ifndef NDEBUG
// -- MVs. Debugging code !!!!! TBD
fprintf(stdout, " # Left Right\n");
CollIndex maxIndex, minIndex;
NABoolean leftIsBigger;
if (leftTable.getDegree() > rightTable.getDegree())
{
maxIndex = leftTable.getDegree();
minIndex = rightTable.getDegree();
leftIsBigger = TRUE;
}
else
{
maxIndex = rightTable.getDegree();
minIndex = leftTable.getDegree();
leftIsBigger = FALSE;
}
for (CollIndex i=0; i<minIndex; i++)
{
ColumnDesc *leftColDesc = leftTable.getColumnList()->at(i);
ColumnDesc *rightColDesc = rightTable.getColumnList()->at(i);
NAString leftCol (leftColDesc->getColRefNameObj().getColRefAsString());
NAString rightCol(rightColDesc->getColRefNameObj().getColRefAsString());
fprintf(stdout, " %3d %-55s %-55s \n",
i, leftCol.data(), rightCol.data());
}
if (leftIsBigger)
{
for (CollIndex j=minIndex; j<maxIndex; j++)
{
ColumnDesc *leftColDesc = leftTable.getColumnList()->at(j);
NAString leftCol(leftColDesc->getColRefNameObj().getColRefAsString());
fprintf(stdout, " %3d %-35s\n",
j, leftCol.data());
}
}
else
{
for (CollIndex k=minIndex; k<maxIndex; k++)
{
ColumnDesc *rightColDesc = rightTable.getColumnList()->at(k);
NAString rightCol(rightColDesc->getColRefNameObj().getColRefAsString());
fprintf(stdout, " %3d %-35s \n",
k, rightCol.data());
}
}
#endif
}
// ----------------------------------------------------------------------
// static helper functions for classes RelRoot and GroupByAgg
// ----------------------------------------------------------------------
static NABoolean containsGenericUpdate(const RelExpr *re)
{
if (re->getOperator().match(REL_ANY_GEN_UPDATE)) return TRUE;
for (Int32 i = 0; i < re->getArity(); ++i ) {
if (re->child(i) && containsGenericUpdate(re->child(i))) return TRUE;
}
return FALSE;
}
static NABoolean containsUpdateOrDelete(const RelExpr *re)
{
if (re->getOperator().match(REL_ANY_UPDATE_DELETE))
return TRUE;
for (Int32 i = 0; i < re->getArity(); ++i ) {
if (re->child(i) && containsUpdateOrDelete(re->child(i)))
return TRUE;
}
return FALSE;
}
// QSTUFF
static GenericUpdate *getGenericUpdate(RelExpr *re)
{
if (re) {
if (re->getOperatorType() == REL_UNARY_UPDATE ||
re->getOperatorType() == REL_UNARY_DELETE)
return (GenericUpdate *)re;
for (Int32 i = 0; i < re->getArity(); ++i) { // check all children (both sides)
GenericUpdate *gu = getGenericUpdate(re->child(i));
if (gu) return gu;
}
}
return NULL;
}
static NABoolean checkUnresolvedAggregates(BindWA *bindWA)
{
const ValueIdSet &aggs = bindWA->getCurrentScope()->getUnresolvedAggregates();
if (aggs.isEmpty()) return FALSE; // no error
NAString unparsed(bindWA->wHeap());
for (ValueId vid = aggs.init(); aggs.next(vid); aggs.advance(vid)) {
const ItemExpr *ie = vid.getItemExpr();
CMPASSERT(ie->isAnAggregate());
Aggregate *agg = (Aggregate *)ie;
// Don't display the COUNT() part of SUM()/COUNTxxx(), our implementation of AVG().
// Display only the COUNT_NONULL() of our implementation of VARIANCE and STDDEV.
// This is to avoid printing the aggregate functions more than once.
if((agg->origOpType() != ITM_AVG || agg->getOperatorType() == ITM_SUM) &&
(!(agg->origOpType() == ITM_STDDEV || agg->origOpType() == ITM_VARIANCE)
|| agg->getOperatorType() == ITM_COUNT_NONULL)){
unparsed += ", ";
if (agg->origOpType() == ITM_COUNT_STAR__ORIGINALLY)
unparsed += "COUNT(*)";
else
agg->unparse(unparsed, DEFAULT_PHASE, USER_FORMAT_DELUXE);
}
}
unparsed.remove(0,2); // remove initial ", "
// 4015 Aggregate functions placed incorrectly.
*CmpCommon::diags() << DgSqlCode(-4015) << DgString0(unparsed);
bindWA->setErrStatus();
return TRUE;
} // checkUnresolvedAggregates()
// ----------------------------------------------------------------------
// member functions for class RelRoot
// ----------------------------------------------------------------------
static NABoolean isRenamedColInSelList(BindWA * bindWA, ItemExpr * col,
ItemExprList &origSelectList,
CollIndex &indx,
RETDesc * childRETDesc)
{
if (col->getOperatorType() != ITM_REFERENCE)
return FALSE;
ColReference * havingColReference = (ColReference*)col;
CollIndex j = 0;
NABoolean found = FALSE;
while (j < origSelectList.entries())
{
ItemExpr * selectListEntry = origSelectList[j];
if (selectListEntry->getOperatorType() == ITM_RENAME_COL)
{
const ColRefName &selectListColRefName =
*((RenameCol *)selectListEntry)->getNewColRefName();
if (havingColReference->getColRefNameObj() == selectListColRefName)
{
if (found)
{
// multiple entries with the same name. Error.
*CmpCommon::diags() << DgSqlCode(-4195)
<< DgString0(selectListColRefName.getColName());
bindWA->setErrStatus();
return FALSE;
}
ColumnNameMap *baseColExpr = NULL;
if (childRETDesc)
baseColExpr = childRETDesc->findColumn(selectListColRefName);
if ( NOT baseColExpr)
{
found = TRUE;
indx = j;
}
}
} // rename col
j++;
} // while
return found;
}
static short replaceRenamedColInHavingWithSelIndex(
BindWA * bindWA,
ItemExpr * expr,
ItemExprList &origSelectList,
NABoolean &replaced,
NABoolean ¬AllowedWithSelIndexInHaving,
RETDesc * childRETDesc)
{
if (((expr->getOperatorType() >= ITM_ROW_SUBQUERY) &&
(expr->getOperatorType() <= ITM_GREATER_EQ_ANY)) ||
((expr->getOperatorType() >= ITM_AVG) &&
(expr->getOperatorType() <= ITM_VARIANCE)) ||
((expr->getOperatorType() >= ITM_DIFF1) &&
(expr->getOperatorType() <= ITM_NOT_THIS)))
{
notAllowedWithSelIndexInHaving = TRUE;
return 0;
}
for (Int32 i = 0; i < expr->getArity(); i++)
{
CollIndex j = 0;
if (isRenamedColInSelList(bindWA, expr->child(i), origSelectList,
j, childRETDesc))
{
SelIndex * selIndex = new(bindWA->wHeap()) SelIndex(j+1);
expr->setChild(i, selIndex);
replaced = TRUE;
}
else if (bindWA->errStatus())
return -1;
else if (replaceRenamedColInHavingWithSelIndex(
bindWA, expr->child(i), origSelectList, replaced,
notAllowedWithSelIndexInHaving, childRETDesc))
return -1;
}
return 0;
}
static short setValueIdForRenamedColsInHaving(BindWA * bindWA,
ItemExpr * expr,
ValueIdList &compExpr)
{
if (((expr->getOperatorType() >= ITM_ROW_SUBQUERY) &&
(expr->getOperatorType() <= ITM_GREATER_EQ_ANY)) ||
((expr->getOperatorType() >= ITM_AVG) &&
(expr->getOperatorType() <= ITM_VARIANCE)) ||
((expr->getOperatorType() >= ITM_DIFF1) &&
(expr->getOperatorType() <= ITM_NOT_THIS)))
{
return 0;
}
for (Int32 i = 0; i < expr->getArity(); i++)
{
if (expr->child(i)->getOperatorType() == ITM_SEL_INDEX)
{
SelIndex * si = (SelIndex*)expr->child(i)->castToItemExpr();
si->setValueId(compExpr[si->getSelIndex()-1]);
}
else
setValueIdForRenamedColsInHaving(bindWA, expr->child(i), compExpr);
}
return 0;
}
// Method to update the SelIndexes after we have gone through a
// selectList expansion due to MVFs or Subqueries with degree > 1.
// Used to update the orderByTree.
//
// Returns (via updatedIndecies) the set of SelIndexes that were updated.
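// (For example: if select list item 2 is an MVF that expanded into three
// outputs, the select list grew by an offset of 2, so a SelIndex(4) found
// in the ORDER BY must be bumped to SelIndex(6).)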
static void fixUpSelectIndecies(ItemExpr * expr, ValueIdSet &updatedIndecies,
CollIndex idx, CollIndex offset)
{
if (expr == NULL ) return;
for (Int32 i = 0; i < expr->getArity(); i++)
{
// Only update ones that we haven't already done.
if ((expr->child(i)->getOperatorType() == ITM_SEL_INDEX) &&
!updatedIndecies.contains(expr->child(i)->getValueId()))
{
SelIndex * si = (SelIndex*)expr->child(i)->castToItemExpr();
if (si->getSelIndex() > idx)
{
si->setSelIndex(si->getSelIndex() + offset);
updatedIndecies += si->getValueId();
}
}
else
fixUpSelectIndecies(expr->child(i), updatedIndecies, idx, offset);
}
// Now check myself..
// Only update ones that we haven't already done.
if ((expr->getOperatorType() == ITM_SEL_INDEX) &&
!updatedIndecies.contains(expr->getValueId()))
{
SelIndex * si = (SelIndex*)expr->castToItemExpr();
if (si->getSelIndex() > idx)
{
si->setSelIndex(si->getSelIndex() + offset);
updatedIndecies += si->getValueId();
}
}
}
// Method to update the SelIndexes after we have gone through a
// selectList expansion due to MVFs or Subqueries with degree > 1.
// Used to update the GroupByList.
//
// Returns (via updatedIndecies) the set of SelIndexes that were updated.
static void fixUpSelectIndeciesInSet(ValueIdSet & expr,
ValueIdSet &updatedIndecies,
CollIndex idx,
CollIndex offset)
{
for (ValueId vid = expr.init(); expr.next(vid); expr.advance(vid))
{
// Only update ones that we haven't already done.
if (((ItemExpr *)vid.getItemExpr())->getOperatorType() == ITM_SEL_INDEX &&
!updatedIndecies.contains(vid))
{
SelIndex * si = (SelIndex*) vid.getItemExpr();
if (si->getSelIndex() > idx)
{
si->setSelIndex(si->getSelIndex() + offset);
updatedIndecies += si->getValueId();
}
}
}
}
RelRoot * RelRoot::transformOrderByWithExpr(BindWA *bindWA)
{
NABoolean specialMode = (CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON);
if (NOT specialMode)
return this;
ItemExprList origSelectList(bindWA->wHeap());
ItemExprList origOrderByList(bindWA->wHeap());
CollIndex origSelectListCount ;
if ((getCompExprTree() == NULL) &&
(child(0)->getOperatorType() != REL_GROUPBY))
{
return this;
}
ItemExpr *orderByTree = getOrderByTree();
if (!orderByTree)
return this;
origOrderByList.insertTree(orderByTree);
if (getCompExprTree())
origSelectList.insertTree(getCompExprTree());
else if (child(0)->getOperatorType() == REL_GROUPBY)
{
// this is the case: select distinct <expr> from t order by <expr>
GroupByAgg * grby = (GroupByAgg *)(child(0)->castToRelExpr());
if (grby->child(0) && grby->child(0)->getOperatorType() == REL_ROOT)
{
RelRoot * selRoot = (RelRoot*)grby->child(0)->castToRelExpr();
if (selRoot->getCompExprTree())
origSelectList.insertTree(selRoot->getCompExprTree());
}
}
Lng32 selListCount = origSelectList.entries();
// if there is an expression in the order by list and this expression matches
// a select list expression, then replace it with the index of that select list item.
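// (For example -- illustrative only: in
//    SELECT a+b FROM t ORDER BY a+b
// the ORDER BY entry duplicate-matches select list item 1 and is
// replaced by SelIndex(1); a trailing DESC is preserved by wrapping
// the SelIndex in an InverseOrder node.)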
ItemExprList newOrderByList((Lng32)origOrderByList.entries(), bindWA->wHeap());
NABoolean orderByExprFound = FALSE;
for (Lng32 i = 0; i < origOrderByList.entries(); i++)
{
ItemExpr * currOrderByItemExpr = origOrderByList[i];
NABoolean isDesc = FALSE;
if (currOrderByItemExpr->getOperatorType() == ITM_INVERSE)
{
currOrderByItemExpr = currOrderByItemExpr->child(0)->castToItemExpr();
isDesc = TRUE;
}
if (NOT ((currOrderByItemExpr->getOperatorType() == ITM_SEL_INDEX) ||
(currOrderByItemExpr->getOperatorType() == ITM_REFERENCE) ||
(currOrderByItemExpr->getOperatorType() == ITM_CONSTANT)))
{
NABoolean found = FALSE;
Lng32 selListIndex = 0;
ItemExpr * selItem = NULL;
while ((NOT found) && (selListIndex < selListCount))
{
selItem = origSelectList[selListIndex];
found = currOrderByItemExpr->duplicateMatch(*selItem);
if (NOT found)
selListIndex++;
}
if (NOT found)
{
*CmpCommon::diags() << DgSqlCode(-4197)
<< DgString0("ORDER BY");
bindWA->setErrStatus();
return NULL;
}
selItem->setInOrderByOrdinal(TRUE);
currOrderByItemExpr = new(bindWA->wHeap()) SelIndex(selListIndex+1);
if (isDesc)
{
currOrderByItemExpr = new(bindWA->wHeap()) InverseOrder(currOrderByItemExpr);
}
orderByExprFound = TRUE;
} // if order by expr
newOrderByList.insert(currOrderByItemExpr);
}
if ((orderByExprFound) &&
(newOrderByList.entries() > 0))
{
removeOrderByTree();
addOrderByTree(newOrderByList.convertToItemExpr());
}
return this;
}
///////////////////////////////////////////////////////////////////////////
//
// This methods performs the following in this order:
//
// If groupby name refers to a renamed col name in the select list,
// replace group by entry with ordinal position of that sel list entry.
//
// If groupby ordinal exceeds the number of select list elements,
// return error.
//
// If groupby ordinal refers to a '*', return error.
//
// If groupby ordinal refers to a column(ITM_REFERENCE) or a renamed
// col name(ITM_RENAME_COL) whose child is a column(ITM_REFERENCE),
// replace ordinal with actual col name.
//
// If there are ordinals in group by list, mark RelRoot indicating
// phase2 transformation is needed.
//
// Mark all select list item exprs which are referenced as an ordinal to
// indicate that the groupby check to validate grouping columns is not needed
// for the subtree rooted below that select list item.
//
///////////////////////////////////////////////////////////////////////////
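// (Illustrative examples; table and column names are placeholders:
//   SELECT a+b AS s, COUNT(*) FROM t GROUP BY s  -- renamed col -> GROUP BY 1
//   SELECT a, b FROM t GROUP BY 3                -- error 4007, ordinal > degree
//   SELECT * FROM t GROUP BY 1                   -- error 4185, ordinal is a '*'
//   SELECT a, b FROM t GROUP BY 2, 1             -- ordinals become cols b, a)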
RelRoot * RelRoot::transformGroupByWithOrdinalPhase1(BindWA *bindWA)
{
NABoolean specialMode =
((CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) ||
(CmpCommon::getDefault(MODE_SPECIAL_2) == DF_ON) ||
(CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON));
if ((CmpCommon::getDefault(GROUP_BY_USING_ORDINAL) == DF_OFF) &&
(NOT specialMode))
return this;
// Make sure the child of the root is a groupby node, or a sequence node
// whose child is a groupby node,
// and that it has a groupby clause, if in specialMode.
if (child(0)->getOperatorType() != REL_GROUPBY &&
(child(0)->getOperatorType() != REL_SEQUENCE ||
(child(0)->child(0) && child(0)->child(0)->getOperatorType()!=REL_GROUPBY)))
return this;
NABoolean compExprTreeIsNull = FALSE;
CollIndex origSelectListCount ;
if (getCompExprTree() == NULL)
{
compExprTreeIsNull = TRUE;
origSelectListCount = 0;
// return this;
}
GroupByAgg * grby;
if (child(0)->getOperatorType() == REL_GROUPBY)
{
grby = (GroupByAgg *)(child(0)->castToRelExpr());
}
else
{// sequence node above group by
grby = (GroupByAgg *)(child(0)->child(0)->castToRelExpr());
}
DCMPASSERT(grby != NULL);
if ((NOT specialMode) &&
(grby->getGroupExprTree() == NULL))
return this;
ItemExpr * groupExprTree = grby->getGroupExprTree();
ItemExprList origSelectList(bindWA->wHeap());
ItemExprList origGrbyList(bindWA->wHeap());
if (groupExprTree)
{
origGrbyList.insertTree(groupExprTree);
}
if (NOT compExprTreeIsNull)
{
origSelectList.insertTree(getCompExprTree());
origSelectListCount = origSelectList.entries();
}
ItemExprList newGroupByList((Lng32)origGrbyList.entries(), bindWA->wHeap());
NABoolean foundSelIndex = FALSE;
NABoolean lookForRenamedCols = TRUE;
if ((CmpCommon::getDefault(GROUP_BY_USING_ORDINAL) != DF_ALL) &&
(NOT specialMode))
lookForRenamedCols = FALSE;
NABoolean lookForExprInGroupByClause = TRUE;
if (CmpCommon::getDefault(COMP_BOOL_92) == DF_ON)
lookForExprInGroupByClause = FALSE;
// See if UDF_SUBQ_IN_AGGS_AND_GBYS is enabled. It is enabled if the
// default is ON, or if the default is SYSTEM and ALLOW_UDF is ON.
NABoolean udfSubqInAggGrby_Enabled = FALSE;
DefaultToken udfSubqTok = CmpCommon::getDefault(UDF_SUBQ_IN_AGGS_AND_GBYS);
if ((udfSubqTok == DF_ON) ||
(udfSubqTok == DF_SYSTEM))
udfSubqInAggGrby_Enabled = TRUE;
// This list will store duplicate expressions specified in the select list and
// the GroupBy clause. It helps with specifying the select index as well as
// marking the InGroupByOrdinal flag correctly (Gen Sol:10-100129-7836).
NAList<CollIndex> listOfExpressions(CmpCommon::statementHeap());
for (CollIndex i = 0; (i < (CollIndex) origGrbyList.entries());i++)
{
ItemExpr * currGroupByItemExpr =
((ItemExpr *) origGrbyList[i])->castToItemExpr();
ItemExpr * newGroupByItemExpr = NULL;
NABoolean selIndexError = FALSE;
Int64 selIndex = -1;
if (currGroupByItemExpr->getOperatorType() == ITM_CONSTANT)
{
ConstValue * cv = (ConstValue*)currGroupByItemExpr;
if ((cv->canGetExactNumericValue()) &&
(cv->getType()->getScale() == 0))
{
selIndex = cv->getExactNumericValue();
if ((selIndex >= 0) && (selIndex < MAX_COMSINT32))
{
if (selIndex == 0 || selIndex > origSelectListCount)
{
// remember that this select index is in error.
// Look for this constant in the select list.
// If it is not found, then this const will be
// treated as a select index and an error will be
// returned. If it is found in the select list,
// then it will be treated as a group by expression.
selIndexError = TRUE;
}
else
currGroupByItemExpr =
new(bindWA->wHeap()) SelIndex((Lng32)selIndex);
}
}
}
NABoolean found = FALSE;
if ((currGroupByItemExpr->getOperatorType() != ITM_REFERENCE) &&
(currGroupByItemExpr->getOperatorType() != ITM_SEL_INDEX) &&
(lookForExprInGroupByClause))
{
Int32 selListIndex = -1, lastMatch = -1;
CollIndex j = 0;
while ((NOT found) && (j < origSelectListCount))
{
ItemExpr * selectListEntry = origSelectList[j];
if ((selectListEntry->getOperatorType() != ITM_REFERENCE) &&
((selectListEntry->getOperatorType() != ITM_RENAME_COL) ||
((selectListEntry->child(0)) &&
(selectListEntry->child(0)->getOperatorType() != ITM_REFERENCE))))
{
ItemExpr * renameColEntry = NULL;
if (selectListEntry->getOperatorType() == ITM_RENAME_COL)
{
renameColEntry = selectListEntry;
selectListEntry = selectListEntry->child(0);
}
found =
currGroupByItemExpr->duplicateMatch(*selectListEntry);
if (found)
{
lastMatch = j;
if(!listOfExpressions.contains(j))
{
selListIndex = j;
listOfExpressions.insert(j);
selectListEntry->setInGroupByOrdinal(TRUE);
if (renameColEntry)
renameColEntry->setInGroupByOrdinal(TRUE);
}
else
found = FALSE;
}
}
j++;
} // while
if(lastMatch != -1)
{
found = TRUE;
if(selListIndex == -1)
selListIndex = lastMatch;
if (bindWA->inViewDefinition())
currGroupByItemExpr =
new(bindWA->wHeap()) SelIndex(selListIndex+1,
currGroupByItemExpr);
else
currGroupByItemExpr = new(bindWA->wHeap()) SelIndex(selListIndex+1);
}
} // expr in group by clause
if ((NOT found) &&
(selIndexError) &&
(selIndex > 0))
{
// this const was not found in the select list and it was
// not a valid select index.
// Return an error.
*CmpCommon::diags() << DgSqlCode(-4007)
<< DgInt0((Lng32)selIndex)
<< DgInt1((Lng32)origSelectList.entries());
bindWA->setErrStatus();
return NULL;
}
if (compExprTreeIsNull)
return this;
if (currGroupByItemExpr->getOperatorType() == ITM_SEL_INDEX)
{
SelIndex * si = (SelIndex*)currGroupByItemExpr;
if (si->getSelIndex() > origSelectList.entries())
{
*CmpCommon::diags() << DgSqlCode(-4007)
<< DgInt0((Lng32)si->getSelIndex())
<< DgInt1((Lng32)origSelectList.entries());
bindWA->setErrStatus();
return NULL;
}
ItemExpr * selectListEntry = origSelectList[si->getSelIndex()-1];
if ((selectListEntry->getOperatorType() == ITM_RENAME_COL) &&
(selectListEntry->child(0)->getOperatorType() == ITM_REFERENCE))
{
// make a copy of this entry's child
newGroupByItemExpr =
selectListEntry->child(0)->
castToItemExpr()->copyTopNode(NULL, bindWA->wHeap());
}
else if (selectListEntry->getOperatorType() == ITM_REFERENCE)
{
if (((ColReference*)selectListEntry)-> getColRefNameObj().isStar())
{
*CmpCommon::diags() << DgSqlCode(-4185) ;
bindWA->setErrStatus();
return NULL;
}
// make a copy of this entry
newGroupByItemExpr =
selectListEntry->copyTopNode(NULL, bindWA->wHeap());
}
else
{
selectListEntry->setInGroupByOrdinal(TRUE);
newGroupByItemExpr = currGroupByItemExpr;
}
foundSelIndex = TRUE;
} // group by ordinal
else if (currGroupByItemExpr->getOperatorType() == ITM_REFERENCE)
{
ColReference * groupByColReference =
(ColReference*)currGroupByItemExpr;
// find out if this ColReference name is a renamed col in the
// select list.
if (lookForRenamedCols &&
groupByColReference->getCorrNameObj().getQualifiedNameObj().getObjectName().length() == 0)
{
NABoolean renamedColsInSelectList = FALSE;
CollIndex j = 0;
NABoolean found = FALSE;
while (j < origSelectList.entries())
{
ItemExpr * selectListEntry = origSelectList[j];
if (selectListEntry->getOperatorType() == ITM_RENAME_COL)
{
renamedColsInSelectList = TRUE;
const ColRefName &selectListColRefName =
*((RenameCol *)selectListEntry)->getNewColRefName();
if (groupByColReference->getColRefNameObj().getColName()
== selectListColRefName.getColName())
{
if (found)
{
// multiple entries with the same name. Error.
*CmpCommon::diags() << DgSqlCode(-4195)
<< DgString0(selectListColRefName.getColName());
bindWA->setErrStatus();
return NULL;
}
foundSelIndex = TRUE;
selectListEntry->setInGroupByOrdinal(TRUE);
newGroupByItemExpr =
new(bindWA->wHeap()) SelIndex(j+1);
((SelIndex *) newGroupByItemExpr)->
setRenamedColNameInGrbyClause(TRUE);
found = TRUE;
}
} // rename col
j++;
} // while
if ((NOT renamedColsInSelectList) &&
(j == origSelectList.entries()))
lookForRenamedCols = FALSE;
} // lookForRenamedCols
if (! newGroupByItemExpr)
newGroupByItemExpr = currGroupByItemExpr;
} // else foundSelIndex
else if ((currGroupByItemExpr->getOperatorType() == ITM_USER_DEF_FUNCTION) &&
(udfSubqInAggGrby_Enabled))
newGroupByItemExpr = currGroupByItemExpr;
else if ((currGroupByItemExpr->getOperatorType() == ITM_ROW_SUBQUERY) &&
(udfSubqInAggGrby_Enabled))
newGroupByItemExpr = currGroupByItemExpr;
else
{
*CmpCommon::diags() << DgSqlCode(-4197)
<< DgString0("GROUP BY");
bindWA->setErrStatus();
return NULL;
}
newGroupByList.insert(newGroupByItemExpr);
} // for
if ((foundSelIndex) &&
(newGroupByList.entries() > 0))
{
grby->removeGroupExprTree();
grby->addGroupExprTree(newGroupByList.convertToItemExpr());
}
if ((CmpCommon::getDefault(GROUP_BY_USING_ORDINAL) != DF_OFF) ||
(specialMode)) {
grby->setParentRootSelectList(getCompExprTree());
}
// if order by and group by are specified, check to see that
// all columns specified in the order by clause are also present
// in the group by clause.
allOrderByRefsInGby_ = FALSE;
if ((specialMode) &&
(getOrderByTree()) &&
(grby->getGroupExprTree() != NULL))
{
ItemExpr *orderByTree = getOrderByTree();
ItemExprList orderByList(orderByTree, bindWA->wHeap());
ItemExprList groupByList(grby->getGroupExprTree(), bindWA->wHeap());
allOrderByRefsInGby_ = TRUE;
for (CollIndex ii = 0; ii < orderByList.entries(); ii++)
{
ItemExpr * colRef = orderByList[ii];
if (colRef->getOperatorType() == ITM_INVERSE)
colRef = colRef->child(0)->castToItemExpr();
if (colRef && colRef->getOperatorType() == ITM_REFERENCE)
{
ColReference * obyColRef = (ColReference*)colRef;
NABoolean found = FALSE;
for (CollIndex j = 0; j < groupByList.entries(); j++)
{
ItemExpr * gbyExpr = groupByList[j];
if (gbyExpr->getOperatorType() == ITM_REFERENCE)
{
ColReference * gbyColRef = (ColReference*)gbyExpr;
if (obyColRef->getColRefNameObj().getColName() ==
gbyColRef->getColRefNameObj().getColName())
{
found = TRUE;
break;
}
} // if
} // for
if (NOT found)
{
allOrderByRefsInGby_ = FALSE;
break;
}
} // if
} // for
} // if
return this;
}
RelRoot * RelRoot::transformGroupByWithOrdinalPhase2(BindWA *bindWA)
{
NABoolean specialMode =
((CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) ||
(CmpCommon::getDefault(MODE_SPECIAL_2) == DF_ON) ||
(CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON));
// Make sure the child of the root is a groupby node, or a sequence node
// whose child is a groupby node.
if (child(0)->getOperatorType() != REL_GROUPBY &&
(child(0)->getOperatorType() != REL_SEQUENCE ||
(child(0)->child(0) && child(0)->child(0)->getOperatorType()!=REL_GROUPBY)))
return this;
GroupByAgg * grby;
RelSequence * seqNode=NULL;
if (child(0)->getOperatorType() == REL_GROUPBY )
{
grby=(GroupByAgg *)(child(0)->castToRelExpr());
}
else
{//sequence node above group by
grby=(GroupByAgg *)(child(0)->child(0)->castToRelExpr());
seqNode=(RelSequence *)(child(0)->castToRelExpr());
}
DCMPASSERT(grby != NULL);
ValueIdSet &groupExpr = grby->groupExpr();
// copy of groupExpr used to identify the changed
// value ids
ValueIdSet groupExprCpy(grby->groupExpr());
// When we encounter subqueries or MVFs in the select list,
// these get expanded at bind time, and so the select indexes have to
// be offset by the expansion amount, since the sel_index number
// reflects the select list at parse time.
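// (For example -- illustrative only: in SELECT mvf(x), y FROM t GROUP BY 2,
// if mvf() expands to two outputs, the bound select list has degree 3 and
// the SelIndex for y must be bumped from 2 to 3.)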
for (ValueId vid = groupExpr.init();
groupExpr.next(vid);
groupExpr.advance(vid))
{
if (vid.getItemExpr()->getOperatorType() == ITM_SEL_INDEX)
{
CollIndex selIndexExpansionOffset = 0;
SelIndex * si = (SelIndex*)(vid.getItemExpr());
ValueId grpById =
compExpr()[si->getSelIndex() -1];
si->setValueId(grpById);
if (child(0)->getOperatorType() != REL_SEQUENCE)
{
groupExprCpy.remove(vid);
groupExprCpy.insert(grpById);
}
else
{ //sequence
CMPASSERT(seqNode);
const ValueIdSet seqCols = ((const RelSequence*)seqNode)->sequencedColumns();
ItemExpr * ie = grpById.getItemExpr();
ItemExpr::removeNotCoveredFromExprTree(ie,seqCols);
//ie = ie->copyTree(bindWA->wHeap());
//ie = ie->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
groupExprCpy.remove(vid);
groupExprCpy.insert(ie->getValueId());
ie = new (bindWA->wHeap()) NotCovered(ie);
ie->synthTypeAndValueId();
compExpr()[si->getSelIndex()-1] = ie->getValueId();
seqNode->addSequencedColumn(ie->getValueId());
}
switch (grpById.getItemExpr()->getOperatorType())
{
case ITM_VALUEID_PROXY:
{
ValueId derivedId =
(( ValueIdProxy *)(grpById.getItemExpr()))->isDerivedFrom();
// If this is not the ValueIdProxy that represents the MVF or Subq
// skip the expansion.
if ((( ValueIdProxy *)(grpById.getItemExpr()))->
needToTransformChild() != TRUE) break;
ValueIdList outputs;
switch (derivedId.getItemExpr()->getOperatorType())
{
case ITM_USER_DEF_FUNCTION:
{
// When we reference a UDF in the groupBy clause,
// if the UDF is an MVF (has multiple outputs), we need to add
// the other elements from the MVF's outputs.
// These elements have already been expanded into the
// select list, so all we need to do is to add them to the
// groupby expression.
// By default, we associate the valueId of the MVF with
// its first output, so we just need to copy the rest of the
// outputs.
UDFunction *udf = (UDFunction *) derivedId.getItemExpr();
const RoutineDesc *rDesc = udf->getRoutineDesc();
outputs = rDesc->getOutputColumnList();
break;
}
case ITM_ROW_SUBQUERY:
{
// When we reference a subquery in the groupBy clause,
// if the subquery has a degree > 1, we need to add the other
// elements from the subquery's select list.
Subquery *subq = (Subquery *) derivedId.getItemExpr();
RelRoot *subqRoot = (RelRoot *) subq->getSubquery();
outputs = subqRoot->compExpr();
break;
}
default:
CMPASSERT(0); // we don't support anything else
}
// Add in the other outputs from the MVF/Subquery
for (CollIndex i=1; i < outputs.entries(); i++)
{
selIndexExpansionOffset ++;
groupExprCpy.insert(outputs[i]);
}
// Need to check the groupBy and orderBy lists
// for selIndexes with an index greater than this one.
// If we find one, bump its index into the select list by
// the expansion offset.
ValueIdSet fixedUpIndecies;
fixUpSelectIndeciesInSet(grby->groupExpr(),fixedUpIndecies,
si->getSelIndex(),
selIndexExpansionOffset);
fixUpSelectIndecies(getOrderByTree(), fixedUpIndecies,
si->getSelIndex(),
selIndexExpansionOffset);
break;
}
}
// Now that we have swapped the vid list from the grouping
// expression to the corresponding one from the select list,
// go through each expression, collect the base columns,
// and mark each column as referenced for histograms.
// Since this is only for group by, we will get only single-
// interval histograms - 10-081015-6557.
ValueIdSet columns;
grpById.getItemExpr()->findAll(ITM_BASECOLUMN, columns, TRUE, TRUE);
for (ValueId id = columns.init();
columns.next(id);
columns.advance(id))
{
NAColumn *nacol = id.getNAColumn();
if (nacol->isReferencedForHistogram())
continue;
nacol->setReferencedForSingleIntHist();
}
} // found Sel Index
}
// recreate the groupExpr expression after updating the value ids
grby->setGroupExpr (groupExprCpy);
if (((CmpCommon::getDefault(GROUP_BY_USING_ORDINAL) != DF_OFF) ||
(specialMode)) &&
(grby->selPredTree()) &&
(grby->selIndexInHaving()))
{
setValueIdForRenamedColsInHaving(bindWA, grby->selPredTree(),
compExpr());
BindScope *currScope = bindWA->getCurrentScope();
ItemExpr *havingPred = grby->removeSelPredTree();
currScope->context()->inHavingClause() = TRUE;
havingPred->convertToValueIdSet(grby->selectionPred(),
bindWA, ITM_AND);
currScope->context()->inHavingClause() = FALSE;
if (bindWA->errStatus())
return this;
}
if (orderByTree_ && seqNode && grby)
{
ItemExprList origOrderByList(bindWA->wHeap());
origOrderByList.insertTree(orderByTree_);
ItemExprList newOrderByList((Lng32)origOrderByList.entries(), bindWA->wHeap());
for (CollIndex i = 0; (i < (CollIndex) origOrderByList.entries());i++)
{
ItemExpr * currOrderByItemExpr =
((ItemExpr *) origOrderByList[i])->castToItemExpr();
ItemExpr * newOrderByItemExpr = currOrderByItemExpr;
if (currOrderByItemExpr->getOperatorType() == ITM_SEL_INDEX)
{
SelIndex * si = (SelIndex*)(currOrderByItemExpr);
if (compExpr()[si->getSelIndex()-1].getItemExpr()->getOperatorType() != ITM_BASECOLUMN)
{
newOrderByItemExpr = compExpr()[si->getSelIndex()-1].getItemExpr();
}
}
newOrderByList.insert(newOrderByItemExpr);
}
orderByTree_ = newOrderByList.convertToItemExpr();
}
return this;
}
void RelRoot::transformTDPartitionOrdinals(BindWA *bindWA)
{
if(!getHasTDFunctions())
return ;
if (getCompExprTree() == NULL)
return ;
BindScope *currScope = bindWA->getCurrentScope();
RelExpr * realChildNode = NULL;
if (child(0)->getOperatorType() == REL_FIRST_N)
{
realChildNode = child(0)->child(0);
}
else
{
realChildNode = child(0);
}
if(realChildNode->getOperatorType() != REL_SEQUENCE )
{
return;
}
RelSequence * seqNode = (RelSequence *)realChildNode;
if (!seqNode->getPartitionBy())
{
return;
}
ItemExpr * partitionBy = seqNode->getPartitionBy()->copyTree(bindWA->wHeap());
ItemExprList origSelectList(getCompExprTree(), bindWA->wHeap());
ItemExprList origPartitionByList(bindWA->wHeap());
if (partitionBy)
{
origPartitionByList.insertTree(partitionBy);
}
for (CollIndex i = 0; (i < (CollIndex) origPartitionByList.entries());i++)
{
ItemExpr * currPartitionByItemExpr =
((ItemExpr *) origPartitionByList[i])->castToItemExpr();
NABoolean selIndexError = FALSE;
Int64 selIndex = -1;
if (currPartitionByItemExpr->getOperatorType() == ITM_CONSTANT)
{
ConstValue * cv = (ConstValue*)currPartitionByItemExpr;
if ((cv->canGetExactNumericValue()) &&
(cv->getType()->getScale() == 0))
{
selIndex = cv->getExactNumericValue();
if (selIndex <= 0 || selIndex > origSelectList.entries())
{ //index in error -- produce error message
//in TD mode group by <constant> -- constant is purely positional
//selIndexError = TRUE;
*CmpCommon::diags() << DgSqlCode(-4366);
bindWA->setErrStatus();
return;
}
else
{
origPartitionByList.usedEntry( i )=
origSelectList.usedEntry((CollIndex)selIndex-1)->copyTree(bindWA->wHeap());
}
}
}
}
seqNode->setPartitionBy(origPartitionByList.convertToItemExpr());
}
// resolveAggregates -
// If aggregate functions have been found in the select list, then
// either attach the aggregate functions to the existing GroupBy below
// this RelRoot, or if there is no GroupBy create a GroupBy with an
// empty groupby list (scalar) and attach the aggregate functions to
// this GroupBy.
//
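// (For example -- illustrative only: SELECT MAX(a) FROM t has no GROUP BY,
// so a scalar GroupByAgg with an empty grouping list is created between
// this RelRoot and its child, and MAX(a) is attached to that GroupByAgg.)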
void RelRoot::resolveAggregates(BindWA *bindWA)
{
BindScope *currScope = bindWA->getCurrentScope();
if (NOT currScope->getUnresolvedAggregates().isEmpty()) {
if (getHasTDFunctions())
{ //Using rank function and aggregate functions in the same scope is not supported.
*CmpCommon::diags() << DgSqlCode(-4365);
bindWA->setErrStatus();
return;
}
RelExpr *sequence = currScope->getSequenceNode();
// The aggregates were used without a GROUP BY or HAVING
// clause, i.e. an implicit aggregation is performed
// (with a NULL result for an empty input table).
NABoolean implicitGrouping = (child(0)->getOperatorType() != REL_GROUPBY);
if(getHasOlapFunctions()) {
implicitGrouping = (sequence->child(0)->getOperatorType() != REL_GROUPBY);
}
GroupByAgg *groupByAgg = NULL;
if (implicitGrouping) {
RelExpr * realChildNode = NULL;
// if my child is a FIRST_N node, then add the GroupByAgg below it.
// Otherwise, add the GroupByAgg below me.
if (child(0)->getOperatorType() == REL_FIRST_N)
{
realChildNode = child(0)->child(0);
}
else
realChildNode = child(0);
if(getHasOlapFunctions()) {
realChildNode = sequence->child(0);
}
groupByAgg =
new (bindWA->wHeap()) GroupByAgg(realChildNode,REL_GROUPBY);
realChildNode->setBlockStmt(isinBlockStmt());
if(getHasOlapFunctions())
sequence->setChild(0, groupByAgg);
else if (child(0)->getOperatorType() == REL_FIRST_N)
child(0)->setChild(0, groupByAgg);
else
setChild(0, groupByAgg);
groupByAgg->setBlockStmt(isinBlockStmt());
}
else {
if(getHasOlapFunctions()) {
groupByAgg = (GroupByAgg *)sequence->child(0).getPtr();
} else {
groupByAgg = (GroupByAgg *)child(0).getPtr();
}
}
NAString colName(bindWA->wHeap());
Lng32 sqlCode = 0;
ValueId valId = NULL_VALUE_ID;
if (currScope->context()->unaggColRefInSelectList()) {
sqlCode = -4021;
valId = currScope->context()->unaggColRefInSelectList()->getValueId();
}
else if (implicitGrouping) {
// Genesis 10-000414-9410: "SELECT SUM(A),* FROM T; --no GROUP BY"
// cannot be flagged with err 4012 in ColReference::bindNode
// because table not marked "grouped" yet.
//
const ColumnDescList &cols = *currScope->getRETDesc()->getColumnList();
CollIndex i, n = cols.entries();
for (i=0; i<n; i++) {
const ColumnDesc *col = cols[i];
if (!col->isGrouped())
if (col->getColRefNameObj().isStar() ||
col->getValueId().getNAColumn(TRUE/*okIfNotColumn*/)) {
sqlCode = -4012;
valId = col->getValueId();
colName = col->getColRefNameObj().getColRefAsAnsiString();
break;
}
}
}
// Table has no GROUP BY (so no grouping columns exist at all)
// but is grouped by dint of a column reference within an aggregate,
// making any unaggregated column references illegal, by ANSI 7.9 SR 7.
if (sqlCode) {
if (colName.isNull()) {
const NAColumn *nacol = valId.getNAColumn(TRUE/*okIfNotColumn*/);
if (nacol)
colName = nacol->getFullColRefNameAsAnsiString();
else
colName = "_unnamed_column_";
}
// 4012 Col ref must be grouping or aggregated -- no star ref allowed!
// 4021 The select list contains a non-grouping non-aggregated column.
*CmpCommon::diags() << DgSqlCode(sqlCode) << DgColumnName(colName);
bindWA->setErrStatus();
return;
}
// Move the unresolved aggregates into the groupby node and bind
// (simply returns if "groupByAgg" isn't new).
groupByAgg->aggregateExpr() += currScope->getUnresolvedAggregates();
currScope->getUnresolvedAggregates().clear();
groupByAgg->bindNode(bindWA);
}
}
// resolveSequenceFunctions -
// Add the unresolvedSequenceFunctions to the Sequence node for this
// scope. If there are sequence functions, but no sequence node, it
// is an error. Also if there is a sequence node, but no sequence
// functions, it is an error.
//
//
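// (Illustrative only: a SEQUENCE BY clause without any sequence functions
// draws error 4111, while sequence functions without a SEQUENCE BY clause
// draw error 4110.)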
void RelRoot::resolveSequenceFunctions(BindWA *bindWA)
{
BindScope *currScope = bindWA->getCurrentScope();
// If we have a Sequence node associated with the RelRoot node,
// process it here.
RelSequence *sequenceNode = (RelSequence *)currScope->getSequenceNode();
currScope->getSequenceNode() = NULL;
if (sequenceNode) {
if (getHasTDFunctions() && sequenceNode->child(0)->getOperatorType() == REL_GROUPBY)
{ //Using rank function and group by clause in the same scope is not supported.
*CmpCommon::diags() << DgSqlCode(-4366);
bindWA->setErrStatus();
return;
}
CMPASSERT(sequenceNode->getOperatorType() == REL_SEQUENCE);
// Do not allow sequence functions or OLAP Window functions
// with Embedded Updates.
//
if (getGroupAttr()->isEmbeddedUpdateOrDelete()){
*CmpCommon::diags() << DgSqlCode(-4202)
<< (getGroupAttr()->isEmbeddedUpdate() ?
DgString0("UPDATE"):DgString0("DELETE"));
bindWA->setErrStatus();
return;
}
// If there are some sequence functions that have not been attached
// to the Sequence node, do so now. These were found when binding
// the select list.
//
sequenceNode->
addUnResolvedSeqFunctions(currScope->getUnresolvedSequenceFunctions(),
bindWA);
currScope->getUnresolvedSequenceFunctions().clear();
currScope->getAllSequenceFunctions().clear();
if (bindWA->errStatus()) return;
// Make sure the sequence function has some work to do.
// The cast is needed since the compiler will attempt to pick the
// protected (writable) version of 'sequenceFunctions()'. (Is this
  // a compiler bug?)
//
if ((((const RelSequence *)sequenceNode)->sequenceFunctions().isEmpty() )
&&
( !getHasOlapFunctions() &&
((const RelSequence *)sequenceNode)->requiredOrder().entries() != 0 )) {
// Can't have a sequence by clause without
// sequence functions.
//
*CmpCommon::diags() << DgSqlCode(-4111);
bindWA->setErrStatus();
return;
}
} else if (! currScope->getUnresolvedSequenceFunctions().isEmpty()) {
// Can't have sequence functions without a
// sequence by clause.
// First, loop through the list of functions.
//
ValueIdSet &unresolved = currScope->getUnresolvedSequenceFunctions();
NAString unparsed(bindWA->wHeap());
for (ValueId vid = unresolved.init(); unresolved.next(vid); unresolved.advance(vid)) {
ItemExpr *ie = vid.getItemExpr();
CMPASSERT(ie->isASequenceFunction());
unparsed += ", ";
ie->unparse(unparsed, DEFAULT_PHASE, USER_FORMAT_DELUXE);
}
unparsed.remove(0,2); // remove initial ", "
*CmpCommon::diags() << DgSqlCode(-4110) << DgString0(unparsed);
bindWA->setErrStatus();
return;
}
}
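// For example (illustrative sketches), a sequence function without a
// SEQUENCE BY clause, as in
//   SELECT RUNNINGSUM(a) FROM t;
// raises error 4110 above, while a SEQUENCE BY clause without any
// sequence functions raises error 4111.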
// if a where pred is specified on an immediate child scan or rename node,
// and it contains an 'and'ed rownum() predicate of the form:
// rownum < val, or rownum <= val, or rownum = val
// then get the val and make it the firstN value.
// Also, remove this predicate from selPredTree.
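// For example (illustrative, applies only under MODE_SPECIAL_4):
//   SELECT * FROM t WHERE rownum <= 10 AND b > 1;
// sets the firstN value of this root to 10 and leaves "b > 1" in the
// child's selPredTree. For "rownum < val" the firstN value becomes
// val - 1, and "rownum = val" qualifies only when val is 1.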
void RelRoot::processRownum(BindWA * bindWA)
{
NABoolean specialMode = (CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON);
if (NOT specialMode)
return;
if (! child(0))
return;
if ((child(0)->getOperatorType() != REL_SCAN) &&
(child(0)->getOperatorType() != REL_RENAME_TABLE))
return;
if (! child(0)->selPredTree())
return;
ItemExpr * wherePred = child(0)->selPredTree();
ItemExprList iel(wherePred, bindWA->wHeap(), ITM_AND, FALSE, FALSE);
NABoolean found = FALSE;
for (Lng32 i = 0; ((NOT found) && (i < iel.entries())); i++)
{
ItemExpr * ie = iel[i];
if (ie->getArity() != 2)
continue;
if (NOT ((ie->getOperatorType() == ITM_LESS) ||
(ie->getOperatorType() == ITM_EQUAL) ||
(ie->getOperatorType() == ITM_LESS_EQ)))
continue;
ItemExpr * child0 = ie->child(0)->castToItemExpr();
ItemExpr * child1 = ie->child(1)->castToItemExpr();
if (NOT ((child0->getOperatorType() == ITM_REFERENCE) &&
(child1->getOperatorType() == ITM_CONSTANT)))
continue;
ColReference * col = (ColReference*)child0;
ColRefName &colRefName = col->getColRefNameObj();
CorrName &cn = col->getCorrNameObj();
const NAString &catName = cn.getQualifiedNameObj().getCatalogName();
const NAString &schName = cn.getQualifiedNameObj().getSchemaName();
const NAString &objName = cn.getQualifiedNameObj().getObjectName();
const NAString &colName = colRefName.getColName();
if (NOT ((catName.isNull()) &&
(schName.isNull()) &&
(objName.isNull()) &&
(colName == "ROWNUM")))
continue;
ConstValue * cv = (ConstValue*)child1;
if (NOT cv->canGetExactNumericValue())
continue;
Int64 val = cv->getExactNumericValue();
if (val < 0)
continue;
if ((ie->getOperatorType() == ITM_EQUAL) &&
(val != 1))
continue;
if ((ie->getOperatorType() == ITM_LESS) &&
(val > 0))
val--;
setFirstNRows(val);
// remove this pred from the list
iel.removeAt(i);
found = TRUE;
}
if (found)
{
// convert the list back to selection pred.
ItemExpr * ie = iel.convertToItemExpr();
child(0)->removeSelPredTree();
child(0)->addSelPredTree(ie);
}
return;
}
RelExpr *RelRoot::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
if (isTrueRoot())
{
// if this is simple scalar aggregate on a seabase table
// (of the form: select count(*), sum(a) from t; )
// then transform it so it could be evaluated using hbase co-processor.
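      // For example (illustrative), with HBASE_COPROCESSORS on,
      //   SELECT COUNT(*) FROM t;
      // over a seabase table has its scan child replaced below by an
      // ExeUtilHbaseCoProcAggr node that evaluates the count in the
      // coprocessor (ExeUtilOrcFastAggr for ORC-backed hive tables).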
if ((CmpCommon::getDefault(HBASE_COPROCESSORS) == DF_ON) &&
(child(0) && child(0)->getOperatorType() == REL_SCAN))
{
Scan * scan = (Scan*)child(0)->castToRelExpr();
if ((getCompExprTree()) &&
(NOT hasOrderBy()) &&
(! getSelPredTree()) &&
(! scan->getSelPredTree()) &&
(scan->selectionPred().isEmpty()) &&
((scan->getTableName().getSpecialType() == ExtendedQualName::NORMAL_TABLE) ||
(scan->getTableName().getSpecialType() == ExtendedQualName::INDEX_TABLE)) &&
!scan->getTableName().isPartitionNameSpecified() &&
!scan->getTableName().isPartitionRangeSpecified() &&
(NOT bindWA->inViewDefinition()))
{
ItemExprList selList(bindWA->wHeap());
selList.insertTree(getCompExprTree());
// for now, only count(*) can be co-proc'd
if ((selList.entries() == 1) &&
(selList[0]->getOperatorType() == ITM_COUNT) &&
(selList[0]->origOpType() == ITM_COUNT_STAR__ORIGINALLY))
{
NATable *naTable = bindWA->getNATable(scan->getTableName());
if (bindWA->errStatus())
return this;
if (((naTable->getObjectType() == COM_BASE_TABLE_OBJECT) ||
(naTable->getObjectType() == COM_INDEX_OBJECT)) &&
((naTable->isSeabaseTable()) ||
((naTable->isHiveTable()) &&
(naTable->getClusteringIndex()->getHHDFSTableStats()->isOrcFile()))))
{
Aggregate * agg =
new(bindWA->wHeap()) Aggregate(ITM_COUNT,
new (bindWA->wHeap()) SystemLiteral(1),
FALSE /*i.e. not distinct*/,
ITM_COUNT_STAR__ORIGINALLY,
'!');
agg->bindNode(bindWA);
if (bindWA->errStatus())
{
return this;
}
ValueIdSet aggrSet;
aggrSet.insert(agg->getValueId());
ExeUtilExpr * eue = NULL;
if (naTable->isSeabaseTable())
eue =
new(CmpCommon::statementHeap())
ExeUtilHbaseCoProcAggr(scan->getTableName(),
aggrSet);
else
eue =
new(CmpCommon::statementHeap())
ExeUtilOrcFastAggr(scan->getTableName(),
aggrSet);
eue->bindNode(bindWA);
if (bindWA->errStatus())
{
return this;
}
setChild(0, eue);
removeCompExprTree();
addCompExprTree(agg);
} // if seabaseTable
} // count aggr
}
} // coproc on
if (child(0) &&
((child(0)->getOperatorType() == REL_INSERT) ||
(child(0)->getOperatorType() == REL_UNARY_INSERT) ||
(child(0)->getOperatorType() == REL_LEAF_INSERT)))
{
Insert * ins = (Insert*)child(0)->castToRelExpr();
if (ins->isNoRollback())
{
if ((CmpCommon::getDefault(AQR_WNR)
!= DF_OFF) &&
(CmpCommon::getDefault(AQR_WNR_INSERT_CLEANUP)
!= DF_OFF))
ins->enableAqrWnrEmpty() = TRUE;
}
if (CmpCommon::transMode()->anyNoRollback())
{
// tbd - may need to integrate these two.
if ((CmpCommon::getDefault(AQR_WNR)
!= DF_OFF) &&
(CmpCommon::getDefault(AQR_WNR_INSERT_CLEANUP)
!= DF_OFF))
ins->enableAqrWnrEmpty() = TRUE;
}
}
    // If a lob is being extracted as chunks of string, then only one
    // such expression can be specified in the select list.
    // If this is the case, then insert an ExeUtilLobExtract operator.
    // This operator reads lob contents and returns them to the caller as
    // multiple rows.
    // This lobextract function can only be used in the outermost select
    // list and must be converted at this point.
    // It is not evaluated on its own.
if (getCompExprTree())
{
ItemExprList selList(bindWA->wHeap());
selList.insertTree(getCompExprTree());
if ((selList.entries() == 1) &&
(selList[0]->getOperatorType() == ITM_LOBEXTRACT))
{
LOBextract * lef = (LOBextract*)selList[0];
ExeUtilLobExtract * le =
new (PARSERHEAP()) ExeUtilLobExtract
(lef, ExeUtilLobExtract::TO_STRING_,
NULL, NULL, lef->getTgtSize(), 0,
NULL, NULL, NULL, child(0), PARSERHEAP());
le->setHandleInStringFormat(FALSE);
setChild(0, le);
}
}
processRownum(bindWA);
} // isTrueRoot
if (getHasTDFunctions())
{
transformTDPartitionOrdinals(bindWA);
if (bindWA->errStatus()) return NULL;
}
RelRoot * returnedRoot =
transformGroupByWithOrdinalPhase1(bindWA);
if (! returnedRoot)
return NULL;
returnedRoot =
transformOrderByWithExpr(bindWA);
if (! returnedRoot)
return NULL;
if (bindWA->getCurrentScope()->context()->inTableCheckConstraint()) {
// See ANSI 11.9 Leveling Rule 1a (Intermediate Sql).
// 4089 A check constraint cannot contain a subquery.
*CmpCommon::diags() << DgSqlCode(-4089)
<< DgConstraintName(
bindWA->getCurrentScope()->context()->inCheckConstraint()->
getConstraintName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
}
if (isTrueRoot())
bindWA->setTopRoot(this);
bindWA->setBindTrueRoot(isTrueRoot());
if (!bindWA->getAssignmentStArea()) {
bindWA->getAssignmentStArea() =
new (bindWA->wHeap()) AssignmentStArea(bindWA);
bindWA->getAssignmentStArea()->getAssignmentStHostVars() =
new (bindWA->wHeap()) AssignmentStHostVars(bindWA);
}
// If there are one or more output rowset variables, then we introduce
// a RowsetInto node below this Root node. The RowsetInto node will
  // create a Pack node later on when it is bound, so that we can
// insert values into the rowset output variables.
// We don't do this transformation if we are inside a compound statement.
//
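  // For example (illustrative sketch of embedded SQL):
  //   EXEC SQL SELECT a INTO :arr[100] FROM t;
  // is rebound here as newRoot(RowsetInto(oldRoot, :arr, outputSize))
  // so that the Pack node created when the RowsetInto node is bound
  // can fill the output rowset.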
if (isTrueRoot() && assignmentStTree()) {
ItemExpr *outputVar = getOutputVarTree();
if (outputVar) {
CMPASSERT(outputVar->getChild(0)->getOperatorType() == ITM_HOSTVAR);
HostVar *hostVar = (HostVar *) outputVar->getChild(0);
if (hostVar->getType()->getTypeQualifier() == NA_ROWSET_TYPE) {
ItemExpr *outputVar = removeOutputVarTree();
assignmentStTree() = NULL;
// Get the output size expression. It may be a constant or a variable.
ItemExpr * sizeExpr = getHostArraysArea()->outputSize();
// set the SelectIntoRowsets flag
getHostArraysArea()->setHasSelectIntoRowsets(TRUE);
// Create INTO node. Its child is the current root
RelExpr *intoNode =
new (bindWA->wHeap()) RowsetInto(this, outputVar, sizeExpr);
        //In case of FIRST N with ORDER BY, the generator introduces the
        //FIRST N operator. For rowsets the FIRST N node needs to be
        //introduced below the PACK node and not below the top root. So set
        //first N rows for the INTO node and not for the top root.
if (hasOrderBy()) {
intoNode->setFirstNRows(getFirstNRows());
setFirstNRows(-1);
}
// Create a new root node that will go above the RowsetInto node
setRootFlag(FALSE);
RelRoot *newRoot = new (bindWA->wHeap()) RelRoot(intoNode);
newRoot->setRootFlag(TRUE);
// copy the display flag from this true Root to the new root.
// newRoot->setDisplayTree(getDisplayTree());
newRoot->setDisplayTree(TRUE);
newRoot->addInputVarTree(removeInputVarTree());
newRoot->outputVarCnt() = outputVarCnt();
NABoolean defaultSortedRows = newRoot->needFirstSortedRows();
//Int64 defaultFirstNRows = newRoot->getFirstNRows();
newRoot->needFirstSortedRows() = needFirstSortedRows();
//newRoot->setFirstNRows(getFirstNRows());
needFirstSortedRows() = defaultSortedRows;
// setFirstNRows(defaultFirstNRows);
newRoot->rollbackOnError() = rollbackOnError();
// migrate hostArraysArea to newroot, and tell bindWA about it
newRoot->setHostArraysArea(getHostArraysArea());
bindWA->setHostArraysArea(getHostArraysArea());
setSubRoot(FALSE); // old root is no longer the root
newRoot->setSubRoot(TRUE); // newRoot becomes the root
return newRoot->bindNode(bindWA);
}
}
}
if (assignmentStTree() && child(0)->getOperatorType() != REL_ROWSET_INTO) {
AssignmentStHostVars *ptr =
new (bindWA->wHeap()) AssignmentStHostVars(bindWA);
if (ptr->containsRowsets(assignmentStTree())) {
ItemExpr *outputSizeExpr = NULL;
// The user may have used the ROWSET FOR OUTPUT SIZE construct
// set the SelectIntoRowsets flag.
if (getHostArraysArea()) {
outputSizeExpr = getHostArraysArea()->outputSize();
getHostArraysArea()->setHasSelectIntoRowsets(TRUE);
}
// Create RowsetInto node. Its child is the current root
RelExpr *intoNode = new (bindWA->wHeap())
RowsetInto(this, assignmentStTree(), outputSizeExpr);
      //In case of FIRST N with ORDER BY, the generator introduces the
      //FIRST N operator. For rowsets the FIRST N node needs to be
      //introduced below the PACK node and not below the top root. So set
      //first N rows for the INTO node and not for the top root.
if (hasOrderBy()) {
intoNode->setFirstNRows(getFirstNRows());
setFirstNRows(-1);
}
RelRoot *newRoot = new (bindWA->wHeap()) RelRoot(*this);
newRoot->child(0) = intoNode;
newRoot->removeCompExprTree();
setRootFlag(FALSE);
removeInputVarTree();
assignmentStTree() = NULL;
return newRoot->bindNode(bindWA);
}
}
// Create a new scope.
//
if (!isDontOpenNewScope()) // -- Triggers.
{
bindWA->initNewScope();
// MV --
if(TRUE == hasMvBindContext())
{
// Copy the MvBindContext object from the RelRoot node to the
// current BindContext.
bindWA->markScopeWithMvBindContext(getMvBindContext());
}
if (getInliningInfo().isTriggerRoot())
{
CMPASSERT(getInliningInfo().getTriggerObject() != NULL);
bindWA->getCurrentScope()->context()->triggerObj() =
getInliningInfo().getTriggerObject()->getCreateTriggerNode();
}
if (getInliningInfo().isActionOfRI())
bindWA->getCurrentScope()->context()->inRIConstraint() = TRUE;
}
// Save whether the user specified SQL/MP-style access options in the query
// (this is always true for the LOCK stmt, which we must maximize).
//
if (child(0)->getOperatorType() == REL_LOCK) {
accessOptions().updateAccessOptions(
TransMode::ILtoAT(TransMode::READ_COMMITTED_),
((RelLock *)child(0).getPtr())->getLockMode());
accessOptions().updateAccessOptions(
TransMode::ILtoAT(CmpCommon::transMode()->getIsolationLevel()));
}
// QSTUFF: the updateOrDelete flag is set to ensure that scans done as
// part of a generic update cause an exclusive lock to be set to ensure
// a consistent completion of the following update or delete.
if (containsUpdateOrDelete(this))
{
accessOptions().setUpdateOrDelete(TRUE);
}
else if (isTrueRoot())
{
// if the query does not contain any Generic Update nodes, mark it
      // as a read-only query. In that case, we have freedom not to include
// some indexes in the indexes list.
bindWA->setReadOnlyQuery();
}
// This block of code used to be in RelRoot::propagateAccessOptions() which
// used to be called from here. We've since replaced this old 'push' call
// with the 'pull' of BindWA->findUserSpecifiedAccessOption() calls from
// RelRoot, Scan, and GenericUpdate.
// QSTUFF
// We decided to stick with READ COMMITTED as the default access
// (even for streams). However, if we change our mind again, this is
// the place to do it.
// if (getGroupAttr()->isStream() &&
// (accessOptions().accessType() == ACCESS_TYPE_NOT_SPECIFIED_))
// accessOptions().accessType() = SKIP_CONFLICT_;
// Set the flag to indicate to DP2 that this executes an
// embedded update or delete.
if (getGroupAttr()->isEmbeddedUpdateOrDelete())
accessOptions().setUpdateOrDelete(TRUE);
// QSTUFF
if (accessOptions().userSpecified())
bindWA->getCurrentScope()->context()->setStmtLevelAccessOptions(accessOptions());
if (isSubRoot() && getHostArraysArea())
getHostArraysArea()->setRoot(this);
if (isTrueRoot()) {
// If this were false, then SynthType's ValueDesc::create()
// would use a DIFFERENT SchemaDB than BindItemExpr's createValueDesc()
// -- wrong! Assert this only once per query.
CMPASSERT(ActiveSchemaDB() == bindWA->getSchemaDB());
// set the upDateCurrentOf_ attribute for the root if possible
if (child(0)->getOperatorType() == REL_UNARY_UPDATE ||
child(0)->getOperatorType() == REL_UNARY_DELETE) {
GenericUpdate *gu = (GenericUpdate *)child(0)->castToRelExpr();
if (gu->updateCurrentOf()) {
updateCurrentOf() = gu->updateCurrentOf();
currOfCursorName() = gu->currOfCursorName();
}
}
// If we are processing a rowset,
// then the child operator is a REL_TSJ.
// If this is the case, and the operation is
// an update or delete, we need to search
    // further to determine its correct child
// operator type.
// Otherwise, the child operator type is correct.
if (bindWA->getHostArraysArea() &&
bindWA->getHostArraysArea()->hasHostArraysInWhereClause() &&
bindWA->getHostArraysArea()->hasInputRowsetsInSelectPredicate() == HostArraysWA::NO_ &&
NOT bindWA->getHostArraysArea()->hasHostArraysInTuple())
// ensure that we don't flag rowset selects or insert selects with rowsets in the predicate
{
if (bindWA->getHostArraysArea()->hasHostArraysInSetClause()) // includes rowset merge statements too
childOperType() = REL_UNARY_UPDATE;
else
childOperType() = REL_UNARY_DELETE;
}
else
childOperType() = child(0)->getOperator();
// see if we can potentially optimize the buffer sizes for
// oltp queries. Done for update/delete/insert-values/select-unique.
// if scan, olt opt is possible.
if (childOperType() == REL_SCAN)
oltOptInfo().setOltOpt(TRUE);
/*
// For Denali release 1, compound statements are restricted
// to yield at most one row; so olt opt is possible for CS.
// If a compound statement is not pushed down to DP2, then
// OLT optimization will be turned off in generator.
//
// Turn it off for Compound statement as insertion with tuple list
// is possible in a CS.
*/
else if (childOperType() == REL_COMPOUND_STMT)
oltOptInfo().setOltOpt(TRUE);
// if INSERT...VALUES, olt opt is possible.
else if ((childOperType() == REL_UNARY_INSERT) &&
(NOT child(0)->child(0) ||
child(0)->child(0)->getOperatorType() == REL_TUPLE))
oltOptInfo().setOltOpt(TRUE);
} // isTrueRoot
else if (checkFirstNRowsNotAllowed(bindWA)) {
*CmpCommon::diags() << DgSqlCode(-4102);
bindWA->setErrStatus();
return NULL;
}
BindScope *currScope = bindWA->getCurrentScope();
// -- MVs
// Check for the Refresh node before binding, because after binding it
// will be gone.
if (child(0)->getOperatorType() == REL_REFRESH)
setRootOfInternalRefresh();
  // set the correct host arrays area in bindWA for a non-root stmt.
// fix 10-031106-4430 (RG: mxcmp failed to compile INSERT
// statement with rowsets within IF statement)
HostArraysWA *tempWA = NULL;
if ( NOT isTrueRoot() && getHostArraysArea() )
{
tempWA = bindWA->getHostArraysArea();
bindWA->setHostArraysArea(getHostArraysArea());
}
bindWA->setBindTrueRoot(FALSE);
// Bind the children here to determine if we need to rollback on error
// for embedded update/delete's.
//
bindChildren(bindWA);
if ( tempWA )
{
// Restore previous environment
bindWA->setHostArraysArea(tempWA);
}
if (bindWA->errStatus()) return NULL;
// For SPJ, store the spOutParams_ from the bindWA in RelRoot,
// We need it at codegen
if ( bindWA->getSpOutParams ().entries ())
spOutParams_ = &( bindWA->getSpOutParams ());
if (isTrueRoot()) {
if (child(0)->getGroupAttr()->isEmbeddedUpdateOrDelete()) {
// Olt optimization is now supported for embedded updates/deletes (pub/sub
// thingy) for now.
oltOptInfo().setOltOpt(TRUE);
if (getFirstNRows() != -1) {
// [FIRST/ANY n] syntax cannot be used with an embedded update or embedded delete.
*CmpCommon::diags() << DgSqlCode(-4216);
bindWA->setErrStatus();
return NULL;
}
}
// If updateCurrentOf_ not set yet
// Check the tree for a GenericUpdate RelExpr (anywhere in the tree)
// so we can set the root node accordingly.
GenericUpdate *gu = getGenericUpdate(this);
if (!updateCurrentOf() && gu && gu->updateCurrentOf()) {
updateCurrentOf() = gu->updateCurrentOf();
currOfCursorName() = gu->currOfCursorName();
}
// if standalone update/delete(no update where current of),
// olt opt is possible.
if (((childOperType() == REL_UNARY_UPDATE) ||
(childOperType() == REL_UNARY_DELETE)) &&
(NOT updateCurrentOf()))
oltOptInfo().setOltOpt(TRUE);
// If transaction statement (begin/commit/rollback/set xn,
// olt opt is possible.
if (childOperType() == REL_TRANSACTION)
oltOptInfo().setOltOpt(TRUE);
    // Set indication whether the transaction needs to be aborted on error
// during an IUD query.
// Rollback will be done for a query that contains
// rowsets, or an insert which is
// not an 'insert...values' with a single value.
//
// There are more cases when a transaction will be rolled back on
// an IUD error. These are set in GenericUpdate::preCodeGen,
// and DP2(IUD)::preCodeGen.
// These include embedded update or delete, stream access, non-unique
// update or delete... See ::preCodeGen methods for details.
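    // For example, a single-row
    //   INSERT INTO t VALUES (1);
    // keeps rollbackOnError() FALSE, while an
    //   INSERT INTO t SELECT a FROM s;
    // (non-tuple child) or a rowset insert sets it to TRUE below.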
rollbackOnError() = FALSE;
if (childOperType().match(REL_ANY_GEN_UPDATE))
{
if (bindWA->getHostArraysArea() &&
bindWA->getHostArraysArea()->done()) // rowsets
rollbackOnError() = TRUE;
else if ((childOperType() == REL_UNARY_INSERT) &&
(child(0)->child(0) &&
child(0)->child(0)->getOperatorType() != REL_TUPLE))
rollbackOnError() = TRUE;
}
if (bindWA->getHostArraysArea() &&
bindWA->getHostArraysArea()->getTolerateNonFatalError())
{
setTolerateNonFatalError(RelExpr::NOT_ATOMIC_);
}
}
CMPASSERT(currScope == bindWA->getCurrentScope()); // sanity check
  // do not do olt query optimization if rowsets are present.
if (bindWA->getHostArraysArea() && bindWA->getHostArraysArea()->done())
{
oltOptInfo().setOltOpt(FALSE);
if (bindWA->getHostArraysArea()->getTolerateNonFatalError()) {
// we also cannot do dp2 level olt optimization if this is a non-atomic rowset insert
oltOptInfo().setOltEidOpt(FALSE);
}
else {
// but can do dp2 level olt optimization if this is "regular" rowset insert
oltOptInfo().setOltEidOpt(TRUE);
}
}
// If unresolved aggregate functions have been found in the children of the
// root node, that would mean that we are referencing aggregates before
// the groupby operation is performed
if (checkUnresolvedAggregates(bindWA)) return this;
// A RelRoot does not have a select list for SQL update, delete, insert
// statements as well as when the query contains an SQL union. If a
// select list is absent, assign the select list of its child to it.
// This will propagate the selection lists of the children of the
// union up to the root.
//
// Detach the item expression tree for the select list and bind it.
//
ItemExpr *compExprTree = removeCompExprTree();
if (NOT compExprTree) {
// -- for RI and Triggers
if (isEmptySelectList())
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA));
else {
setRETDesc(child(0)->getRETDesc());
getRETDesc()->getValueIdList(compExpr());
}
}
else {
CMPASSERT(!currScope->context()->inSelectList());
currScope->context()->inSelectList() = TRUE;
// QSTUFF
// in case we are binding an embedded generic update within a view
// we have to rename column references using OLD or NEW as
// table names since we adopted the RETDesc/TableDesc of the
// scan node or the view scan node, i.e. the RenameTable node
// at the root of an expanded view.
if (bindWA->renameToScanTable()){
ColReference * cr = NULL;
ItemExpr * itm = compExprTree;
NABoolean done = FALSE;
const CorrName corr =
(getViewScanNode()->getOperatorType() == REL_RENAME_TABLE) ?
((RenameTable *)getViewScanNode())->getTableName() :
((Scan *)getViewScanNode())->getTableDesc()->getCorrNameObj();
while (NOT done){
if (itm->getOperatorType() == ITM_ITEM_LIST){
cr = (ColReference *) itm->getChild(0);
itm = itm->getChild(1)->castToItemExpr();
}
else {
cr = (ColReference *) itm;
done = TRUE;
}
cr->getCorrNameObj().getQualifiedNameObj().
setObjectName(corr.getQualifiedNameObj().getObjectName());
}
}
// QSTUFF
RelRoot *viewQueryRoot = NULL;
StmtDDLCreateView *pCreateView = NULL;
if (bindWA->inViewDefinition()) {
pCreateView = bindWA->getCreateViewParseNode();
if (pCreateView->getQueryExpression() == this) {
viewQueryRoot = this;
CMPASSERT(isTrueRoot());
pCreateView->setCurViewColNum((CollIndex)0);
}
}
// charset inference
compExprTree->setResolveIncompleteTypeStatus(TRUE);
HostArraysWA * arrayWA = bindWA->getHostArraysArea() ;
if (arrayWA && arrayWA->hasHostArraysInTuple()) {
CollIndex counterRowVals = 0;
CMPASSERT(!bindWA->getCurrentScope()->context()->counterForRowValues());
bindWA->getCurrentScope()->context()->counterForRowValues() = &counterRowVals;
// If this query (scope) contains OLAP Window functions, then add
// a Sequence Operator just below the Root node. Also, if aggregates
// exist, resolve them now.
//
setRETDesc(bindRowValues(bindWA, compExprTree, compExpr(), this, isTrueRoot()));
bindWA->getCurrentScope()->context()->counterForRowValues() = NULL;
}
else {
setRETDesc(bindRowValues(bindWA, compExprTree, compExpr(), viewQueryRoot, isTrueRoot()));
}
if (bindWA->errStatus()) return NULL;
if (viewQueryRoot) pCreateView->resetCurViewColNum();
currScope->context()->inSelectList() = FALSE;
}
// MVs --
if (bindWA->isPropagateOpAndSyskeyColumns() &&
child(0)->getOperatorType()!=REL_GROUPBY &&
child(0)->getOperatorType()!=REL_AGGREGATE &&
currScope->getUnresolvedAggregates().isEmpty() &&
!isEmptySelectList() &&
!isTrueRoot())
getRETDesc()->propagateOpAndSyskeyColumns(bindWA, TRUE);
CMPASSERT(currScope == bindWA->getCurrentScope()); // sanity check
currScope->setRETDesc(getRETDesc());
bindWA->setRenameToScanTable(FALSE); // QSTUFF
// Genesis 10-980106-2038 + 10-990202-1098.
//
if (isTrueRoot()) {
castComputedColumnsToAnsiTypes(bindWA, getRETDesc(), compExpr());
if (bindWA->errStatus()) return NULL;
}
// Genesis 10-970822-2581. See finalize() in SqlParser.y.
//
// If we are in a compound statement (an IF's UNION), do not issue an error.
//
// Added condition for CALL StoredProcedures
  // If we invoke a CALL statement and the # of out params does not match
  // the # of columns, we make that check in CallSP::bindNode, so ignore
  // it here.
if (isTrueRoot() &&
(child(0)->getOperatorType() != REL_CALLSP &&
(child(0)->getOperatorType() != REL_COMPOUND_STMT &&
(child(0)->getOperatorType() != REL_TUPLE &&
(Int32)getRETDesc()->getDegree() != 0))) &&
(child(0)->getOperatorType() != REL_UNION ||
(!((Union *) (RelExpr *) child(0))->getUnionForIF())) &&
outputVarCntValid() &&
outputVarCnt() != (Int32)getRETDesc()->getDegree() &&
(outputVarCnt() ||
CmpCommon::context()->GetMode() != STMT_DYNAMIC)) {
// 4093 The number of output parameters ($0) must equal the number of cols
// 4094 The number of output host vars ($0) must equal the number of cols
Lng32 sqlcode = (CmpCommon::context()->GetMode() == STMT_DYNAMIC) ?
-4093 : -4094;
*CmpCommon::diags() << DgSqlCode(sqlcode)
#pragma nowarn(1506) // warning elimination
<< DgInt0(outputVarCnt()) << DgInt1(getRETDesc()->getDegree());
#pragma warn(1506) // warning elimination
bindWA->setErrStatus();
return NULL;
}
ItemExpr *inputVarTree = removeInputVarTree();
if (inputVarTree) {
inputVarTree->convertToValueIdList(inputVars(), bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) return NULL;
// If DYNAMIC SQL compilation, then
// remove from the input var list (list of HostVars and DynamicParams)
    // any env vars that were found to have an equivalence value which is
// valid (parseable) for the context it appears in
// (i.e., we've already bound the env var name's dynamic value,
// so we no longer need the env var name at all).
// Right now, this means that in sqlci you can say
// set envvar xyz cat.sch.tbl;
// select * from $xyz;
//
if (CmpCommon::context()->GetMode() == STMT_DYNAMIC) {
for (CollIndex i = inputVars().entries(); i--; ) {
HostVar *hostVar = (HostVar *)inputVars()[i].getItemExpr();
if (hostVar->getOperatorType() == ITM_HOSTVAR &&
hostVar->isPrototypeValid() &&
(hostVar->isEnvVar() ||
hostVar->isDefine()))
inputVars().removeAt(i);
}
} // STMT_DYNAMIC
} // inputVarTree
// add to the inputVars, any user functions that are to be treated
  // like input values, that is, evaluated once and used thereafter.
// Do not insert duplicate value ids.
for (CollIndex i = 0; i < bindWA->inputFunction().entries(); i++ ) {
if (NOT inputVars().contains(bindWA->inputFunction()[i]))
inputVars().insert(bindWA->inputFunction()[i]);
}
// If aggregate functions have been found in the select list, then
// create a groupby node with an empty groupby list, if the child is not
// already a groupby node.
//
resolveAggregates(bindWA);
if (bindWA->errStatus()) return NULL;
// Add the unresolvedSequenceFunctions to the Sequence node for this
// scope. If there are sequence functions, but no sequence node, it
// is an error. Also if there is a sequence node, but no sequence
// functions, it is an error.
// If OLAP Window functions exist for this scope, they will have been
// translated into sequence functions by this point and so will be added
// to the Sequence node here.
//
resolveSequenceFunctions(bindWA);
if (bindWA->errStatus()) return NULL;
BindScope *prevScope = bindWA->getPreviousScope(currScope);
NABoolean inRowSubquery = FALSE;
if (prevScope)
inRowSubquery = prevScope->context()->inRowSubquery();
if (inRowSubquery && (CmpCommon::getDefault(COMP_BOOL_137) == DF_OFF))
addOneRowAggregates(bindWA);
returnedRoot =
transformGroupByWithOrdinalPhase2(bindWA);
if (! returnedRoot)
return NULL;
// ItemExpr *orderByTree = removeOrderByTree();
ItemExpr *orderByTree = removeOrderByTree();
if (orderByTree) {
//
// Tandem extension to ANSI (done only if source table is not grouped!):
// Allow the ORDER BY clause to reference columns in the source table even
// if the columns are not referenced in the select list. Treat the extra
// columns as *system* columns so that they can be referenced by name
// (ORDER BY name) but not by position in select list (ORDER BY n).
// Thus, select-list columns have precedence, as they should since ANSI
// allows only them in ORDER BY to begin with!
//
// Add all source columns to system column list of temporary orderBy;
// remove select-list columns from this system column list;
// insert select-list columns into the *user* column list
// (these must be in separate loops to set up the orderBy XCNM correctly!).
// Then bind the temporary (convert to ValueId list), reset the RETDesc.
//
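    // For example,
    //   SELECT a FROM t ORDER BY b;
    // is accepted even though b is not in the select list: b is exposed
    // as a *system* column so it can be referenced by name, while only
    // select-list columns can be referenced by position.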
bindWA->getCurrentScope()->context()->inOrderBy() = TRUE;
CollIndex i;
RETDesc orderBy;
const RETDesc &select = *getRETDesc();
const RETDesc &source = *child(0)->getRETDesc();
// if the source is grouped, then the ORDER BY columns must be in
// the select list. So, don't add any other columns that aren't
// in the select list...
if (source.isGrouped()) {
orderBy.setGroupedFlag();
//10-031125-1549 -begin
//Since we are processing a groupby we should
      //certainly have some node below it. Further, if
      //that node is a REL_ROOT we will certainly have
      //a child. So this rather unusual call sequence
      //is safe. We are actually looking for a pattern
      //like REL_GROUPBY(REL_ROOT(*)) introduced to handle
      //the DISTINCT qualifier.
      //For example, if we have a query like
      //select distinct j as jcol from t1 order by j;
      //the tree will look like
      //REL_ROOT(REL_GROUPBY(REL_ROOT(REL_SCAN(t1))))
      //This is a non-ANSI query. To support queries like this
//we need to expose "J" as a system column. To do that we need
//to get hold of the RetDesc of the node below the REL_ROOT
//(not the actual REL_ROOT).
RETDesc *src = NULL;
if(child(0)->child(0)&&
child(0)->child(0)->getOperatorType() == REL_ROOT)
{
src = child(0)->child(0)->child(0)->getRETDesc();
}
else
{
src = child(0)->getRETDesc();
}
const ColumnDescList &sysColList = *src->getSystemColumnList();
const ColumnDescList &usrColList = *src->getColumnList();
ValueId vid;
for(i = 0; i < select.getDegree(); i++) {
vid = select.getValueId(i);
for(CollIndex j = 0; j < sysColList.entries(); j++){
if( vid == sysColList[j]->getValueId()){
orderBy.addColumn(bindWA, sysColList[j]->getColRefNameObj()
, sysColList[j]->getValueId()
, SYSTEM_COLUMN);
}
}
for(CollIndex k = 0; k < usrColList.entries(); k++){
if(vid == usrColList[k]->getValueId()){
orderBy.addColumn(bindWA, usrColList[k]->getColRefNameObj()
, usrColList[k]->getValueId()
, SYSTEM_COLUMN);
}
}
}
//10-031125-1549 -end
NABoolean specialMode =
((CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) ||
(CmpCommon::getDefault(MODE_SPECIAL_2) == DF_ON) ||
(CmpCommon::getDefault(MODE_SPECIAL_4) == DF_ON));
// In specialMode, we want to support order by on columns
      // which are not explicitly specified in the select list.
// Ex: select a+1 from t group by a order by a;
// Find all the column references in the orderByTree which are
      // also in the group by list but are not explicitly specified
// in the select list.
// This code path is for cases when both GROUP BY and ORDER BY are
// specified.
// If order by is specified without the group by, then that case
// is already covered in the 'else' portion.
if ((specialMode) &&
(child(0)->getOperatorType() == REL_GROUPBY) &&
(allOrderByRefsInGby_)) // already validated that all order by cols
// are also in group by clause
{
ItemExprList orderByList(orderByTree, bindWA->wHeap());
GroupByAgg * grby=(GroupByAgg *)(child(0)->castToRelExpr());
for (CollIndex ii = 0; ii < orderByList.entries(); ii++)
{
ItemExpr * colRef = orderByList[ii];
if (colRef->getOperatorType() == ITM_INVERSE)
colRef = colRef->child(0)->castToItemExpr();
if (colRef && colRef->getOperatorType() == ITM_REFERENCE)
{
ColReference * obyColRef = (ColReference*)colRef;
for (CollIndex k = 0; k < usrColList.entries(); k++)
{
if (obyColRef->getColRefNameObj().getColName() ==
usrColList[k]->getColRefNameObj().getColName())
{
orderBy.delColumn(bindWA,
usrColList[k]->getColRefNameObj(),
SYSTEM_COLUMN);
orderBy.addColumn(bindWA,
usrColList[k]->getColRefNameObj(),
usrColList[k]->getValueId(),
SYSTEM_COLUMN);
break;
} // if
} // for
} // if
} // for
}
for (i = 0; i < select.getDegree(); i++)
orderBy.delColumn(bindWA, select.getColRefNameObj(i), SYSTEM_COLUMN);
}
else {
      // add the potential ORDER BY columns... omitting the ones that will
      // be in the select list anyway.
orderBy.addColumns(bindWA, *source.getColumnList(), SYSTEM_COLUMN);
orderBy.addColumns(bindWA, *source.getSystemColumnList(), SYSTEM_COLUMN);
for (i = 0; i < select.getDegree(); i++)
orderBy.delColumn(bindWA, select.getColRefNameObj(i), SYSTEM_COLUMN);
}
for (i = 0; i < select.getDegree(); i++)
orderBy.addColumn(bindWA, select.getColRefNameObj(i),
select.getValueId(i), USER_COLUMN);
bindWA->getCurrentScope()->setRETDesc(&orderBy);
// fix for defect 10-010522-2978
// If we need to move this OrderBy to the RelRoot above this one...
// move it to the rowsetReqdOrder_ of that RelRoot, otherwise keep
// it at this level... in the current RelRoot's reqdOrder_
ValueIdList & pRRO = getParentForRowsetReqdOrder() ?
getParentForRowsetReqdOrder()->rowsetReqdOrder_ :
reqdOrder();
    // Replace any SelIndexes in the orderByTree with what they refer to
// before we expand it.
// This is done so that we can deal with subqueries with degree > 1
// and MVFs.
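    // For example (illustrative), in
    //   SELECT a+1, b FROM t ORDER BY 2, 1;
    // the SelIndex entries 2 and 1 are replaced here by the bound
    // expressions for b and a+1 before the list is expanded.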
ItemExpr *sPtr = orderByTree, *ePtr = orderByTree;
Int32 childIdx = 0;
NABoolean onlyOneEntry(TRUE);
CollIndex selListCount = compExpr().entries();
while (sPtr != NULL)
{
if (sPtr->getOperatorType() == ITM_ITEM_LIST)
{
ePtr = sPtr;
sPtr = ePtr->child(0);
childIdx = 0;
onlyOneEntry = FALSE;
}
if (sPtr->getOperatorType() == ITM_SEL_INDEX)
{
SelIndex * si = (SelIndex*)(sPtr);
CollIndex selIndex = si->getSelIndex();
if(selIndex == 0 || selIndex > selListCount)
{
*CmpCommon::diags() << DgSqlCode(-4007)
<< DgInt0((Lng32)si->getSelIndex())
<< DgInt1(selListCount);
bindWA->setErrStatus();
return NULL;
}
ValueId orderById = compExpr()[si->getSelIndex()-1];
if (ePtr->getOperatorType() == ITM_ITEM_LIST)
ePtr->child(childIdx) = orderById.getItemExpr();
else
ePtr = orderById.getItemExpr();
orderById.getItemExpr()->setInOrderByOrdinal(TRUE);
}
if ((ePtr->getArity() == 2) && ePtr->child(1) != NULL &&
ePtr->child(1)->getOperatorType() != ITM_ITEM_LIST &&
childIdx != 1)
childIdx = 1;
else
childIdx = 0;
sPtr = (childIdx == 1) ? ePtr->child(1) : NULL;
}
if (onlyOneEntry)
orderByTree = ePtr;
// If we had any ordinal expressions expand them in case there
// are any UDFs or subquery of degree > 1.
// Also expand any directly referenced UDFs and subqueries of degree > 1.
ItemExprList origOrderByList(orderByTree, bindWA->wHeap());
origOrderByList.convertToItemExpr()->
convertToValueIdList(pRRO, bindWA, ITM_ITEM_LIST);
// end fix for defect 10-010522-2978
if (bindWA->errStatus())
return NULL;
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
bindWA->getCurrentScope()->context()->inOrderBy() = FALSE;
}
// validate that select list doesn't contain any expressions that cannot be
// grouped or ordered.
for (Lng32 selIndex = 0; selIndex < compExpr().entries(); selIndex++)
{
ItemExpr * ie = compExpr()[selIndex].getItemExpr();
if ((ie->inGroupByOrdinal()) || (ie->inOrderByOrdinal()))
{
if (NOT ie->canBeUsedInGBorOB(TRUE))
{
return NULL;
}
}
}
if (hasPartitionBy())
{
ItemExpr *partByTree = removePartitionByTree();
partByTree->convertToValueIdSet(partArrangement_, bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) return NULL;
}
// fix for defect 10-010522-2978
// If we're the upper level RelRoot, we must check to see if we have
// any entries that need to be added to reqdOrder() and add them if
// there are any...
if ( rowsetReqdOrder_.entries() ) {
// We never expect for reqdOrder to contain any entries. But
// if it ever does, we want to be able to take a look at this
// code again to decide whether we should be appending to the
// reqdOrder list. Currently the code is written to append to
// the end of the reqdOrder list, which is likely to be the correct
// behavior even if there are entries in reqdOrder; we just think
// that someone should have the chance to rethink this in the event
// there are entries in reqdOrder and so we're making it fail here
// to allow/force someone to make the decision.
CMPASSERT(reqdOrder().entries() == 0);
// note: NAList<ValueIdList>::insert(const NAList<ValueIdList> &)
// actually does an append to the END of the list (not an
// insert at the head or after the current position).
reqdOrder().insert( rowsetReqdOrder_ );
}
// end fix for defect 10-010522-2978
// Bind the update column specification of a cursor declaration.
// Don't remove the tree: leave it for possible error 4118 in NormRelExpr.
if (updateColTree_) {
updateColTree_->convertToValueIdList(updateCol(), bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) {
if (CmpCommon::diags()->contains(-4001))
*CmpCommon::diags() << DgSqlCode(-4117);
return NULL;
}
if (getGroupAttr()->isEmbeddedDelete()) { // QSTUFF
*CmpCommon::diags() << DgSqlCode(-4169);
bindWA->setErrStatus() ;
return NULL;
}
}
// check whether a CONTROL QUERY SHAPE statement is in effect.
// Do not do if this is a control query statement.
if (ActiveControlDB()->getRequiredShape()) {
OperatorTypeEnum op = child(0)->getOperatorType();
if (!child(0)->isAControlStatement() &&
op != REL_DESCRIBE &&
op != REL_EXPLAIN &&
op != REL_DDL &&
op != REL_LOCK &&
op != REL_UNLOCK &&
op != REL_SET_TIMEOUT &&
op != REL_STATISTICS &&
op != REL_TRANSACTION &&
op != REL_EXE_UTIL)
reqdShape_ = ActiveControlDB()->getRequiredShape()->getShape();
}
// If this is a parallel extract producer query:
// * the number of requested streams must be greater than one and
// not more than the number of configured CPUs
// * force a shape with an ESP exchange node immediately below
// the root
ComUInt32 numExtractStreams = getNumExtractStreams();
if (numExtractStreams_ > 0)
{
// Check the number of requested streams
NADefaults &defs = bindWA->getSchemaDB()->getDefaults();
NABoolean fakeEnv = FALSE;
ComUInt32 numConfiguredESPs = defs.getTotalNumOfESPsInCluster(fakeEnv);
if ((numExtractStreams == 1) || (numExtractStreams > numConfiguredESPs))
{
*CmpCommon::diags() << DgSqlCode(-4119)
<< DgInt0((Lng32) numConfiguredESPs);
bindWA->setErrStatus();
return NULL;
}
// Force the shape. There are three cases to consider:
// a. there is no required shape in the ControlDB
// b. there is a required shape and it is acceptable for this
// parallel extract.
// c. there is a required shape and it is not acceptable.
if (reqdShape_ == NULL)
{
// Case a.
// Manufacture an esp_exchange(cut,N) shape
reqdShape_ = new (bindWA->wHeap())
ExchangeForceWildCard(new (bindWA->wHeap()) CutOp(0),
ExchangeForceWildCard::FORCED_ESP_EXCHANGE,
ExchangeForceWildCard::ANY_LOGPART,
(Lng32) numExtractStreams_);
}
else
{
NABoolean reqdShapeIsOK = FALSE;
if (reqdShape_->getOperatorType() == REL_FORCE_EXCHANGE)
{
ExchangeForceWildCard *exch = (ExchangeForceWildCard *) reqdShape_;
ExchangeForceWildCard::forcedExchEnum whichType = exch->getWhich();
Lng32 howMany = exch->getHowMany();
if (whichType == ExchangeForceWildCard::FORCED_ESP_EXCHANGE &&
howMany == (Lng32) numExtractStreams_)
{
reqdShapeIsOK = TRUE;
}
}
if (reqdShapeIsOK)
{
// Case b.
// Do nothing
}
else
{
// Case c.
// Add an esp_exchange to the top of the required shape
RelExpr *child = reqdShape_;
reqdShape_ = new (bindWA->wHeap())
ExchangeForceWildCard(child,
ExchangeForceWildCard::FORCED_ESP_EXCHANGE,
ExchangeForceWildCard::ANY_LOGPART,
(Lng32) numExtractStreams_);
}
} // if (reqdShape_ == NULL) else ...
} // if (numExtractStreams_ > 0)
// Bind the base class.
//
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus()) return boundExpr;
// If we have dynamic rowsets, we want to replace
// dynamic parameters with available inputs.
if (isTrueRoot() && bindWA->hasDynamicRowsetsInQuery()) {
ValueIdSet inputs = getGroupAttr()->getCharacteristicInputs();
UInt32 j = 0;
// this for loop is over the list of available inputs. We are replacing array
// parameters with hostvars introduced during HostArraysWA::processArrayHostVar
// The hostvars introduced in that method are contained in the inputs() list.
for (ValueId id = inputs.init(); inputs.next(id); inputs.advance(id)) {
if (id.getItemExpr()->getOperatorType() == ITM_DYN_PARAM) {
continue;
}
// We are assuming here that the hostvars introduced are in the same order as
// the parameter arrays in inputVars(), i.e. (hv_A, hv_B) corresponds to
// (?,?,?(as A), ?(as B))
while (j < inputVars().entries()) {
ItemExpr *ie = inputVars()[j].getItemExpr() ;
OperatorTypeEnum ieType = ie->getOperatorType() ;
if (( ieType != ITM_DYN_PARAM) ||
(((DynamicParam *) ie)->getRowsetSize() == 0))
{
// if an ie is not a dynamicParam or it is a scalar dynamic Param do not remove
// it from inputVars_. From embedded SQL it is possible to have scalar and array
// dynamic params in the same statement. This is not possible from ODBC.
j++;
}
else
break ;
}
if (j < inputVars().entries()) {
inputVars().removeAt(j);
inputVars().insertAt(j, id);
j++;
}
}
}
// RelRoot::codeGen() and Statement::execute() use TOPMOST root's accessOpts.
//
if (bindWA->getCurrentScope()->context()->stmtLevelAccessOptions())
if (!accessOptions().userSpecified()) // seems redundant
accessOptions() = *bindWA->getCurrentScope()->context()->stmtLevelAccessOptions();
// Update operations currently require SERIALIZABLE (== MP REPEATABLE_)
// locking level -- the QSTUFF-enabled DP2 now does this, supporting a true
// READ_COMMITTED that is STABLE rather than merely CLEAN.
if (!containsGenericUpdate(this)) {
// Genesis 10-990114-6293:
// This flag tells RelRoot::codeGen to set a flagbit in the root-tdb which
// cli/Statement::execute + compareTransModes() will look at --
// if set, then this "read-write" stmt will be allowed to execute
// in a run-time transmode of read-only W/O HAVING TO BE RECOMPILED.
readOnlyTransIsOK() = TRUE;
}
if (isTrueRoot()) {
if (updateCurrentOf()) {
// add child genericupdate's primary key hostvars to pkeyList.
// The getLeftmostScanNode() method will return the leftmost Scan node
// as the original scan node may have moved due to the IM tree.
pkeyList().insert(child(0)->castToRelExpr()->getLeftmostScanNode()->pkeyHvarList());
}
for(Int32 st=0; st < (Int32)bindWA->getStoiList().entries(); st++)
{
if(bindWA->getStoiList()[st]->getStoi()->isView())
viewStoiList_.insert(bindWA->getStoiList()[st]);
}
if(bindWA->inDDL())
ddlStoiList_.insert(bindWA->getStoiList());
// populate the list of all the routines open information of this query
stoiUdrList_.insert(bindWA->getUdrStoiList());
// populate the list of all the UDF information of this query
udfList_.insert(bindWA->getUDFList());
// check privileges
if (!checkPrivileges(bindWA))
{
bindWA->setErrStatus();
return NULL;
}
// store the trigger's list in the root
if (bindWA->getTriggersList())
{
triggersList_ =
new (bindWA->wHeap()) LIST(ComTimestamp)
(bindWA->wHeap(), bindWA->getTriggersList()->entries());
triggersList_->insert(*(bindWA->getTriggersList()));
// Don't allow OLT optimization when triggers are involved.
oltOptInfo().setOltOpt(FALSE);
}
// store the uninitialized mv list if there are any
// entries
if( bindWA->getUninitializedMvList() )
{
uninitializedMvList_ = new (bindWA->wHeap()) UninitializedMvNameList
(bindWA->wHeap(), bindWA->getUninitializedMvList()->entries());
uninitializedMvList_->insert( *(bindWA->getUninitializedMvList()) );
}
DBG( if (getenv("TVUSG_DEBUG")) bindWA->tableViewUsageList().display(); )
} // isTrueRoot
// Don't allow OLT optimization when ON STATEMENT MV refresh is involved.
if (bindWA->isBindingOnStatementMv())
oltOptInfo().setOltOpt(FALSE);
// disable esp parallelism for merge statements.
// See class RelRoot for details about this.
if ((isTrueRoot()) &&
(bindWA->isMergeStatement()))
{
setDisableESPParallelism(TRUE);
}
// Remove the current scope.
//
if (!isDontOpenNewScope()) // -- Triggers
bindWA->removeCurrentScope();
// In case we have a query of the form
// SET <host var list> = <select statement>
// we must update the value ids of the host variables in that list.
// See Assignment Statement Internal Spec (a project of Compound Statements).
if (assignmentStTree() &&
bindWA->getAssignmentStArea() &&
bindWA->getAssignmentStArea()->getAssignmentStHostVars() &&
!bindWA->getAssignmentStArea()->getAssignmentStHostVars()->
updateValueIds(compExpr(), assignmentStTree())) {
bindWA->setErrStatus();
return NULL;
}
if (getPredExprTree())
{
CMPASSERT(isTrueRoot());
ItemExpr * ie = removePredExprTree();
ie = ie->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
addPredExprTree(ie);
}
if (getFirstNRowsParam())
{
firstNRowsParam_ = firstNRowsParam_->bindNode(bindWA);
if (bindWA->errStatus())
return this;
const SQLInt si(FALSE, FALSE);
ValueId vid = firstNRowsParam_->castToItemExpr()->getValueId();
vid.coerceType(si, NA_NUMERIC_TYPE);
if (vid.getType().getTypeQualifier() != NA_NUMERIC_TYPE)
{
// 4045 must be numeric.
*CmpCommon::diags() << DgSqlCode(-4045) << DgString0(getTextUpper());
bindWA->setErrStatus();
return this;
}
}
if ((NOT hasOrderBy()) &&
((getFirstNRows() != -1) ||
(getFirstNRowsParam())))
{
// create a firstN node to retrieve firstN rows.
FirstN * firstn = new(bindWA->wHeap())
FirstN(child(0), getFirstNRows(), getFirstNRowsParam());
firstn->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
setChild(0, firstn);
// reset firstN indication in the root node.
setFirstNRows(-1);
setFirstNRowsParam(NULL);
}
// if we have no user-specified access options then
// get it from nearest enclosing scope that has one (if any)
if (!accessOptions().userSpecified()) {
StmtLevelAccessOptions *axOpts = bindWA->findUserSpecifiedAccessOption();
if (axOpts) {
accessOptions() = *axOpts;
}
}
if (bindWA->getHoldableType() == SQLCLIDEV_ANSI_HOLDABLE)
{
if (accessOptions().accessType() != ACCESS_TYPE_NOT_SPECIFIED_)
{
if (accessOptions().accessType() == REPEATABLE_)
{
*CmpCommon::diags() << DgSqlCode(-4381);
bindWA->setErrStatus();
return NULL;
}
}
else
{
TransMode::IsolationLevel il=CmpCommon::transMode()->getIsolationLevel();
if (CmpCommon::transMode()->ILtoAT(il) == REPEATABLE_ )
{
*CmpCommon::diags() << DgSqlCode(-4381);
bindWA->setErrStatus();
return NULL;
}
}
}
// The above code is in Scan::bindNode also.
// It would be nice to refactor this common code; someday.
return boundExpr;
} // RelRoot::bindNode()
// Present the select list as a tree of Item Expressions
ItemExpr *RelRoot::selectList()
{
return compExpr().rebuildExprTree(ITM_ITEM_LIST);
} // RelRoot::selectList()
// Returns current place that assignmentStTree_ points to and
// sets that pointer to NULL
// LCOV_EXCL_START - cnu
ItemExpr * RelRoot::removeAssignmentStTree()
{
ItemExpr* tempTree = assignmentStTree_;
assignmentStTree_ = NULL;
return tempTree;
}
// LCOV_EXCL_STOP
bool OptSqlTableOpenInfo::checkColPriv(const PrivType privType,
const PrivMgrUserPrivs *pPrivInfo)
{
CMPASSERT (pPrivInfo);
NATable* table = getTable();
NAString columns = "";
if (!isColumnPrivType(privType))
{
*CmpCommon::diags() << DgSqlCode(-4481)
<< DgString0(PrivMgrUserPrivs::convertPrivTypeToLiteral(privType).c_str())
<< DgString1(table->getTableName().getQualifiedNameAsAnsiString())
<< DgString2(columns);
return false;
}
bool hasPriv = true;
// initialize to something, gets set appropriately below
LIST (Lng32) * colList = NULL ;
switch (privType)
{
case INSERT_PRIV:
{
colList = (LIST (Lng32) *)&(getInsertColList());
break;
}
case UPDATE_PRIV:
{
colList = (LIST (Lng32) *)&(getUpdateColList());
break;
}
case SELECT_PRIV:
{
colList = (LIST (Lng32) *)&(getSelectColList());
break;
}
default:
CMPASSERT(FALSE); // delete has no column privileges.
}
bool collectColumnNames = false;
if (pPrivInfo->hasAnyColPriv(privType))
{
collectColumnNames = true;
columns += "(columns:" ;
}
bool firstColumn = true;
for(size_t i = 0; i < colList->entries(); i++)
{
size_t columnNumber = (*colList)[i];
if (!(pPrivInfo->hasColPriv(privType,columnNumber)))
{
hasPriv = false;
if (firstColumn && collectColumnNames)
{
columns += " ";
firstColumn = false;
}
else
if (collectColumnNames)
columns += ", ";
if (collectColumnNames)
columns += table->getNAColumnArray()[columnNumber]->getColName();
}
}
if (collectColumnNames)
columns += ")" ;
// (colList->entries() == 0) ==> we have a select count(*) type query or a
// select 1 from T type query. In other words the table needs to be accessed
// but no column has been explicitly referenced.
// For such queries if the user has privilege on any one column that is
// sufficient. collectColumnNames indicates whether the user has privilege
// on at least one column. The following if statement applies only to selects
// For update and insert we do not expect colList to be empty.
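  // For example (illustrative): for
  //   SELECT COUNT(*) FROM t;
  // colList is empty, so a privilege on any single column of t
  // suffices; hasAnyColPriv() above already recorded that case in
  // collectColumnNames.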
if ((colList->entries() == 0)&& !collectColumnNames)
{
hasPriv = false;
columns = "";
}
if (!hasPriv)
*CmpCommon::diags() << DgSqlCode(-4481)
<< DgString0(PrivMgrUserPrivs::convertPrivTypeToLiteral(privType).c_str())
<< DgString1(table->getTableName().getQualifiedNameAsAnsiString())
<< DgString2(columns);
return hasPriv;
}
NABoolean RelRoot::checkFirstNRowsNotAllowed(BindWA *bindWA)
{
// do not call this method on a true root.
CMPASSERT(NOT isTrueRoot());
//*****************************************************************
// FirstNRows >= 0 (for FirstN)
// == -2 For Last 0
// == -3 For Last 1
// These values are set in parser; see the code SqlParser.y under
  // Non-Terminal querySpecification when firstN is specified
//******************************************************************
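  // For example (illustrative):
  //   SELECT * FROM (SELECT [FIRST 5] a FROM t) x;
  // reaches this method on the inner, non-true root; the caller then
  // raises error 4102 unless ALLOW_FIRSTN_IN_SUBQUERIES is ON or one of
  // the exceptions below (rowset subroot, compound statement) applies.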
if ( (getFirstNRows() >= 0 ||
getFirstNRows() == -2 ||
getFirstNRows() == -3) && // this root has firstn
(!((getInliningInfo().isEnableFirstNRows()) ||
(getHostArraysArea() && getHostArraysArea()->getHasSelectIntoRowsets()) || //firstn is allowed with a rowset subroot
(assignmentStTree())))) // first n is allowed in a CS. Presence of assignmentStTree
// on a non true root implies presence of select into statement
// within a cs
{
// 4102 The [FIRST/ANY n] syntax can only be used in an outermost SELECT statement.
if (CmpCommon::getDefault(ALLOW_FIRSTN_IN_SUBQUERIES) == DF_OFF)
return TRUE;
}
return FALSE;
}
// ----------------------------------------------------------------------------
// Method: checkPrivileges
//
// This method:
// - Verifies that the user executing the query has the necessary privileges
// - Adds security keys to RelRoot class that need to be checked when priv
// changes (revokes) are performed. Security keys are part of the Query
// Invalidation feature.
// - Also, removes any previously cached entries if the user has no priv
//
// Input: pointer to the binder work area
// Output: result of the check
// TRUE - user has priv
// FALSE - user does not have priv or unexpected error occurred
//
// The ComDiags area is populated with error details
// The BindWA flag setFailedForPrivileges is set to TRUE if priv check fails
// ----------------------------------------------------------------------------
NABoolean RelRoot::checkPrivileges(BindWA* bindWA)
{
// If internal caller and not part of explain, then return
if (Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
return TRUE;
// If qiPath (used for testing) is not 0, skip root user check
NAString qiPath = "";
CmpCommon::getDefault(QI_PATH, qiPath, FALSE);
if (qiPath.length() == 0 && ComUser::isRootUserID())
return TRUE;
// See if there is anything to check
// StoiList contains any tables used in the query
// UdrStoiList contains any routines used in the query
// CoProcAggrList contains any queries using the aggregate co-processor
// SeqValList contains any sequences
if (bindWA->getStoiList().entries() == 0 &&
bindWA->getUdrStoiList().entries() == 0 &&
bindWA->getCoProcAggrList().entries() == 0 &&
bindWA->getSeqValList().entries() == 0)
return TRUE;
// If authorization is not enabled, then return TRUE
if (!CmpCommon::context()->isAuthorizationEnabled())
return TRUE;
ComBoolean QI_enabled = (CmpCommon::getDefault(CAT_ENABLE_QUERY_INVALIDATION) == DF_ON);
NABoolean RemoveNATableEntryFromCache = FALSE ;
  // Have the ComSecurityKey constructor compute the hash value for the User's ID.
// Note: The following code doesn't care about the object's hash value or the resulting
// ComSecurityKey's ActionType....we just need the hash value for the User's ID.
int64_t objectUID = 12345;
Int32 thisUserID = ComUser::getCurrentUser();
ComSecurityKey userKey( thisUserID , objectUID
, SELECT_PRIV
, ComSecurityKey::OBJECT_IS_OBJECT
);
uint32_t userHashValue = userKey.getSubjectHashValue();
// Set up a PrivMgrCommands class in case we need to get privilege information
NAString privMDLoc;
CONCAT_CATSCH(privMDLoc,CmpSeabaseDDL::getSystemCatalogStatic(),SEABASE_PRIVMGR_SCHEMA);
PrivMgrCommands privInterface(privMDLoc.data(), CmpCommon::diags(), PrivMgr::PRIV_INITIALIZED);
PrivStatus retcode = STATUS_GOOD;
// ==> Check privileges for tables used in the query.
SqlTableOpenInfo * stoi = NULL ;
OptSqlTableOpenInfo * optStoi = NULL;
for(Int32 i=0; i<(Int32)bindWA->getStoiList().entries(); i++)
{
RemoveNATableEntryFromCache = FALSE ; // Initialize each time through loop
optStoi = (bindWA->getStoiList())[i];
stoi = optStoi->getStoi();
NATable* tab = optStoi->getTable();
// System metadata tables do not, by default, have privileges stored in the
// NATable structure. Go ahead and retrieve them now.
PrivMgrUserPrivs *pPrivInfo = tab->getPrivInfo();
PrivMgrUserPrivs privInfo;
if (!pPrivInfo)
{
CmpSeabaseDDL cmpSBD(STMTHEAP);
if (cmpSBD.switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META))
{
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
*CmpCommon::diags() << DgSqlCode( -4400 );
return FALSE;
}
retcode = privInterface.getPrivileges( tab->objectUid().get_value(),
tab->getObjectType(), thisUserID,
privInfo);
cmpSBD.switchBackCompiler();
if (retcode != STATUS_GOOD)
{
tab->setRemoveFromCacheBNC(TRUE);
bindWA->setFailedForPrivileges(TRUE);
*CmpCommon::diags() << DgSqlCode( -1034 );
return FALSE;
}
pPrivInfo = &privInfo;
}
// Check each primary DML privilege to see if the query requires it. If
// so, verify that the user has the privilege
bool insertQIKeys = (QI_enabled && tab->getSecKeySet().entries() > 0);
    for (Int32 i = FIRST_DML_PRIV; i <= LAST_PRIMARY_DML_PRIV; i++)
{
if (stoi->getPrivAccess((PrivType)i))
{
if (!pPrivInfo->hasPriv((PrivType)i) && !optStoi->checkColPriv((PrivType)i, pPrivInfo))
RemoveNATableEntryFromCache = TRUE;
else
if (insertQIKeys)
findKeyAndInsertInOutputList(tab->getSecKeySet(),userHashValue,(PrivType)(i));
}
}
// wait until all the primary DML privileges have been checked before
// setting failure information
if ( RemoveNATableEntryFromCache )
{
bindWA->setFailedForPrivileges( TRUE );
tab->setRemoveFromCacheBNC(TRUE); // To be removed by CmpMain before Compilation retry
}
} // for loop over tables in stoi list
// ==> Check privileges for functions and procedures used in the query.
NABoolean RemoveNARoutineEntryFromCache = FALSE ;
if (bindWA->getUdrStoiList().entries())
{
for(Int32 i=0; i<(Int32)bindWA->getUdrStoiList().entries(); i++)
{
// Privilege info for the user/routine combination is stored in the
// NARoutine object.
OptUdrOpenInfo *udrStoi = (bindWA->getUdrStoiList())[i];
NARoutine* rtn = udrStoi->getNARoutine();
PrivMgrUserPrivs *pPrivInfo = rtn->getPrivInfo();
NABoolean insertQIKeys = FALSE;
if (QI_enabled && (rtn->getSecKeySet().entries() > 0))
insertQIKeys = TRUE;
if (pPrivInfo == NULL)
{
RemoveNARoutineEntryFromCache = TRUE ;
*CmpCommon::diags() << DgSqlCode( -1034 );
}
// Verify that the user has execute priv
else
{
if (pPrivInfo->hasPriv(EXECUTE_PRIV))
{
// do this only if QI is enabled and object has security keys defined
if ( insertQIKeys )
findKeyAndInsertInOutputList(rtn->getSecKeySet(), userHashValue, EXECUTE_PRIV);
}
// plan requires privilege but user has none, report an error
else
{
RemoveNARoutineEntryFromCache = TRUE ;
*CmpCommon::diags()
<< DgSqlCode( -4482 )
<< DgString0( "EXECUTE" )
<< DgString1( udrStoi->getUdrName() );
}
}
if ( RemoveNARoutineEntryFromCache )
{
bindWA->setFailedForPrivileges(TRUE);
// If routine exists in cache, add it to the list to remove
NARoutineDB *pRoutineDBCache = bindWA->getSchemaDB()->getNARoutineDB();
NARoutineDBKey key(rtn->getSqlName(), bindWA->wHeap());
NARoutine *cachedNARoutine = pRoutineDBCache->get(bindWA, &key);
if (cachedNARoutine != NULL)
pRoutineDBCache->moveRoutineToDeleteList(cachedNARoutine, &key);
}
} // for loop over UDRs
} // end if any UDRs.
// ==> Check privs on any CoprocAggrs used in the query.
for (Int32 i=0; i<(Int32)bindWA->getCoProcAggrList().entries(); i++)
{
RemoveNATableEntryFromCache = FALSE ; // Initialize each time through loop
ExeUtilHbaseCoProcAggr *coProcAggr = (bindWA->getCoProcAggrList())[i];
NATable* tab = bindWA->getSchemaDB()->getNATableDB()->
get(coProcAggr->getCorrName(), bindWA, NULL);
Int32 numSecKeys = 0;
// Privilege info for the user/table combination is stored in the NATable
// object.
PrivMgrUserPrivs* pPrivInfo = tab->getPrivInfo();
PrivMgrUserPrivs privInfo;
// System metadata tables do not, by default, have privileges stored in the
// NATable structure. Go ahead and retrieve them now.
if (!pPrivInfo)
{
CmpSeabaseDDL cmpSBD(STMTHEAP);
if (cmpSBD.switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META))
{
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
*CmpCommon::diags() << DgSqlCode( -4400 );
return FALSE;
}
retcode = privInterface.getPrivileges( tab->objectUid().get_value(),
tab->getObjectType(), thisUserID,
privInfo);
cmpSBD.switchBackCompiler();
if (retcode != STATUS_GOOD)
{
bindWA->setFailedForPrivileges( TRUE );
RemoveNATableEntryFromCache = TRUE;
*CmpCommon::diags() << DgSqlCode( -1034 );
return FALSE;
}
pPrivInfo = &privInfo;
}
// Verify that the user has select priv
// Select priv is needed for EXPLAIN requests, so no special check is done
NABoolean insertQIKeys = FALSE;
if (QI_enabled && (tab->getSecKeySet().entries()) > 0)
insertQIKeys = TRUE;
if (pPrivInfo->hasPriv(SELECT_PRIV))
{
// do this only if QI is enabled and object has security keys defined
if ( insertQIKeys )
findKeyAndInsertInOutputList(tab->getSecKeySet(), userHashValue, SELECT_PRIV );
}
// plan requires privilege but user has none, report an error
else
{
bindWA->setFailedForPrivileges( TRUE );
tab->setRemoveFromCacheBNC(TRUE); // To be removed by CmpMain before Compilation retry
*CmpCommon::diags()
<< DgSqlCode( -4481 )
<< DgString0( "SELECT" )
<< DgString1( tab->getTableName().getQualifiedNameAsAnsiString() );
}
} // for loop over coprocs
// ==> Check privs on any sequence generators used in the query.
for (Int32 i=0; i<(Int32)bindWA->getSeqValList().entries(); i++)
{
RemoveNATableEntryFromCache = FALSE ; // Initialize each time through loop
SequenceValue *seqVal = (bindWA->getSeqValList())[i];
NATable* tab = const_cast<NATable*>(seqVal->getNATable());
CMPASSERT(tab);
// get privilege information from the NATable structure
PrivMgrUserPrivs *pPrivInfo = tab->getPrivInfo();
PrivMgrUserPrivs privInfo;
if (!pPrivInfo)
{
CmpSeabaseDDL cmpSBD(STMTHEAP);
if (cmpSBD.switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META))
{
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
*CmpCommon::diags() << DgSqlCode( -4400 );
return FALSE;
}
retcode = privInterface.getPrivileges(tab->objectUid().get_value(),
COM_SEQUENCE_GENERATOR_OBJECT,
thisUserID, privInfo);
cmpSBD.switchBackCompiler();
if (retcode != STATUS_GOOD)
{
bindWA->setFailedForPrivileges(TRUE);
RemoveNATableEntryFromCache = TRUE;
*CmpCommon::diags() << DgSqlCode( -1034 );
return FALSE;
}
pPrivInfo = &privInfo;
}
// Verify that the user has usage priv
NABoolean insertQIKeys = FALSE;
if (QI_enabled && (tab->getSecKeySet().entries()) > 0)
insertQIKeys = TRUE;
if (pPrivInfo->hasPriv(USAGE_PRIV))
{
// do this only if QI is enabled and object has security keys defined
if ( insertQIKeys )
findKeyAndInsertInOutputList(tab->getSecKeySet(), userHashValue, USAGE_PRIV );
}
// plan requires privilege but user has none, report an error
else
{
bindWA->setFailedForPrivileges( TRUE );
RemoveNATableEntryFromCache = TRUE;
*CmpCommon::diags()
<< DgSqlCode( -4491 )
<< DgString0( "USAGE" )
<< DgString1( tab->getTableName().getQualifiedNameAsAnsiString());
}
} // for loop over sequences
return !bindWA->failedForPrivileges() ;
}
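// Helper for checkPrivileges: given the security keys for an object,
// pick the best key that covers the requested privilege for this user
// (a grant to the user or to PUBLIC is preferred over a grant to a role),
// insert it into the plan's security key set, and then follow the chain
// of role grants back to the user so that revoking any grant in the
// chain invalidates the plan.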
void RelRoot::findKeyAndInsertInOutputList( ComSecurityKeySet KeysForTab
, const uint32_t userHashValue
, const PrivType which
)
{
ComSecurityKey dummyKey;
ComQIActionType objectActionType =
dummyKey.convertBitmapToQIActionType ( which, ComSecurityKey::OBJECT_IS_OBJECT );
ComSecurityKey * UserSchemaKey = NULL;
ComSecurityKey * UserObjectKey = NULL;
ComSecurityKey * RoleSchemaKey = NULL;
ComSecurityKey * RoleObjectKey = NULL;
ComSecurityKey * BestKey = NULL;
ComSecurityKey * thisKey = &(KeysForTab[0]);
uint32_t hashValueOfPublic = 0;
// NOTE: hashValueOfPublic will be the same for all keys, so we generate it only once.
if ( KeysForTab.entries() > 0 )
hashValueOfPublic = thisKey->generateHash(PUBLIC_USER);
// Traverse List looking for ANY appropriate ComSecurityKey
for ( Int32 ii = 0; ii < (Int32)(KeysForTab.entries()); ii++ )
{
thisKey = &(KeysForTab[ii]);
if ( thisKey->getSecurityKeyType() == objectActionType )
{
if ( thisKey->getSubjectHashValue() == hashValueOfPublic ||
thisKey->getSubjectHashValue() == userHashValue )
{
if ( ! UserObjectKey ) UserObjectKey = thisKey;
}
else if ( ! RoleObjectKey ) RoleObjectKey = thisKey;
}
else {;} // Not right action type, just continue traversing.
}
if ( UserObjectKey ) BestKey = UserObjectKey ;
else if ( RoleObjectKey ) BestKey = RoleObjectKey ;
if ( BestKey == NULL)
return; // Sometimes there aren't any security keys
securityKeySet_.insert(*BestKey);
uint32_t SubjHashValue = BestKey->getSubjectHashValue();
hashValueOfPublic = BestKey->generateHash(PUBLIC_USER);
// Check whether this privilege was granted to PUBLIC. If so, nothing more to check.
if ( SubjHashValue == hashValueOfPublic )
return;
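// The privilege was granted through a role. Walk the grant chain:
// find the USER_GRANT_ROLE key whose grantee (object hash) matches the
// current subject, add it to the plan, and repeat until we reach the
// user's own hash value.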
while ( SubjHashValue != userHashValue ) //While we see a ComSecurityKey for a Role
{
NABoolean found = FALSE;
for ( Int32 ii = 0; ii < (Int32)(KeysForTab.entries()); ii++ )
{
// If this ComSecurityKey is a GRANT type and the grantee (the object)
// is the Role specified by SubjHashValue, then break out of inner loop.
ComSecurityKey * thisKey = &(KeysForTab[ii]);
if ( ( thisKey->getObjectHashValue() == SubjHashValue ) &&
( (thisKey->getSecurityKeyType() == COM_QI_USER_GRANT_ROLE ) ) )
{
securityKeySet_.insert(*thisKey); // Insert this GRANT type ComSecurityKey into the Plan
found = TRUE;
SubjHashValue = thisKey->getSubjectHashValue();
break; // We found the user or Role which granted the user the privilege
}
}
// found should never be FALSE
CMPASSERT(found);
}
}
// -----------------------------------------------------------------------
// member functions for class GroupByAgg
// -----------------------------------------------------------------------
RelExpr *GroupByAgg::bindNode(BindWA *bindWA)
{
NABoolean specialMode =
((CmpCommon::getDefault(MODE_SPECIAL_1) == DF_ON) ||
(CmpCommon::getDefault(MODE_SPECIAL_2) == DF_ON));
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
//
// add any aggregate functions found in the parent node(s)
//
BindScope *currScope = bindWA->getCurrentScope();
aggregateExpr_ += currScope->getUnresolvedAggregates();
currScope->getUnresolvedAggregates().clear();
//
// Bind the child nodes.
//
currScope->context()->lookAboveToDecideSubquery() = TRUE;
bindChildren(bindWA);
currScope->context()->lookAboveToDecideSubquery() = FALSE;
if (bindWA->errStatus()) return this;
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
// QSTUFF
NAString fmtdList(bindWA->wHeap());
LIST(TableNameMap*) xtnmList(bindWA->wHeap());
bindWA->getTablesInScope(xtnmList, &fmtdList);
// can be removed when supporting aggregates on streams
if (getGroupAttr()->isStream()){
*CmpCommon::diags() << DgSqlCode(-4162) << DgString0(fmtdList);
bindWA->setErrStatus();
return this;
}
if ((getGroupAttr()->isEmbeddedUpdateOrDelete()) ||
(bindWA->isEmbeddedIUDStatement())) {
*CmpCommon::diags() << DgSqlCode(-4163) << DgString0(fmtdList)
<< (getGroupAttr()->isEmbeddedUpdate() ?
DgString1("UPDATE"):DgString1("DELETE"));
bindWA->setErrStatus();
return this;
}
// QSTUFF
// if unresolved aggregate functions have been found in the children of the
// Groupby node, that would mean that we are referencing aggregates before
// the groupby operation is performed
//
if (checkUnresolvedAggregates(bindWA))
return this;
//
// Detach the item expression tree for the grouping column list, bind it,
// convert it to a ValueIdSet, and attach it to the GroupByAgg node.
//
ItemExpr *groupExprTree = removeGroupExprTree();
if (groupExprTree) {
currScope->context()->inGroupByClause() = TRUE;
groupExprTree->convertToValueIdSet(groupExpr(), bindWA, ITM_ITEM_LIST);
currScope->context()->inGroupByClause() = FALSE;
if (bindWA->errStatus()) return this;
ValueIdList groupByList(groupExpr());
for (CollIndex i = 0; i < groupByList.entries(); i++)
{
ValueId vid = groupByList[i];
vid.getItemExpr()->setIsGroupByExpr(TRUE);
}
if (((CmpCommon::getDefault(GROUP_BY_USING_ORDINAL) != DF_OFF) ||
(specialMode)) &&
(groupExprTree != NULL) &&
(getParentRootSelectList() != NULL))
{
RETDesc * childRETDesc = child(0)->getRETDesc();
ItemExprList origSelectList(getParentRootSelectList(), bindWA->wHeap());
for (CollIndex i = 0; i < groupByList.entries(); i++)
{
ValueId vid = groupByList[i];
if((vid.getItemExpr()->getOperatorType() == ITM_SEL_INDEX)&&
(((SelIndex*)(vid.getItemExpr()))->renamedColNameInGrbyClause()))
{
ULng32 indx = ((SelIndex*)(vid.getItemExpr()))->getSelIndex() - 1;
if (origSelectList.entries() > indx &&
origSelectList[indx]->getOperatorType() == ITM_RENAME_COL)
{
const ColRefName &selectListColRefName =
*((RenameCol *)origSelectList[indx])->getNewColRefName();
ColumnNameMap *baseColExpr =
childRETDesc->findColumn(selectListColRefName);
if (baseColExpr)
{
groupExpr().remove(vid);
groupExpr().insert(baseColExpr->getValueId());
baseColExpr->getColumnDesc()->setGroupedFlag();
origSelectList[indx]->setInGroupByOrdinal(FALSE);
}
}
}
}
if (getSelPredTree())
{
ItemExpr * havingPred = (ItemExpr *) getSelPredTree();
// see if the having expr refers to any renamed col in the select list
// that is NOT a name exposed by the child RETDesc.
// If it does, replace it with SelIndex.
// For now, do this for special1 mode and only if the having
// is a simple pred of the form: col <op> value.
// Later, we can extend this to all kind of having pred by
// traversing the having pred tree and replacing renamed cols.
NABoolean replaced = FALSE;
NABoolean notAllowedWithSelIndexInHaving = FALSE;
replaceRenamedColInHavingWithSelIndex(
bindWA, havingPred, origSelectList, replaced,
notAllowedWithSelIndexInHaving,child(0)->getRETDesc());
if (bindWA->errStatus())
return this;
if (replaced)
{
if (notAllowedWithSelIndexInHaving)
{
*CmpCommon::diags() << DgSqlCode(-4196) ;
bindWA->setErrStatus();
return this;
}
setSelIndexInHaving(TRUE);
}
}
setParentRootSelectList(NULL);
}
// Indicate that we are not in a scalar groupby. Any aggregate
// functions found in the select list or having clause cannot
// evaluate to NULL unless their argument is null.
currScope->context()->inScalarGroupBy() = FALSE;
}
//
// bind the having predicates and attach the resulting value id set
// to the node (as a selection predicate on the groupby node)
//
ItemExpr *havingPred = removeSelPredTree();
if (havingPred && NOT selIndexInHaving())
{
currScope->context()->inHavingClause() = TRUE;
havingPred->convertToValueIdSet(selectionPred(), bindWA, ITM_AND);
currScope->context()->inHavingClause() = FALSE;
if (bindWA->errStatus())
return this;
}
//
// Bind the base class.
//
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus()) return boundExpr;
if ((havingPred) &&
(selIndexInHaving()))
{
addSelPredTree(havingPred);
}
//
// Get the aggregate expressions from the list that has accumulated
// in the current bind scope and clear the list in the bind scope --
// but first, if Tuple::bindNode()/checkTupleElementsAreAllScalar()
// created this node, add the subquery aggregate expr
// (Genesis 10-000221-6676).
//
if (aggregateExprTree_) { // only Binder, not Parser, should put anything here
// CMPASSERT(bindWA->getCurrentScope()->context()->inTupleList());
CMPASSERT(aggregateExprTree_->nodeIsBound() ||
aggregateExprTree_->child(0)->nodeIsBound());
aggregateExprTree_ = aggregateExprTree_->bindNode(bindWA);
if (bindWA->errStatus()) return boundExpr;
aggregateExpr_ += aggregateExprTree_->getValueId();
aggregateExprTree_ = NULL;
}
aggregateExpr_ += currScope->getUnresolvedAggregates();
currScope->getUnresolvedAggregates().clear();
getRETDesc()->setGroupedFlag();
return boundExpr;
} // GroupByAgg::bindNode()
// -----------------------------------------------------------------------
// member functions for class Scan
// -----------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////
// A list of 'fabricated' host variables is generated that will
// contain the primary key values. These primary key
// values are retrieved at runtime from the cursor statement
// specified in the 'current of' clause. A predicate of the
// form 'where pkey1 = :pkey1 and pkey2 = :pkey2...' is attached
// to the selection pred of this node. The hostvar values are
// then passed in by the root node to its child and they reach
// this node at runtime where the 'where' predicate is evaluated.
////////////////////////////////////////////////////////////////////////
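// Illustrative example (not from the original source): for a table whose
// clustering key is (a, b), the fabricated predicate takes the form
// a = :_upd_pkey_HostVar0 AND b = :_upd_pkey_HostVar1
// following the host variable naming scheme used below.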
void Scan::bindUpdateCurrentOf(BindWA *bindWA, NABoolean updateQry)
{
ValueIdList keyList = getTableDesc()->getClusteringIndex()->getIndexKey();
ItemExpr * rootPtr = NULL;
char hvName[30];
CollIndex i = 0;
for (i = 0; i < keyList.entries(); i++)
{
ValueId vid = keyList[i];
// Fabricate a name for the i'th host variable,
// make a hostvar, add it to pkeyHvarList.
sprintf(hvName,"_upd_pkey_HostVar%d",i);
HostVar *hv = new(bindWA->wHeap()) HostVar(hvName, &vid.getType(), TRUE);
hv->bindNode(bindWA);
pkeyHvarList().insert(hv->getValueId());
// Build a 'pkey = pkey_hvar' predicate.
ItemExpr * eqPred = new(bindWA->wHeap())
BiRelat(ITM_EQUAL, vid.getItemExpr(), hv);
if (!rootPtr)
rootPtr = eqPred;
else
rootPtr = new(bindWA->wHeap()) BiLogic(ITM_AND, rootPtr, eqPred);
} // loop over all pkey columns
if (updateQry)
{
ItemExpr * updCheckPtr = NULL;
ValueIdList nonKeyColList;
getTableDesc()->getClusteringIndex()->getNonKeyColumnList(nonKeyColList);
for (i = 0; i < nonKeyColList.entries(); i++)
{
ValueId vid = nonKeyColList[i];
// Fabricate a name for the i'th host variable,
// make a hostvar, add it to pkeyHvarList.
sprintf(hvName,"_upd_col_HostVar%d",i);
HostVar *hv = new(bindWA->wHeap()) HostVar(hvName, &vid.getType(), TRUE);
hv->bindNode(bindWA);
pkeyHvarList().insert(hv->getValueId());
// Build a 'col = col_hvar' predicate.
ItemExpr * eqPred = new(bindWA->wHeap())
BiRelat(ITM_EQUAL, vid.getItemExpr(), hv, TRUE);
if (!updCheckPtr)
updCheckPtr = eqPred;
else
updCheckPtr =
new(bindWA->wHeap()) BiLogic(ITM_AND, updCheckPtr, eqPred);
} // loop over all pkey columns
if (updCheckPtr)
{
updCheckPtr = new (bindWA->wHeap())
Case(NULL,
new (bindWA->wHeap())
IfThenElse(updCheckPtr,
new (bindWA->wHeap()) BoolVal(ITM_RETURN_TRUE),
new (bindWA->wHeap())
BoolVal(ITM_RETURN_TRUE,
new (bindWA->wHeap())
RaiseError(-(Lng32)EXE_CURSOR_UPDATE_CONFLICT))));
rootPtr = new(bindWA->wHeap()) BiLogic(ITM_AND, rootPtr, updCheckPtr);
}
}
// rootPtr->bindNode(bindWA);
// add this new tree to the existing selection predicate
addSelPredTree(rootPtr);
bindSelf(bindWA); // just in case
} // Scan::bindUpdateCurrentOf()
// Every Scan and every GenericUpdate has its own stoi,
// plus copies of some of these stoi's are copied to the BindWA
//
// The scan/gu stoi's will become ex_partn_access stoi's
//
// The stoiList copies in BindWA will have their security
// checked in the binder, in RelRoot::checkPrivileges
//
// Stoi's must exist for every table/view/MV/index.
// Stoi's that are not copied to the BindWA are those for which Ansi mandates
// that no security checking be done (e.g., indexes).
//
OptSqlTableOpenInfo *setupStoi(OptSqlTableOpenInfo *&optStoi_,
BindWA *bindWA,
const RelExpr *re,
const NATable *naTable,
const CorrName &corrName,
NABoolean noSecurityCheck)
{
// Get the PHYSICAL (non-Ansi/non-delimited) filename of the table or view.
CMPASSERT(!naTable->getViewText() || naTable->getViewFileName());
NAString fileName( naTable->getViewText() ?
(NAString)naTable->getViewFileName() :
naTable->getClusteringIndex()->
getFileSetName().getQualifiedNameAsString(),
bindWA->wHeap());
SqlTableOpenInfo * stoi_ = new (bindWA->wHeap()) SqlTableOpenInfo;
optStoi_ = new(bindWA->wHeap()) OptSqlTableOpenInfo(stoi_,
corrName,
bindWA->wHeap());
stoi_->setFileName(convertNAString(fileName, bindWA->wHeap()));
if (naTable->getIsSynonymTranslationDone())
{
stoi_->setAnsiName(convertNAString(
naTable->getSynonymReferenceName(),
bindWA->wHeap()));
}
else
{
stoi_->setAnsiName(convertNAString(
naTable->getTableName().getQualifiedNameAsAnsiString(),
bindWA->wHeap()));
}
if(naTable->isUMDTable() || naTable->isSMDTable()
|| naTable->isMVUMDTable() || naTable->isTrigTempTable())
{
stoi_->setIsMXMetadataTable(1);
}
if (NOT corrName.getCorrNameAsString().isNull())
{
NABoolean corrNameSpecified = TRUE;
if (corrNameSpecified)
{
stoi_->setCorrName(convertNAString(
corrName.getCorrNameAsString(),
bindWA->wHeap()));
}
}
// Materialized-View is considered as a regular table
stoi_->setSpecialTable(naTable->getSpecialType() != ExtendedQualName::NORMAL_TABLE &&
naTable->getSpecialType() != ExtendedQualName::MV_TABLE);
stoi_->setIsView(naTable->getViewText() ? TRUE : FALSE);
if (naTable->isHbaseTable())
stoi_->setIsHbase(TRUE);
stoi_->setLocationSpecified(corrName.isLocationNameSpecified() ||
corrName.isPartitionNameSpecified() );
stoi_->setUtilityOpen(corrName.isUtilityOpenIdSpecified());
stoi_->setUtilityOpenId(corrName.getUtilityOpenId());
stoi_->setIsNSAOperation(corrName.isNSAOperation());
if (! naTable->getViewText())
stoi_->setIsAudited(naTable->getClusteringIndex()->isAudited());
switch (re->getOperatorType())
{
case REL_UNARY_INSERT:
case REL_LEAF_INSERT:
stoi_->setInsertAccess();
break;
case REL_UNARY_UPDATE:
{
stoi_->setUpdateAccess();
if (((GenericUpdate*)re)->isMerge())
stoi_->setInsertAccess();
}
break;
case REL_UNARY_DELETE:
case REL_LEAF_DELETE:
{
stoi_->setDeleteAccess();
if (((GenericUpdate*)re)->isMerge())
stoi_->setInsertAccess();
if (((Delete*)re)->isFastDelete())
stoi_->setSelectAccess();
}
break;
case REL_SCAN:
case REL_LOCK:
case REL_UNLOCK:
case REL_HBASE_COPROC_AGGR:
stoi_->setSelectAccess();
break;
case REL_EXE_UTIL:
stoi_->setSelectAccess();
stoi_->setInsertAccess();
stoi_->setUpdateAccess();
stoi_->setDeleteAccess();
break;
default:
CMPASSERT(FALSE);
}
NABoolean validateTS = TRUE;
if ((naTable->getClusteringIndex() &&
naTable->getClusteringIndex()->isSystemTable()) ||
(NOT validateTS))
stoi_->setValidateTimestamp(FALSE);
else
stoi_->setValidateTimestamp(TRUE);
// MV --
// For INTERNAL REFRESH statements, leave only the insert on the MV itself.
if (re->getInliningInfo().isAvoidSecurityCheck() ||
(bindWA->isBindingMvRefresh() &&
(!naTable->isAnMV() || !stoi_->getInsertAccess())))
{
return NULL;
}
// In a SCAN, only the topmost view is inserted into BindWA StoiList
// (thus there will be no security check on underlying views/basetables,
// as Ansi says there shouldn't).
if (re->getOperatorType() == REL_SCAN && bindWA->viewCount())
{
return NULL;
}
// Genesis 10-980306-4309:
// Ansi says there is not supposed to be any security check on referenced tables,
// nor of course on indexes, RIs and temp tables which are not an Ansi
// notion to begin with.
if ((naTable->getSpecialType() == ExtendedQualName::TRIGTEMP_TABLE) ||
(naTable->getSpecialType() == ExtendedQualName::IUD_LOG_TABLE) ||
(naTable->getSpecialType() == ExtendedQualName::INDEX_TABLE) ||
(naTable->getSpecialType() == ExtendedQualName::RESOURCE_FORK))
{
return NULL;
}
if (noSecurityCheck)
{
return NULL;
}
if (re->getOperator().match(REL_ANY_GEN_UPDATE)&&
(((GenericUpdate*)re)->getUpdateCKorUniqueIndexKey()))
{
return NULL;
}
OptSqlTableOpenInfo *stoiInList = NULL;
for (CollIndex i=0; i < bindWA->getStoiList().entries(); i++)
if (strcmp(bindWA->getStoiList()[i]->getStoi()->fileName(), fileName) == 0) {
stoiInList = bindWA->getStoiList()[i];
break;
}
if (!stoiInList) {
stoiInList =
new(bindWA->wHeap()) OptSqlTableOpenInfo(
new (bindWA->wHeap()) SqlTableOpenInfo(*stoi_),
corrName,
bindWA->wHeap());
stoiInList->setTable((NATable*)naTable);
bindWA->getStoiList().insert(stoiInList);
bindWA->hbaseColUsageInfo()->insert((QualifiedName*)&naTable->getTableName());
} else {
// This is conceptually equivalent to
// stoiInList->AccessFlags |= stoi_->AccessFlags :
if (stoi_->getInsertAccess()) stoiInList->getStoi()->setInsertAccess();
if (stoi_->getUpdateAccess()) stoiInList->getStoi()->setUpdateAccess();
if (stoi_->getDeleteAccess()) stoiInList->getStoi()->setDeleteAccess();
if (stoi_->getSelectAccess()) stoiInList->getStoi()->setSelectAccess();
}
return stoiInList;
} // setupStoi()
//----------------------------------------------------------------------------
RelExpr *Scan::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// -- Triggers
// Is this a Scan on a temporary table inside the action of a statement trigger?
if (getTableName().isATriggerTransitionName(bindWA))
return buildTriggerTransitionTableView(bindWA); // Located in Inlining.cpp
// -- MV
// Is this a Scan on a log inside the select statement of a Materialized View?
// If so - maybe we need to replace this Scan with some other RelExpr tree.
// Ignore when inDDL() because the log may not exist yet.
if (!bindWA->inDDL() &&
getTableName().getSpecialType() == ExtendedQualName::NORMAL_TABLE)
{
const MvBindContext *pMvBindContext = bindWA->getClosestMvBindContext();
if (NULL != pMvBindContext)
{
RelExpr *replacementTree =
pMvBindContext->getReplacementFor(getTableName().getQualifiedNameObj());
if (replacementTree != NULL)
{
// We need to replace the Scan on the base table by some other tree.
// Make sure this tree has the same name as the Scan.
const CorrName& baseCorrName = getTableName();
replacementTree = new(bindWA->wHeap())
RenameTable(TRUE, replacementTree, baseCorrName);
// Move any selection predicates on the Scan to the tree.
replacementTree->addSelPredTree(removeSelPredTree());
// Bind the tree and return instead of the tree.
return replacementTree->bindNode(bindWA);
}
}
}
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
// Get the NATable for this object.
//
NATable *naTable = bindWA->getNATable(getTableName());
if (bindWA->errStatus())
return this;
// Set up stoi. bindWA->viewCount is altered during expanding the view.
setupStoi(stoi_, bindWA, this, naTable, getTableName(), noSecurityCheck());
// If the object is a view, expand the view.
//
if (naTable->getViewText()) {
// Allow view on exception_table or any other special_table_name objects
ComBoolean specialTableFlagOn = Get_SqlParser_Flags(ALLOW_SPECIALTABLETYPE);
if (specialTableFlagOn == FALSE)
{
Set_SqlParser_Flags(ALLOW_SPECIALTABLETYPE);
SQL_EXEC_SetParserFlagsForExSqlComp_Internal(ALLOW_SPECIALTABLETYPE);
}
RelExpr * boundView = bindWA->bindView(getTableName(),
naTable,
accessOptions(),
removeSelPredTree(),
getGroupAttr(),
TRUE/*catmanCollectUsages*/);
// QSTUFF
// First we check whether it's a view; if so, it must be updatable
// when using it for stream access or an embedded update or delete
if (!naTable->isUpdatable() && getGroupAttr()->isEmbeddedUpdateOrDelete()){
*CmpCommon::diags() << DgSqlCode(-4206)
<< DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString())
<< (getGroupAttr()->isEmbeddedUpdate() ?
DgString0("UPDATE") : DgString0("DELETE"));
bindWA->setErrStatus();
// restore ALLOW_SPECIALTABLETYPE setting
if (specialTableFlagOn == FALSE)
Reset_SqlParser_Flags(ALLOW_SPECIALTABLETYPE);
return NULL;
}
if (!naTable->isUpdatable() && getGroupAttr()->isStream()){
*CmpCommon::diags() << DgSqlCode(-4151)
<< DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
if (specialTableFlagOn == FALSE)
Reset_SqlParser_Flags(ALLOW_SPECIALTABLETYPE);
return NULL;
}
// Second we make sure that the underlying base table is key sequenced
// in case of embedded d/u and streams
// -- for as long as we don't support entry sequenced tables
if (boundView->getLeftmostScanNode()) {
// this is not a "create view V(a) as values(3)" kind of a view
const NATable * baseTable =
boundView->getLeftmostScanNode()->getTableDesc()->getNATable();
if (getGroupAttr()->isStream()) {
if (!baseTable->getClusteringIndex()->isKeySequenced()) {
*CmpCommon::diags() << DgSqlCode(-4204)
<< DgTableName(
baseTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
if (specialTableFlagOn == FALSE)
Reset_SqlParser_Flags(ALLOW_SPECIALTABLETYPE);
return NULL;
}
}
if (getGroupAttr()->isEmbeddedUpdateOrDelete()){
if (!baseTable->getClusteringIndex()->isKeySequenced()){
*CmpCommon::diags() << DgSqlCode(-4205)
<< DgTableName(
baseTable->getTableName().getQualifiedNameAsAnsiString())
<< (getGroupAttr()->isEmbeddedUpdate() ?
DgString0("UPDATE") : DgString0("DELETE"));
bindWA->setErrStatus();
if (specialTableFlagOn == FALSE)
Reset_SqlParser_Flags(ALLOW_SPECIALTABLETYPE);
return NULL;
}
}
}
// QSTUFF
// restore ALLOW_SPECIALTABLETYPE setting
if (specialTableFlagOn == FALSE)
Reset_SqlParser_Flags(ALLOW_SPECIALTABLETYPE);
return boundView;
}
// -- MV
// If this is the expanded tree pass during CREATE MV, expand the MV into
// its SELECT tree, just like a regular view.
// Do this only for incremental MVs, otherwise they may introduce unsupported
// operators such as Union.
if (naTable->isAnMV() &&
bindWA->isExpandMvTree() &&
naTable->getMVInfo(bindWA)->isIncremental())
{
CMPASSERT(bindWA->inDDL());
return bindExpandedMaterializedView(bindWA, naTable);
}
// Do not allow selecting from an uninitialized MV
if (naTable->isAnMV() && !bindWA->inDDL() && !bindWA->isBindingMvRefresh())
{
if (naTable->verifyMvIsInitializedAndAvailable(bindWA))
return NULL;
}
// Allocate a TableDesc and attach it to the Scan node.
// This call also allocates a RETDesc, attached to the BindScope,
// which we want to attach also to the Scan.
//
// disable override schema for synonym
NABoolean os = FALSE;
if ( ( bindWA->overrideSchemaEnabled() )
&& ( ! naTable->getSynonymReferenceName().isNull() ) )
{
os = bindWA->getToOverrideSchema();
bindWA->setToOverrideSchema(FALSE);
}
TableDesc * tableDesc = NULL;
if ((NOT isHbaseScan()) || (! getTableDesc()))
{
tableDesc = bindWA->createTableDesc(naTable, getTableName(),
FALSE, getHint());
}
else
tableDesc = getTableDesc();
// restore override schema setting
if ( ( bindWA->overrideSchemaEnabled() )
&& ( ! naTable->getSynonymReferenceName().isNull() ) )
bindWA->setToOverrideSchema(os);
// before attaching set the selectivity hint defined by the user for this
// table
if (tableDesc && getHint() &&
getTableName().getSpecialType() == ExtendedQualName::NORMAL_TABLE)
{
double s;
s = getHint()->getSelectivity();
if (0.0 <= s && s <= 1.0) {
SelectivityHint *selHint = new (STMTHEAP) SelectivityHint();
selHint->setScanSelectivityFactor(s);
tableDesc->setSelectivityHint(selHint);
}
if (getHint()->getCardinality() >= 1.0) {
s = getHint()->getCardinality();
CostScalar scanCard(s);
if((scanCard.getValue() - floor(scanCard.getValue())) > 0.00001)
scanCard = ceil(scanCard.getValue());
CardinalityHint *cardHint = new (STMTHEAP) CardinalityHint();
cardHint->setScanCardinality(scanCard);
tableDesc->setCardinalityHint(cardHint);
}
}
setTableDesc(tableDesc);
if (bindWA->errStatus()) return this;
setRETDesc(bindWA->getCurrentScope()->getRETDesc());
if ((CmpCommon::getDefault(ALLOW_DML_ON_NONAUDITED_TABLE) == DF_OFF) &&
(naTable && naTable->getClusteringIndex() && !naTable->getClusteringIndex()->isAudited()))
{
*CmpCommon::diags() << DgSqlCode(-4211)
<< DgTableName(
naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return NULL;
}
// restricted partitions for HBase table
if (naTable->isHbaseTable() &&
(naTable->isPartitionNameSpecified() ||
naTable->isPartitionRangeSpecified()))
{
PartitioningFunction * partFunc = naTable->getClusteringIndex()->getPartitioningFunction();
// find the salt column and apply a predicate on the salt column.
// For Hash2, since the partition key columns are columns used to build
// the _SALT_ column, we need to search all columns for the _SALT_ column.
const NAColumnArray &ccCols =
(partFunc && partFunc->castToHash2PartitioningFunction())?
naTable->getClusteringIndex()->getAllColumns()
:
naTable->getClusteringIndex()->getPartitioningKeyColumns();
NABoolean saltColFound = FALSE;
for (CollIndex i=0; i<ccCols.entries() && !saltColFound; i++)
{
if (ccCols[i]->isComputedColumn() &&
ccCols[i]->getColName() ==
ElemDDLSaltOptionsClause::getSaltSysColName())
{
saltColFound = TRUE;
// create a predicate "_SALT_" = <num> or
// "_SALT_" between <num> and <num>
Int32 beginPartNum = partFunc->getRestrictedBeginPartNumber() - 1;
Int32 endPartNum = partFunc->getRestrictedEndPartNumber() - 1;
// fill in defaults, indicated by -1 (-2 after subtraction above)
if (beginPartNum < 0)
beginPartNum = 0;
if (endPartNum < 0)
endPartNum = partFunc->getCountOfPartitions() - 1;
ItemExpr *partPred = NULL;
ColReference *saltColRef = new(bindWA->wHeap()) ColReference(
new(bindWA->wHeap()) ColRefName(
ccCols[i]->getFullColRefName(), bindWA->wHeap()));
if (beginPartNum == endPartNum)
{
partPred = new(bindWA->wHeap()) BiRelat
(ITM_EQUAL,
saltColRef,
new(bindWA->wHeap()) ConstValue(beginPartNum,bindWA->wHeap()));
}
else
{
partPred = new(bindWA->wHeap()) Between
(saltColRef,
new(bindWA->wHeap()) ConstValue(beginPartNum,bindWA->wHeap()),
new(bindWA->wHeap()) ConstValue(endPartNum,bindWA->wHeap()));
}
ItemExpr *newSelPred = removeSelPredTree();
if (newSelPred)
newSelPred = new(bindWA->wHeap()) BiLogic(ITM_AND,
newSelPred,
partPred);
else
newSelPred = partPred;
// now add the partition predicates
addSelPredTree(newSelPred->bindNode(bindWA));
}
}
if (!saltColFound)
{
// not allowed to select individual partitions from HBase tables
// unless they are salted
char buf[20];
snprintf(buf, 20, "%d", partFunc->getRestrictedBeginPartNumber());
*CmpCommon::diags() << DgSqlCode(-1276)
<< DgString0(buf)
<< DgTableName(
naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return NULL;
}
}
// Bind the base class.
//
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus()) return this;
//
// Assign the set of columns that belong to the table to be scanned
// as the output values that can be produced by this scan.
//
getGroupAttr()->addCharacteristicOutputs(getTableDesc()->getColumnList());
getGroupAttr()->addCharacteristicOutputs(getTableDesc()->hbaseTSList());
// MV --
if (getInliningInfo().isMVLoggingInlined())
projectCurrentEpoch(bindWA);
// QSTUFF
// Second we make sure that the underlying base table is key sequenced in case
// of embedded d/u and streams
// -- for as long as we don't support entry sequenced tables
if (getGroupAttr()->isStream()){
if (!naTable->getClusteringIndex()->isKeySequenced() ||
naTable->hasVerticalPartitions()){
*CmpCommon::diags() << DgSqlCode(-4204)
<< DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return NULL;
}
if (!getTableDesc()->getClusteringIndex()->getNAFileSet()->isAudited()) {
// Stream access not allowed on a non-audited table
*CmpCommon::diags() << DgSqlCode(-4215)
<< DgTableName(
naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return NULL;
}
}
if (getGroupAttr()->isEmbeddedUpdateOrDelete()){
if (!naTable->getClusteringIndex()->isKeySequenced()
|| naTable->hasVerticalPartitions()){
*CmpCommon::diags() << DgSqlCode(-4205)
<< DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString())
<< (getGroupAttr()->isEmbeddedUpdate() ?
DgString0("UPDATE") : DgString0("DELETE"));
bindWA->setErrStatus();
return NULL;
}
}
// QSTUFF
// Fix "browse access mode incorrectly starts transaction" genesis case
// 10-021111-1080. Here's a glimpse at what may have been the original
// intent of the old code (taken from RelExpr.h comment for the now
// defunct RelExpr::propagateAccessOptions):
//
// At parse time, user can specify statement level access options.
// (See SQL/ARK Language spec). These options are attached to the
// RelRoot node and could be different for different Scans in the query.
// All Scan and Update nodes under a RelRoot have the same Access
// type and the Lock Mode.
//
// The problem is propagateAccessOptions did not visit all the Scans,
// eg, it did not propagate to subquery Scans, and it did not propagate
// to internal RelRoots. This "push" model seems harder to understand
// and to do correctly.
//
// So, we go with the "pull" model. An interesting node such as a Scan,
// GenericUpdate, RelRoot that needs a user-specified access/lock mode
// can "pull" one from BindWA. BindWA already implements SQL scoping
// and visibility rules. It's easier to explain also. Each table
// reference inherits the user-specified access/lock mode of the
// nearest SQL scope, going from the table outwards. If the entire
// query has no user-specified access/lock mode, then it uses the
// session-level default access/lock mode.
//
// if we have no user-specified access options then
// get it from nearest enclosing scope that has one (if any)
if (!accessOptions().userSpecified()) {
StmtLevelAccessOptions *axOpts = bindWA->findUserSpecifiedAccessOption();
if (axOpts) {
accessOptions() = *axOpts;
}
}
// The above code is in RelRoot::bindNode also.
// It would be nice to refactor this common code; someday.
// See Halloween handling code in GenericUpdate::bindNode
if (accessOptions().userSpecified()) {
if ( accessOptions().accessType() == REPEATABLE_ ||
accessOptions().accessType() == STABLE_ ||
accessOptions().accessType() == BROWSE_
) {
naTable->setRefsIncompatibleDP2Halloween();
}
}
else {
TransMode::IsolationLevel il = CmpCommon::transMode()->getIsolationLevel();
if((CmpCommon::transMode()->ILtoAT(il) == REPEATABLE_ ) ||
(CmpCommon::transMode()->ILtoAT(il) == STABLE_ ) ||
(CmpCommon::transMode()->ILtoAT(il) == BROWSE_ )) {
naTable->setRefsIncompatibleDP2Halloween();
}
}
const NAString * tableLockVal =
ActiveControlDB()->getControlTableValue(
getTableName().getUgivenName(), "TABLELOCK");
if (*tableLockVal == "ON")
naTable->setRefsIncompatibleDP2Halloween();
// Embedded update/delete queries on a partitioned table
// generate an assertion when the ATTEMPT_ASYNCHRONOUS_ACCESS
// flag is OFF, because a split operator is used.
// Removing the split top operator causes other problems;
// error 66 from the file system is one of them.
// So, for now, the compiler generates an error when these
// conditions occur.
if (getGroupAttr()->isEmbeddedUpdateOrDelete() &&
naTable->getClusteringIndex()->isPartitioned() &&
(CmpCommon::getDefault(ATTEMPT_ASYNCHRONOUS_ACCESS) == DF_OFF)) {
*CmpCommon::diags() << DgSqlCode(-4321)
<< DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return NULL;
}
// Stream access is not allowed on multi-partitioned access paths when
// CQD ATTEMPT_ASYNCHRONOUS_ACCESS is set to OFF. If we find
// that all access paths are partitioned, we give an error.
if (getGroupAttr()->isStream() &&
(CmpCommon::getDefault(ATTEMPT_ASYNCHRONOUS_ACCESS) == DF_OFF)) {
NABoolean atleastonenonpartitionedaccess = FALSE;
NAFileSetList idescList = naTable->getIndexList();
for(CollIndex i = 0;
i < idescList.entries() && !atleastonenonpartitionedaccess; i++)
if(!(idescList[i]->isPartitioned()) )
atleastonenonpartitionedaccess = TRUE;
if (!atleastonenonpartitionedaccess) {
*CmpCommon::diags() << DgSqlCode(-4320)
<< DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return NULL;
}
}
if (hbaseAccessOptions_)
{
if (hbaseAccessOptions_->isMaxVersions())
{
hbaseAccessOptions_->setHbaseVersions
(
getTableDesc()->getClusteringIndex()->getNAFileSet()->numMaxVersions()
);
}
}
return boundExpr;
} // Scan::bindNode()
//----------------------------------------------------------------------------
RelExpr *Scan::bindExpandedMaterializedView(BindWA *bindWA, NATable *naTable)
{
CollHeap *heap = bindWA->wHeap();
MVInfoForDML *mvInfo = naTable->getMVInfo(bindWA);
QualifiedName mvName(mvInfo->getNameOfMV(), 3, heap, bindWA);
CorrName mvCorrName(mvName, heap, getTableName().getCorrNameAsString());
RelExpr *viewTree = mvInfo->buildMVSelectTree();
viewTree = new(heap) RenameTable(TRUE, viewTree, mvCorrName);
viewTree->addSelPredTree(removeSelPredTree());
RelExpr *boundExpr = viewTree->bindNode(bindWA);
if (bindWA->errStatus())
return this;
if (naTable->getClusteringIndex()->hasSyskey())
{
// In case the MV on top of this MV is an MJV, it needs the SYSKEY
// column of this MV. Since the SYSKEY column is not projected from
// the select list of this MV, just fake it. Its value will never be
// used anyway - just its existence.
ConstValue *dummySyskey = new(heap) ConstValue(0);
dummySyskey->changeType(new(heap) SQLLargeInt());
ItemExpr *dummySyskeyCol = dummySyskey->bindNode(bindWA);
if (bindWA->errStatus())
return this;
ColRefName syskeyName("SYSKEY", mvCorrName);
boundExpr->getRETDesc()->addColumn(bindWA,
syskeyName,
dummySyskeyCol->getValueId(),
SYSTEM_COLUMN);
}
bindWA->getCurrentScope()->setRETDesc(boundExpr->getRETDesc());
return boundExpr;
}
//----------------------------------------------------------------------------
// This Scan needs to project the CurrentEpoch column.
// Create and bind the CurrentEpoch function
void Scan::projectCurrentEpoch(BindWA *bindWA)
{
ItemExpr *currEpoch =
new(bindWA->wHeap()) GenericUpdateOutputFunction(ITM_CURRENTEPOCH);
currEpoch->bindNode(bindWA);
// Add it to the RETDesc
ColRefName virtualColName(InliningInfo::getEpochVirtualColName());
getRETDesc()->addColumn(bindWA, virtualColName, currEpoch->getValueId());
// And force the generator to project it even though it is not
// a column in the IndexDesc.
ValueIdSet loggingCols;
loggingCols.insert(currEpoch->getValueId());
setExtraOutputColumns(loggingCols);
}
// -----------------------------------------------------------------------
// methods for class Tuple
// -----------------------------------------------------------------------
// Genesis 10-990226-4329 and 10-000221-6676.
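// Checks that every element of a VALUES tuple is scalar. A row subquery
// of degree > 1 inside a multi-column tuple raises error 4125. A tuple
// consisting of a single row subquery is either rewritten into a ONE_ROW
// aggregate on top of the subquery (when COMP_BOOL_137 is ON) or replaced
// by the subquery tree itself.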
static RelExpr *checkTupleElementsAreAllScalar(BindWA *bindWA, RelExpr *re)
{
if (!re) return NULL;
RETDesc *rd = re->getRETDesc();
CMPASSERT(rd);
// an empty tuple is okay (dummy for Triggers, e.g.)
const ColumnDescList &cols = *rd->getColumnList();
for (CollIndex i = cols.entries(); i--; ) {
ColumnDesc *col = cols[i];
Subquery *subq = (Subquery *)cols[i]->getValueId().getItemExpr();
if (subq->isASubquery()) {
if (cols.entries() > 1 && subq->getDegree() > 1) {
// 4125 The select list of a subquery in a VALUES clause must be scalar.
*CmpCommon::diags() << DgSqlCode(-4125);
bindWA->setErrStatus();
return NULL;
}
else if (cols.entries() == 1) { // if cols.entries() > 1 && subq->getDegree() > 1
// we do not want to make the transformation below. We want to keep the
// values clause, so that it can be attached by a tsj to the subquery
// during transform.
CMPASSERT(subq->isARowSubquery());
if (CmpCommon::getDefault(COMP_BOOL_137) == DF_ON)
{
ValueIdList subqSelectList;
RETDesc *subqRD = subq->getSubquery()->getRETDesc()->nullInstantiate(
bindWA, TRUE/*forceCast for GenRelGrby*/, subqSelectList);
subq->getSubquery()->setRETDesc(subqRD);
ItemExpr *agg = new(bindWA->wHeap())
Aggregate(ITM_ONE_ROW, subqSelectList.rebuildExprTree());
RelExpr * gby = new(bindWA->wHeap())
GroupByAgg(subq->getSubquery(), REL_GROUPBY, NULL, agg);
NABoolean save = bindWA->getCurrentScope()->context()->inTupleList();
bindWA->getCurrentScope()->context()->inTupleList() = TRUE;
gby = gby->bindNode(bindWA);
bindWA->getCurrentScope()->context()->inTupleList() = save;
return gby;
}
else
{
return subq->getSubquery();
}
}
}
}
return re;
}
RelExpr *Tuple::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Used by rowsets. We search for occurrences of arrays within this node to
// replace them with scalar variables
if (bindWA->getHostArraysArea() && !bindWA->getHostArraysArea()->done())
{
RelExpr *boundExpr = bindWA->getHostArraysArea()->modifyTupleNode(this);
if (boundExpr)
return checkTupleElementsAreAllScalar(bindWA, boundExpr);
}
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
// Detach the item expression tree for the value list and bind it.
// We use counterForRowValues() and pass in parent, for DEFAULT processing
// (Ansi 7.1 SR 1).
//
CollIndex counterRowVals = 0;
CMPASSERT(!bindWA->getCurrentScope()->context()->counterForRowValues());
bindWA->getCurrentScope()->context()->counterForRowValues() = &counterRowVals;
//
setRETDesc(bindRowValues(bindWA, removeTupleExprTree(), tupleExpr(), this, FALSE));
if (bindWA->errStatus()) return this;
//
bindWA->getCurrentScope()->context()->counterForRowValues() = NULL;
// Do NOT set currently scoped RETDesc to this VALUES(...) RETDesc --
// makes "select * from t where ((values(1)),a) = (1,2);"
// fail with error 4001 "column A not found, no named tables in scope"
//
// bindWA->getCurrentScope()->setRETDesc(getRETDesc());
// Bind the base class.
//
RelExpr *boundExpr = bindSelf(bindWA);
// -- Trigger
if (bindWA->errStatus()) return this;
//
//for case 10-020716-5497
RelExpr *newExpr = checkTupleElementsAreAllScalar(bindWA, boundExpr);
// Before doing anything with newExpr, make sure it is not null; it can
// be null if there is an error in checkTupleElementsAreAllScalar.
getGroupAttr()->addCharacteristicOutputs(tupleExpr());
return newExpr;
} // Tuple::bindNode()
// -----------------------------------------------------------------------
// methods for class TupleList
// -----------------------------------------------------------------------
RelExpr *TupleList::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
RelExpr * boundExpr = NULL;
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
ExprValueId eVid(tupleExprTree());
ItemExprTreeAsList tupleList(&eVid, ITM_ITEM_LIST);
NABoolean castTo = castToList().entries() > 0;
if (tupleExprTree()->containsSubquery() ||
tupleExprTree()->containsUDF()
#ifndef NDEBUG
||
getenv("UNIONED_TUPLES")
#endif
) {
// Make a union'ed tree of all the tuples in tupleList. ##
// This is done because TupleList doesn't handle transformation ##
// of subqueries in tuples correctly yet. ##
CollIndex nTupleListEntries = (CollIndex)tupleList.entries();
for (CollIndex i = 0; i < nTupleListEntries ; i++) {
ItemExpr *ituple = tupleList[i]->child(0)->castToItemExpr();
RelExpr *rtuple = new(bindWA->wHeap()) Tuple(ituple);
rtuple = rtuple->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// If INSERTing a TupleList, make some Assign's (even tmp's work!)
// to do some error-checking for MP-NCHAR-as-single-byte target columns.
//
// Similar code exists in
// (a) the loop further down, (b) TupleList::codeGen()
// and yes, it needs to be in all three places.
//
// NOTE: tmpAssign MUST BE ON HEAP --
// Cannot be done with a stack-allocated tmpAssign
// because ItemExpr destructor will delete children,
// which we (and parent) are still referencing!
if (castTo) {
const ColumnDescList &itms = *rtuple->getRETDesc()->getColumnList();
for (CollIndex j = 0; j < (CollIndex)itms.entries(); j++) {
ValueId src = itms[j]->getValueId();
Assign *tmpAssign = new(bindWA->wHeap())
Assign(castToList()[j].getItemExpr(), src.getItemExpr());
tmpAssign = (Assign *)tmpAssign->bindNode(bindWA);
if (bindWA->errStatus()) return this;
}
}
if (!boundExpr)
boundExpr = rtuple;
else
boundExpr = new(bindWA->wHeap()) Union(boundExpr, rtuple);
} // for loop over tupleList
CMPASSERT(boundExpr);
return boundExpr->bindNode(bindWA);
} // containsSubquery
// Detach the item expression tree for the value list and bind it.
// We use counterForRowValues() and pass in parent, for DEFAULT processing
// (Ansi 7.1 SR 1).
//
CollIndex counterRowVals = 0;
CMPASSERT(!bindWA->getCurrentScope()->context()->counterForRowValues());
bindWA->getCurrentScope()->context()->counterForRowValues() = &counterRowVals;
// tupleExprTree() contains a list of tuples.
// Each tuple is also a list of values (this list may contain one item).
// Bind all values in all the tuples.
// Check that the number of elements in each tuple is the same,
// and that the types of corresponding elements are compatible.
//
numberOfTuples_ = tupleList.entries();
CollIndex prevTupleNumEntries = NULL_COLL_INDEX;
// A list of ValueIdUnions nodes. Will create as many as there are
// entries in each tuple. The valIds from corresponding elements of
// the tuples will be added so that each ValueIdUnion represents a
// column of the tuple virtual table. Used to determine the
// union-compatible type to be used for the result type produced by
// the tuplelist.
//
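// Illustrative example: for VALUES (1,'a'), (2,'b'), two ValueIdUnion
// nodes are created, one unioning 1 and 2 and one unioning 'a' and 'b',
// so that each represents one column of the tuple virtual table.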
ItemExprList vidUnions(bindWA->wHeap());
ValueIdUnion *vidUnion;
CollIndex i = 0;
CollIndex nEntries = (CollIndex)tupleList.entries() ;
for (i = 0; i < nEntries ; i++) {
counterRowVals = 0;
ValueIdList vidList;
ItemExpr *tuple = tupleList[i]->child(0)->castToItemExpr();
tuple->convertToValueIdList(vidList, bindWA, ITM_ITEM_LIST, this);
if (bindWA->errStatus())
return NULL;
if (prevTupleNumEntries == NULL_COLL_INDEX) {
prevTupleNumEntries = vidList.entries();
}
else if (prevTupleNumEntries != vidList.entries()) {
// 4126 The row-value-ctors of a VALUES must be of equal degree.
*CmpCommon::diags() << DgSqlCode(-4126);
bindWA->setErrStatus();
return NULL;
}
// Genesis 10-980611-7153
if (castTo && prevTupleNumEntries != castToList().entries()) break;
for (CollIndex j = 0; j < prevTupleNumEntries; j++) {
// If any unknown type in the tuple, coerce it to the target type.
// Also do same MP-NCHAR magic as above.
if (castTo) {
ValueId src = vidList[j];
src.coerceType(castToList()[j].getType());
// tmpAssign MUST BE ON HEAP -- see note above!
Assign *tmpAssign = new(bindWA->wHeap())
Assign(castToList()[j].getItemExpr(), src.getItemExpr());
tmpAssign = (Assign *)tmpAssign->bindNode(bindWA);
if (bindWA->errStatus())
return this;
}
if(i == 0) {
ValueIdList vids;
// Create an empty ValueIdUnion. Will create as many as there
// are entries in each tuple. Add the valIds from
// corresponding elements of the tuples so that each
// ValueIdUnion represents a column of the tuple virtual
// table.
//
vidUnion = new(bindWA->wHeap())
ValueIdUnion(vids, NULL_VALUE_ID);
vidUnion->setWasDefaultClause(TRUE);
vidUnions.insertAt(j, vidUnion);
}
// Add the valIds from corresponding elements of the tuples so
// that each ValueIdUnion represents a column of the tuple
// virtual table.
//
vidUnion = (ValueIdUnion *)vidUnions[j];
vidUnion->setSource((Lng32)i, vidList[j]);
if (NOT vidList[j].getItemExpr()->wasDefaultClause())
vidUnion->setWasDefaultClause(FALSE);
} // for loop over entries in tuple
} // for loop over tupleList
if (castTo && prevTupleNumEntries != castToList().entries())
{
// 4023 degree of row value constructor must equal that of target table
*CmpCommon::diags() << DgSqlCode(-4023)
<< DgInt0((Lng32)prevTupleNumEntries)
<< DgInt1((Lng32)castToList().entries());
bindWA->setErrStatus();
return NULL;
}
// do INFER_CHARSET fixup
if (!doInferCharSetFixup(bindWA, CharInfo::ISO88591, prevTupleNumEntries,
tupleList.entries())) {
return NULL;
}
ItemExpr * outputList = NULL;
for (CollIndex j = 0; j < prevTupleNumEntries; j++) {
// Get the ValueIdUnion node corresponding to this column of the
// tuple list virtual table
//
vidUnion = (ValueIdUnion *)vidUnions[j];
if (castTo) {
// Make sure the place holder type can support all the values in
// the tuple list and target column
//
vidUnion->setSource(numTuples(), castToList()[j]);
}
vidUnion->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
if (castTo) {
// Check that the source and target types are compatible.
// Cannot be done with a stack-allocated tmpAssign
// because ItemExpr destructor will delete children,
// which we (and parent) are still referencing!
Assign *tmpAssign = new(bindWA->wHeap())
Assign(castToList()[j].getItemExpr(), vidUnion);
if ( CmpCommon::getDefault(ALLOW_IMPLICIT_CHAR_CASTING) == DF_ON )
{
tmpAssign->tryToDoImplicitCasting(bindWA);
}
const NAType *targetType = tmpAssign->synthesizeType();
if (!targetType) {
bindWA->setErrStatus();
return NULL;
}
}
NAType *phType = vidUnion->getValueId().getType().newCopy(bindWA->wHeap());
NATypeToItem *placeHolder = new(bindWA->wHeap()) NATypeToItem(phType);
Cast * cnode;
if (castTo)
{
cnode = new(bindWA->wHeap()) Cast(placeHolder, phType, ITM_CAST, TRUE);
if (vidUnion->getValueId().getItemExpr()->wasDefaultClause())
cnode->setWasDefaultClause(TRUE);
}
else
cnode = new(bindWA->wHeap()) Cast(placeHolder, phType);
cnode->setConstFoldingDisabled(TRUE);
cnode->bindNode(bindWA);
if (!outputList)
outputList = cnode;
else
outputList = new(bindWA->wHeap()) ItemList(outputList, cnode);
}
setRETDesc(bindRowValues(bindWA, outputList, tupleExpr(), this, FALSE));
if (bindWA->errStatus()) return this;
bindWA->getCurrentScope()->context()->counterForRowValues() = NULL;
// Bind the base class.
//
boundExpr = bindSelf(bindWA);
if (bindWA->errStatus()) return this;
// need to add system columns as well....?
NABoolean inSubquery = FALSE;
BindScope *currScope = bindWA->getCurrentScope();
BindScope *prevScope = bindWA->getPreviousScope(currScope);
if (prevScope)
inSubquery = prevScope->context()->inSubquery();
if (inSubquery)
{
// need to change tupleExpr() & make it null-instantiated as RETDesc stores
// null instantiated columns (most probably these are constants, but not
// necessarily)
const ColumnDescList *viewColumns = getRETDesc()->getColumnList();
tupleExpr().clear();
for (CollIndex k=0; k < viewColumns->entries(); k++)
{
ValueId vid = (*viewColumns)[k]->getValueId();
// Special logic in Normalizer to optimize away a LEFT JOIN is not to
// be explored there, as this is not a LEFT JOIN
// Genesis case: 10-010312-1675
// If the query were to be a LEFT JOIN, we would not be here
if (vid.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL)
{
((InstantiateNull *)vid.getItemExpr())->NoCheckforLeftToInnerJoin
= TRUE;
}
tupleExpr().insert(vid);
}
}
getGroupAttr()->addCharacteristicOutputs(tupleExpr());
return boundExpr;
} // TupleList::bindNode()
// set vidList = the ith tuple of this tuplelist; return this on success, NULL on error
RelExpr* TupleList::getTuple
(BindWA *bindWA, ValueIdList& vidList, CollIndex i)
{
ExprValueId eVid(tupleExprTree());
ItemExprTreeAsList tupleList(&eVid, ITM_ITEM_LIST);
ItemExpr *tuple = tupleList[i]->child(0)->castToItemExpr();
tuple->convertToValueIdList(vidList, bindWA, ITM_ITEM_LIST, this);
return bindWA->errStatus() ? NULL : this;
}
// set needsFixup to TRUE iff tuplelist needs INFER_CHARSET fixup
RelExpr*
TupleList::needsCharSetFixup(BindWA *bindWA,
CollIndex arity,
CollIndex nTuples,
NAList<NABoolean> &strNeedsFixup,
NABoolean &needsFixup)
{
// assume it needs no INFER_CHARSET fixup until proven otherwise
needsFixup = FALSE;
if (CmpCommon::wantCharSetInference()) {
CollIndex t, x;
for (x = 0; x < arity; x++) { // initialize
strNeedsFixup.insert(FALSE);
}
// go thru tuplelist looking for unprefixed string literals
for (t = 0; t < nTuples; t++) {
// get tuple
ValueIdList tup;
if (!getTuple(bindWA, tup, t)) {
return NULL; // something wrong
}
else {
// go thru columns of tuple looking for unprefixed string literals
for (x = 0; x < arity; x++) {
if (!strNeedsFixup[x] && tup[x].inferableCharType()) {
strNeedsFixup[x] = TRUE;
needsFixup = TRUE;
}
}
}
}
}
return this; // all OK
}
// find fixable strings' inferredCharTypes
RelExpr*
TupleList::pushDownCharType(BindWA *bindWA,
enum CharInfo::CharSet cs,
NAList<const CharType*> &inferredCharType,
NAList<NABoolean> &strNeedsFixup,
CollIndex arity,
CollIndex nTuples)
{
// mimic CharType::findPushDownCharType() logic
const CharType* dctp = CharType::desiredCharType(cs);
NAList<const CharType*> sampleCharType(CmpCommon::statementHeap(),arity);
NAList<Int32> total(CmpCommon::statementHeap(),arity);
NAList<Int32> ct (CmpCommon::statementHeap(),arity);
CollIndex t, x;
for (x = 0; x < arity; x++) { // initialize
total.insert(0);
ct.insert(0);
sampleCharType.insert(NULL);
}
// go thru tuplelist looking for fixable strings' inferredCharType
for (t = 0; t < nTuples; t++) {
// get tuple
ValueIdList tup;
if (!getTuple(bindWA, tup, t)) {
return NULL; // something wrong
}
else {
// go thru tuple looking for fixable strings' inferredCharType
for (x = 0; x < arity; x++) {
if (strNeedsFixup[x]) {
total[x] += 1;
const CharType *ctp;
if (tup[x].hasKnownCharSet(&ctp)) {
ct[x] += 1;
if (sampleCharType[x] == NULL) {
sampleCharType[x] = ctp;
}
}
}
}
}
}
for (x = 0; x < arity; x++) {
if (ct[x] == total[x]) {
// all have known char set or none need fixup
inferredCharType.insert(NULL); // nothing to fix
}
else {
inferredCharType.insert(sampleCharType[x] ? sampleCharType[x] : dctp);
}
}
return this; // all OK
}
// do INFER_CHARSET fixup
RelExpr*
TupleList::doInferCharSetFixup(BindWA *bindWA,
enum CharInfo::CharSet cs,
CollIndex arity,
CollIndex nTuples)
{
NABoolean needsFixup;
NAList<NABoolean> strNeedsFixup(CmpCommon::statementHeap(),arity);
RelExpr *result = needsCharSetFixup
(bindWA, arity, nTuples, strNeedsFixup, needsFixup);
if (!result || // something went wrong
!needsFixup) { // no INFER_CHARSET fixup needed
return result;
}
else { // some string literal needs INFER_CHARSET fixup
NAList<const CharType*> inferredCharType(CmpCommon::statementHeap(),arity);
if (!pushDownCharType(bindWA, cs, inferredCharType,
strNeedsFixup, arity, nTuples)) {
return NULL; // something went wrong
}
else {
// go thru tuplelist fixing up literals' char sets
CollIndex t, x;
for (t = 0; t < nTuples; t++) {
// get tuple
ValueIdList tup;
if (!getTuple(bindWA, tup, t)) {
return NULL; // something went wrong
}
else {
// go thru tuple fixing up literals' char sets
for (x = 0; x < arity; x++) {
if (strNeedsFixup[x] && tup[x].inferableCharType()) {
// coerce literal to have column's inferred char set
tup[x].coerceType(*(inferredCharType[x]), NA_CHARACTER_TYPE);
}
}
}
}
}
}
return this;
}
// -----------------------------------------------------------------------
// member functions for class RenameTable
// -----------------------------------------------------------------------
RelExpr *RenameTable::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc()); // -- Triggers
return this;
}
//
// Create a new table name scope.
//
bindWA->getCurrentScope()->xtnmStack()->createXTNM();
// code to enforce the specification that if an index expression is specified
// with a rowset and the index is included in the derived table, the index
// must be the last column of the derived column list
if((getTableName().getCorrNameAsString() != "Rowset___") && (getArity() != 0))
{
if(child(0)->getOperatorType() == REL_ROWSET)
{
NAString indexExpr(bindWA->wHeap());
NAString lastString("", bindWA->wHeap());
ItemExpr *tempPtr;
indexExpr = ((Rowset *)getChild(0))->getIndexName();
if((indexExpr != "") && newColNamesTree_)
{
for (tempPtr = newColNamesTree_; tempPtr; tempPtr=tempPtr->child(1))
{
Int32 arity = tempPtr->getArity();
if(arity == 1)
{
lastString = ((RenameCol *)tempPtr)->getNewColRefName()->getColName();
}
}
if(indexExpr != lastString)
{
*CmpCommon::diags() << DgSqlCode(-30012)
<< DgString0(indexExpr)
<< DgString1(getTableName().getCorrNameAsString());
bindWA->setErrStatus();
return NULL;
}
}
}
}
//
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
//
// Remove the table name scope.
//
bindWA->getCurrentScope()->xtnmStack()->removeXTNM();
//
// Create the result table.
//
RETDesc *resultTable = new (bindWA->wHeap()) RETDesc(bindWA);
const RETDesc &sourceTable = *child(0)->getRETDesc();
const CorrName &tableName = getTableName();
ItemExpr *derivedColTree = removeColNameTree();
ItemExprList derivedColList(bindWA->wHeap());
const NAString *simpleColNameStr;
CollIndex i;
//
// Check that there are an equal number of columns to values.
//
if (derivedColTree) {
derivedColList.insertTree(derivedColTree);
if (derivedColList.entries() != sourceTable.getDegree()) {
// 4016 The number of derived columns must equal the degree of the derived table.
*CmpCommon::diags() << DgSqlCode(-4016)
#pragma nowarn(1506) // warning elimination
<< DgInt0(derivedColList.entries()) << DgInt1(sourceTable.getDegree());
#pragma warn(1506) // warning elimination
bindWA->setErrStatus();
delete resultTable;
return this;
}
}
//
// Insert the derived column names into the result table.
// By ANSI 6.3 SR 6 (applies to explicit derived column list),
// duplicate names are not allowed.
// If user did not specify a derived column name list,
// expose the select list's column names (implicit derived column list);
// ANSI does not say that these cannot be duplicates --
// if there's a later (i.e. in an outer scope) reference to a duplicately
// named column, ColReference::bindNode will issue an error
// (in particular, if all references are to constants, e.g. "count(*)",
// then duplicates are not disallowed in the implicit derived column list!).
//
// When Create View DDL uses this Binder, we must enforce
// ANSI 11.19 SR 8 + 9, clearly disallowing dups/ambigs
// (and disallowing implem-dependent names, i.e. our unnamed '(expr)' cols!).
//
for (i = 0; i < sourceTable.getDegree(); i++) {
//
if (derivedColTree) { // explicit derived column list
CMPASSERT(derivedColList[i]->getOperatorType() == ITM_RENAME_COL);
simpleColNameStr = &((RenameCol *) derivedColList[i])->
getNewColRefName()->getColName();
if (*simpleColNameStr != "") { // named column, not an expression
if (resultTable->findColumn(*simpleColNameStr)) {
ColRefName errColName(*simpleColNameStr, tableName);
// 4017 Derived column name $ColumnName was specified more than once.
*CmpCommon::diags() << DgSqlCode(-4017)
<< DgColumnName(errColName.getColRefAsAnsiString());
bindWA->setErrStatus();
delete resultTable;
return this;
}
}
} else // implicit derived column list
simpleColNameStr = &sourceTable.getColRefNameObj(i).getColName();
//
ColRefName colRefName(*simpleColNameStr, tableName);
ValueId valId = sourceTable.getValueId(i);
resultTable->addColumn(bindWA, colRefName, valId);
} // for-loop
//
// Insert system columns similarly, completely ignoring dup names.
//
const ColumnDescList &sysColList = *sourceTable.getSystemColumnList();
for (i = 0; i < sysColList.entries(); i++) {
simpleColNameStr = &sysColList[i]->getColRefNameObj().getColName();
if (NOT resultTable->findColumn(*simpleColNameStr)) {
ColRefName colRefName(*simpleColNameStr, tableName);
ValueId valId = sysColList[i]->getValueId(); // (slight diff from the
resultTable->addColumn(bindWA, colRefName, valId, SYSTEM_COLUMN); //above)
}
}
setRETDesc(resultTable);
// MVs --
// When binding INTERNAL REFRESH commands, the SYSKEY and @OP columns should
// be propagated to the scope above, even when they are not specified in the
// select list.
if (bindWA->isPropagateOpAndSyskeyColumns())
getRETDesc()->propagateOpAndSyskeyColumns(bindWA, FALSE);
bindWA->getCurrentScope()->setRETDesc(resultTable);
//
// Insert the table name into the XTNM,
// casting away constness on the correlation name
// in order to have default cat+sch filled in.
//
bindWA->getCurrentScope()->getXTNM()->insertNames(bindWA,
(CorrName &)tableName);
if (bindWA->errStatus()) {
delete resultTable;
return this;
}
if (getViewNATable())
{
const NATable * natable = getViewNATable() ;
const ColumnDescList &columnsRET = *(resultTable->getColumnList());
for (i = 0; i < natable->getColumnCount(); i++)
{
columnsRET[i]->setViewColPosition(
((natable->getNAColumnArray())[i])->getPosition());
columnsRET[i]->setViewFileName((const char*)natable->getViewFileName());
}
}
//
// Bind the base class.
//
return bindSelf(bindWA);
} // RenameTable::bindNode()
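//
// Illustrative example (hypothetical query): a RenameTable node binds a
// derived table with a derived column list, e.g.
//   SELECT x, y FROM (SELECT a, b FROM t) AS d(x, y);
// The explicit list (x, y) must match the degree of the subquery
// (error 4016 above) and may not contain duplicate names (error 4017).
//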
// -----------------------------------------------------------------------
// member functions for class RenameReference
// -----------------------------------------------------------------------
// This method replaces the RETDesc of the current scope, with a new RETDesc
// that contains the columns of the transition values (OLD@ and NEW@) but
// with correlation names specified by the user in the REFERENCING clause
// of the row trigger.
void RenameReference::prepareRETDescWithTableRefs(BindWA *bindWA)
{
CollIndex refsToFind = getRefList().entries();
CollIndex refsFound = 0;
RETDesc *retDesc;
// First find the NEW@ and OLD@ tables in one of the scopes.
BindScope *scope = bindWA->getCurrentScope();
// For each BindScope,
while ((scope!=NULL) && (refsToFind > refsFound))
{ // until we find all the references.
retDesc = scope->getRETDesc();
// Skip if an empty RETDesc
if ((retDesc!=NULL) && !retDesc->isEmpty())
{
// For each reference to change
for (CollIndex i=0; i<refsToFind; i++)
// Find the table name in the RETDesc, and save a pointer to its
// column list in the TableRefName object.
if(getRefList().at(i).lookupTableName(retDesc))
refsFound++;
}
// Get the next BindScope to search.
scope = bindWA->getPreviousScope(scope);
} // while not done
RETDesc *resultTable = new (bindWA->wHeap()) RETDesc(bindWA);
// Create an empty RETDesc for the current scope.
bindWA->getCurrentScope()->setRETDesc(resultTable);
// For each table reference, add to the RETDesc of the current scope the
// columns of the referenced tables, with the new referencing names as
// correlation names.
for (CollIndex i=0; i<refsToFind; i++)
getRefList()[i].bindRefColumns(bindWA);
}
// The RenameReference node renames values flowing down through it.
// It is used above a row trigger body, to implement the REFERENCING clause
// of the trigger definition - renaming the OLD and NEW transition variables
// to user specified names.
//
// This bind is top-down, so we first prepare the RETDesc, and then bind
// the children using this RETDesc.
RelExpr *RenameReference::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Save the current RETDesc.
RETDesc *prevRETDesc = bindWA->getCurrentScope()->getRETDesc();
// Replace the RETDesc of the current scope with one that contains the user
// names (MY_NEW, MY_OLD) instead of the reference names (NEW@, OLD@).
prepareRETDescWithTableRefs(bindWA);
// Bind the child nodes, in a new BindScope.
// If we don't open a new scope here, the bindChildren() method will
// overwrite the RETDesc of the current scope with NULL.
bindWA->initNewScope();
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
// Bind the base class.
RelExpr *boundNode = bindSelf(bindWA);
// Save this scope's outer references before removing the scope.
const ValueIdSet myOuterRefs = bindWA->getCurrentScope()->getOuterRefs();
setRETDesc(bindWA->getCurrentScope()->getRETDesc());
bindWA->removeCurrentScope();
bindWA->getCurrentScope()->setRETDesc(prevRETDesc);
// Now merge the outer references into the previous scope.
bindWA->getCurrentScope()->mergeOuterRefs(myOuterRefs, FALSE);
return boundNode;
} // RenameReference::bindNode()
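//
// Illustrative example (hypothetical trigger): for
//   CREATE TRIGGER trg AFTER UPDATE ON t
//   REFERENCING OLD AS my_old NEW AS my_new
//   FOR EACH ROW INSERT INTO log VALUES (my_old.a, my_new.a);
// the RenameReference node above the trigger body exposes the transition
// tables OLD@ and NEW@ under the user-specified names MY_OLD and MY_NEW.
//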
// -----------------------------------------------------------------------
// member functions for class BeforeTrigger
// -----------------------------------------------------------------------
//////////////////////////////////////////////////////////////////////////////
// Find the name and position of a column that is SET by this before trigger.
// The targetColName is an output parameter, saving the bindSetClause()
// method the work of finding the column name.
// The naTable parameter is NULL during DML, and is only used for DDL
// semantic checks.
//////////////////////////////////////////////////////////////////////////////
Lng32 BeforeTrigger::getTargetColumn(CollIndex i, // Index of Assign expr.
ColRefName* targetColName,
const NATable *naTable)
{
ItemExpr *currentAssign = setList_->at(i);
CMPASSERT(currentAssign->getOperatorType() == ITM_ASSIGN);
ItemExpr *targetColReference = currentAssign->child(0);
CMPASSERT(targetColReference->getOperatorType() == ITM_REFERENCE);
ColRefName& targetColRefName =
((ColReference *)targetColReference)->getColRefNameObj();
if (targetColName != NULL) // return the column name to the binder.
*targetColName = targetColRefName;
const NAString& colName = targetColRefName.getColName();
// If called during DML binding of the BeforeTrigger node, the
// column position will not be used, because the check for duplicate
// SET columns was done in DDL time.
if (naTable == NULL)
return 0;
// We get here from DDL binding of the BeforeTrigger node, or from
// the Inlining code.
NAColumn *colObj = naTable->getNAColumnArray().getColumn(colName);
// If colObj is NULL, it's a bad column name.
if (colObj == NULL)
return -1;
return colObj->getPosition();
}
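//
// Illustrative example (hypothetical SET clause): for the i-th assignment
// in "SET my_new.b = my_new.a + 1", this method returns the position of
// column B in the subject table during DDL or inlining, and 0 during DML,
// where only the returned targetColName is of interest.
//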
//////////////////////////////////////////////////////////////////////////////
// This method is called only during DDL (CREATE TRIGGER) of a before trigger
// with a SET clause.
// Each of the columns updated by the SET clause goes through several
// semantic checks, that cannot be done in the parser.
//////////////////////////////////////////////////////////////////////////////
void BeforeTrigger::doSetSemanticChecks(BindWA *bindWA, RETDesc *origRETDesc)
{
UpdateColumns localCols = UpdateColumns(FALSE);
ColRefName currentCol;
const NATable *scanNaTable = NULL;
NABoolean isUpdateOp=FALSE;
Scan *scanNode = getLeftmostScanNode();
CMPASSERT(scanNode != NULL);
scanNaTable = scanNode->getTableDesc()->getNATable();
CorrName oldCorr(OLDCorr);
if (origRETDesc->getQualColumnList(oldCorr))
isUpdateOp = TRUE;
for (CollIndex i=0; i<setList_->entries(); i++)
{
// Get the name and position of the Assign target column.
Lng32 targetColPosition = getTargetColumn(i, &currentCol, scanNaTable);
if (!currentCol.getCorrNameObj().isATriggerTransitionName(bindWA, TRUE))
{
// 11017 Left hand of SET assignment must be qualified with the name of the NEW transition variable
*CmpCommon::diags() << DgSqlCode(-11017) ; // must be NEW name
bindWA->setErrStatus();
return;
}
if (targetColPosition == -1)
{
// 11022 Column $0~ColumnName is not a column in table $0~TableName
NAString tableName = scanNaTable->getTableName().getQualifiedNameAsString();
*CmpCommon::diags() << DgSqlCode(-11022)
<< DgColumnName(currentCol.getColName())
<< DgTableName(tableName);
bindWA->setErrStatus();
return;
}
// We need to check for duplicate SET columns in DDL time only.
if (localCols.contains(targetColPosition))
{
// 4022 column specified more than once
*CmpCommon::diags() << DgSqlCode(-4022)
<< DgColumnName(currentCol.getColName());
bindWA->setErrStatus();
return;
}
localCols.addColumn(targetColPosition);
// Is this a SET into a column that is part of the clustering key?
// This is only allowed on Inserts, not on Updates (Deletes never get here).
if (isUpdateOp &&
scanNaTable->getNAColumnArray().getColumn(targetColPosition)->isClusteringKey())
{
// 4033 Column $0~ColumnName is a primary or clustering key column and cannot be updated.
*CmpCommon::diags() << DgSqlCode(-4033)
<< DgColumnName(currentCol.getColName());
bindWA->setErrStatus();
return;
}
}
}
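//
// Illustrative examples of statements rejected above (hypothetical):
//   SET b = 1;                        -- 11017: not qualified with NEW name
//   SET my_new.nosuch = 1;            -- 11022: no such column
//   SET my_new.b = 1, my_new.b = 2;   -- 4022: column set more than once
//   SET my_new.pk = 1;                -- 4033: clustering key, on an update
//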
//////////////////////////////////////////////////////////////////////////////
// This method is called for before triggers that use the SET clause.
// For each column to be set using SET MYNEW.<colname> = <setExpr> do:
// 1. Find NEW@.<colname> in origRETDesc.
// 2. Verify that there is such a column, and that the user is allowed to
// change it.
// 3. Get the column's ItemExpr expression, and save it in passThruExpr.
// 4. Create an ItemExpr tree as follows:
// case
// |
// IfThenElse
// / | \
// condition setExpr passThruExpr
//
// where condition is the WHEN clause expression.
// 5. Bind this new expression in the RETDesc of the current scope.
// 6. remove NEW@.<colname> from origRETDesc, and re-insert it as the new
// expression.
//////////////////////////////////////////////////////////////////////////////
void BeforeTrigger::bindSetClause(BindWA *bindWA, RETDesc *origRETDesc, CollHeap *heap)
{
// Semantic checks are only needed during DDL.
if (bindWA->inDDL())
{
doSetSemanticChecks(bindWA, origRETDesc);
if (bindWA->errStatus())
return;
}
CorrName newCorr(NEWCorr);
const TableRefName *newRefName = getRefList().findTable(newCorr);
CMPASSERT(newRefName!=NULL);
CorrName newRef = newRefName->getTableCorr();
ColRefName currentCol;
// For each Assign expression in the list.
for (CollIndex i=0; i<setList_->entries(); i++)
{
// Get the name and position of the Assign target column.
Lng32 targetColPosition = getTargetColumn(i, &currentCol, NULL);
currentCol.getCorrNameObj() = newRef;
ItemExpr *setExpr = setList_->at(i)->child(1);
// Find the current value of this NEW@ column.
ColumnNameMap *currentColExpr = origRETDesc->findColumn(currentCol);
CMPASSERT(currentColExpr != NULL); // Otherwise error 11022 would have been raised above.
ItemExpr *passThruExpr = currentColExpr->getValueId().getItemExpr();
ItemExpr *colExpr = NULL;
if (whenClause_ == NULL)
// After we add the support for reading the trigger status from
// the resource fork, and adding it to the condition, we should
// never get here.
colExpr = setExpr;
else
{
IfThenElse *ifExpr = new(heap)
IfThenElse(whenClause_, setExpr, passThruExpr);
colExpr = new(heap) Case(NULL, ifExpr);
}
colExpr = colExpr->bindNode(bindWA);
if (bindWA->errStatus())
return;
// Now remove and re-insert the column to the original RETDesc,
// that will be restored at the bottom of the method.
currentCol.getCorrNameObj() = newCorr;
origRETDesc->delColumn(bindWA, currentCol, USER_COLUMN);
origRETDesc->addColumn(bindWA, currentCol, colExpr->getValueId());
// force binding of the assign here so that type incompatibility is caught
// during DDL
if (bindWA->inDDL())
{
ItemExpr *currentAssign = setList_->at(i);
CMPASSERT(currentAssign->getOperatorType() == ITM_ASSIGN);
currentAssign->bindNode(bindWA);
}
}
}
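//
// Illustrative rewrite (hypothetical trigger): for
//   WHEN (my_new.a > 0) SET my_new.b = my_new.a + 1
// the NEW@.B column in the RETDesc is replaced by the bound expression
//   CASE WHEN my_new.a > 0 THEN my_new.a + 1 ELSE NEW@.B END
// so rows that fail the WHEN condition pass the old value through.
//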
//////////////////////////////////////////////////////////////////////////////
// This method is called for before triggers that use the SIGNAL clause.
// 1. Find the "virtual execId column" in origRETDesc.
// 3. Get the column's ItemExpr expression, and save it in passThruExpr.
// 4. Create an ItemExpr tree as follows:
// case
// |
// IfThenElse
// / | \
// AND passThruExpr passThruExpr
// / \
// condition RaiseError
//
// where condition is the WHEN clause expression, and RaiseError is the
// SIGNAL expression.
// 4. Bind this new expression in the RETDesc of the current scope.
// 5. remove the "virtual execId column" from origRETDesc, and re-insert it as
// the new expression.
//
// The value of the expression is always the passThruExpr, for type
// compatibility, since if the SIGNAL fires, the actual value returned does
// not matter. The AND will evaluate the RaiseError only if the condition
// evaluates to TRUE.
//////////////////////////////////////////////////////////////////////////////
void BeforeTrigger::bindSignalClause(BindWA *bindWA, RETDesc *origRETDesc, CollHeap *heap)
{
if (bindWA->inDDL())
{
// In DDL time (CREATE TRIGGER) all we need is to bind the signal
// expression for semantic checks.
signal_->bindNode(bindWA);
if (bindWA->errStatus())
return;
}
else
{
// The SIGNAL expression is piggy-backed on the Unique ExecuteID
// value inserted into the temp table.
ColumnNameMap *execIdCol =
origRETDesc->findColumn(InliningInfo::getExecIdVirtualColName());
CMPASSERT(execIdCol != NULL);
const ColRefName& ExecIdColName = execIdCol->getColRefNameObj();
ItemExpr *passThruExpr = execIdCol->getValueId().getItemExpr();
ItemExpr *whenAndSignal = NULL;
// Case 10-040604-5021:
// General AND logic uses "short circuiting" as follows: if the
// left side is FALSE, evaluation of the right side is skipped, and
// the result returned is FALSE. The following expression depends on
// evaluation of the right side being skipped whenever the left side
// is NOT TRUE, (i.e., FALSE or NULL). Therefore, an IS TRUE unary
// predicate must be placed above the actual WHEN condition. Otherwise,
// the signal will fire when the WHEN condition evaluates to NULL.
if (whenClause_ != NULL)
{
if (whenClause_->getOperatorType() == ITM_AND ||
whenClause_->getOperatorType() == ITM_OR)
{
ItemExpr *isTrueExpr = new (heap) UnLogic(ITM_IS_TRUE, whenClause_);
whenAndSignal = new(heap) BiLogic(ITM_AND, isTrueExpr, signal_);
}
else
{
whenAndSignal = new(heap) BiLogic(ITM_AND, whenClause_, signal_);
}
}
else
// After we add the support for reading the trigger status from
// the resource fork, and adding it to the condition, we should
// never get here.
whenAndSignal = signal_;
// For type compatibility, the original value is used whatever the
// WHEN clause evaluates to. However, if it evaluates to TRUE, the
// evaluation of the signal expression will throw an SQLERROR.
ItemExpr *condSignalExpr = new(heap)
Case(NULL, new(heap)
IfThenElse(whenAndSignal, passThruExpr, passThruExpr));
condSignalExpr = condSignalExpr->bindNode(bindWA);
if (bindWA->errStatus())
return;
// Now delete the original "virtual column" from the RETDesc, and
// re-insert it with the new value.
origRETDesc->delColumn(bindWA, ExecIdColName, USER_COLUMN);
origRETDesc->addColumn(bindWA, ExecIdColName, condSignalExpr->getValueId());
}
}
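//
// Illustrative rewrite (hypothetical trigger): for
//   WHEN (my_new.a < 0 AND my_new.b < 0) SIGNAL SQLSTATE '75001' ...
// the execId virtual column conceptually becomes
//   CASE WHEN ((my_new.a < 0 AND my_new.b < 0) IS TRUE) AND <RaiseError>
//        THEN <execId> ELSE <execId> END
// The CASE always yields the original execId value; the signal fires as a
// side effect of evaluating the right side of the AND, which short-circuit
// logic reaches only when the WHEN condition IS TRUE.
//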
//////////////////////////////////////////////////////////////////////////////
// This bind is bottom-up, so we first bind the children, and then use
// and change the RETDesc they created.
//////////////////////////////////////////////////////////////////////////////
RelExpr *BeforeTrigger::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
// Now we know that we have the columns of OLD@ and NEW@ in the RETDesc
// of the current scope. Save this scope so we can update it and restore
// it when we are done.
RETDesc *origRETDesc = bindWA->getCurrentScope()->getRETDesc();
CollHeap *heap = bindWA->wHeap();
CollIndex refsToFind = getRefList().entries();
// For each reference to change, Find the table name in the RETDesc,
// and save a pointer to its column list in the TableRefName object.
CollIndex i=0;
for (i=0; i<refsToFind; i++)
getRefList().at(i).lookupTableName(origRETDesc);
// Create an empty RETDesc for the current scope.
// It will contain the names the user specified (MY_NEW, MY_OLD) for the
// OLD@ and NEW@ transition variables, and will be used to bind this
// node only.
bindWA->getCurrentScope()->setRETDesc(new(heap) RETDesc(bindWA));
// For each table reference, add to the RETDesc of the current scope,
// the columns of the referenced tables with the new referencing names
// as correlation names.
for (i=0; i<refsToFind; i++)
getRefList().at(i).bindRefColumns(bindWA);
// First bind the condition. The ValueId will be used later (possibly
// multiple times) so that during execution, the expression will be
// evaluated only once.
if (whenClause_ != NULL)
{
whenClause_ = whenClause_->bindNode(bindWA);
if (bindWA->errStatus())
return this;
}
// Use the bound condition to prepare the conditional expression
// for each column modified by the trigger (SET MY_NEW.a = ...)
if (setList_ != NULL)
bindSetClause(bindWA, origRETDesc, heap);
// Use the bound condition to prepare the conditional SIGNAL
// expression, on the ExecuteId "virtual column".
if (signal_ != NULL)
bindSignalClause(bindWA, origRETDesc, heap);
if (bindWA->errStatus())
return this;
// We don't need the RETDesc of the current scope anymore. Restore the
// original RETDesc with the updated columns.
bindWA->getCurrentScope()->setRETDesc(origRETDesc);
if (parentTSJ_ != NULL)
{
// If this is the top most before trigger, save a copy of the RETDesc
// for use by the transformNode() pass.
RETDesc *savedRETDesc = new(heap) RETDesc(bindWA, *origRETDesc);
setRETDesc(savedRETDesc);
}
//
// Bind the base class.
//
RelExpr *boundNode = bindSelf(bindWA);
return boundNode;
} // BeforeTrigger::bindNode()
// -----------------------------------------------------------------------
// member functions for class Insert
// -----------------------------------------------------------------------
// LCOV_EXCL_START - cnu
static void bindInsertRRKey(BindWA *bindWA, Insert *insert,
ValueIdList &sysColList, CollIndex i)
{
// For a KS round-robin partitioned table, the system column
// (for now there is only one, SYSKEY) is initialized via the expression
// "ProgDistribKey(partNum, rowPos, totalNumParts)".
//
const NAFileSet *fs =
insert->getTableDesc()->getClusteringIndex()->getNAFileSet();
// For now, round-robin partitioned tables are always stored in
// key-sequenced files, and there is only one system column (SYSKEY)
// which is at the beginning of the record.
CMPASSERT(fs->isKeySequenced() && i==0);
CollHeap *heap = bindWA->wHeap();
// Host variables that provide access to partition number,
// row position, and total number of partitions --
// supplied at run-time by the executor insert node.
//
ItemExpr *partNum = new (heap)
HostVar("_sys_hostVarInsertPartNum",
new (heap) SQLInt(FALSE,FALSE), // int unsigned not null
TRUE // is system-generated
);
partNum->synthTypeAndValueId();
insert->partNumInput() = partNum->getValueId(); // for later use in codeGen
ItemExpr *rowPos = new (heap)
HostVar("_sys_hostVarInsertRowPos",
new (heap) SQLInt(FALSE,FALSE), // int unsigned not null
TRUE // is system-generated
);
rowPos->synthTypeAndValueId();
insert->rowPosInput() = rowPos->getValueId(); // for later use in codeGen
ItemExpr *totNumParts = new (heap)
HostVar("_sys_hostVarInsertTotNumParts",
new (heap) SQLInt(FALSE,FALSE), // int unsigned not null
TRUE // is system-generated
);
totNumParts->synthTypeAndValueId();
insert->totalNumPartsInput() = totNumParts->getValueId(); // for later use
// Generate expression to compute a round-robin key. Parameters to
// ProgDistribKey are the partition number, the row position (which
// is chosen randomly; the insert node will retry if a number is
// selected that is already in use), and the total number of
// partitions.
ItemExpr *rrKey = new (heap) ProgDistribKey(partNum, rowPos, totNumParts);
// Build and set round-robin key expression.
Assign *assign = new (heap)
Assign(sysColList[i].getItemExpr(), rrKey, FALSE /*not user-specified*/);
assign->bindNode(bindWA);
insert->rrKeyExpr() = assign->getValueId();
} // bindInsertRRKey
// LCOV_EXCL_STOP
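//
// Illustrative shape of the expression built above: for a round-robin
// partitioned table the SYSKEY assign is, conceptually,
//   SYSKEY = ProgDistribKey(partNum, rowPos, totalNumParts)
// where all three operands are system-generated host variables supplied
// at run time by the executor insert node (rowPos is chosen randomly and
// retried on collision).
//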
RelExpr *Insert::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Set local binding flags
setInUpdateOrInsert(bindWA, this, REL_INSERT);
// The 8108 (unique constraint on an ID column) error must be raised
// only for system generated IDENTITY values and not for
// user generated ID values. We use the GenericUpdate::identityColumnUniqueIndex_
// to indicate to the EID that 8108 should be raised in place of 8102.
// This variable is used to indicate that there is an IDENTITY column
// in the table for which the system is generating the value.
// someNonDefaultValuesSpecified is NULL if "DEFAULT VALUES" was specified,
// non-NULL if a query-expr child was specified: VALUES.., TABLE.., SELECT..
RelExpr *someNonDefaultValuesSpecified = child(0);
// Set flag for firstN in context
if (child(0) && child(0)->getOperatorType() == REL_ROOT) // Indicating subquery
if (child(0)->castToRelExpr() &&
child(0)->castToRelExpr()->getFirstNRows() >= 0)
if (bindWA &&
bindWA->getCurrentScope() &&
bindWA->getCurrentScope()->context())
bindWA->getCurrentScope()->context()->firstN() = TRUE;
if (NOT someNonDefaultValuesSpecified) { // "DEFAULT VALUES" specified
// Kludge up a dummy child before binding the GenericUpdate tree
setChild(0, new(bindWA->wHeap()) Tuple(new(bindWA->wHeap()) SystemLiteral(0)));
}
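// Illustrative note: "INSERT INTO t DEFAULT VALUES;" arrives here with no
// source child; the dummy single-row Tuple created above gives
// GenericUpdate::bindNode something to bind, and every target column is
// then filled in from its default further below.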
// Bind the GenericUpdate tree.
//
RETDesc *incomingRETDescForSource = bindWA->getCurrentScope()->getRETDesc();
RelExpr *boundExpr = GenericUpdate::bindNode(bindWA);
if (bindWA->errStatus())
return boundExpr;
const NAFileSet* fileset = getTableDesc()->getNATable()->getClusteringIndex();
const NAColumnArray& partKeyCols = fileset->getPartitioningKeyColumns();
if (getTableDesc()->getNATable()->isHiveTable())
{
if (partKeyCols.entries() > 0)
{
// Insert into partitioned tables would require computing the target
// partition directory name, something we don't support yet.
*CmpCommon::diags() << DgSqlCode(-4222)
<< DgString0("Insert into partitioned Hive tables");
bindWA->setErrStatus();
return this;
}
RelExpr * mychild = child(0);
const HHDFSTableStats* hTabStats =
getTableDesc()->getNATable()->getClusteringIndex()->getHHDFSTableStats();
const char * hiveTablePath;
NAString hostName;
Int32 hdfsPort;
NAString tableDir;
NABoolean result;
char fldSep[2];
char recSep[2];
memset(fldSep,'\0',2);
memset(recSep,'\0',2);
fldSep[0] = hTabStats->getFieldTerminator();
recSep[0] = hTabStats->getRecordTerminator();
// don't rely on timeouts to invalidate the HDFS stats for the target table,
// make sure that we invalidate them right after compiling this statement,
// at least for this process
((NATable*)(getTableDesc()->getNATable()))->setClearHDFSStatsAfterStmt(TRUE);
// inserting into tables with multiple partitions is not yet supported
CMPASSERT(hTabStats->entries() == 1);
hiveTablePath = (*hTabStats)[0]->getDirName();
result = ((HHDFSTableStats* )hTabStats)->splitLocation
(hiveTablePath, hostName, hdfsPort, tableDir) ;
if (!result) {
*CmpCommon::diags() << DgSqlCode(-4224)
<< DgString0(hiveTablePath);
bindWA->setErrStatus();
return this;
}
// specifying a list of column names to insert to is not yet supported
if (insertColTree_) {
*CmpCommon::diags() << DgSqlCode(-4223)
<< DgString0("Target column list for insert into Hive table");
bindWA->setErrStatus();
return this;
}
// NABoolean isSequenceFile = (*hTabStats)[0]->isSequenceFile();
const NABoolean isSequenceFile = hTabStats->isSequenceFile();
RelExpr * unloadRelExpr =
new (bindWA->wHeap())
FastExtract( mychild,
new (bindWA->wHeap()) NAString(hiveTablePath),
new (bindWA->wHeap()) NAString(hostName),
hdfsPort,
getTableDesc(),
new (bindWA->wHeap()) NAString(getTableName().getQualifiedNameObj().getObjectName()),
FastExtract::FILE,
bindWA->wHeap());
RelExpr * boundUnloadRelExpr = unloadRelExpr->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
((FastExtract*)boundUnloadRelExpr)->setRecordSeparator(recSep);
((FastExtract*)boundUnloadRelExpr)->setDelimiter(fldSep);
((FastExtract*)boundUnloadRelExpr)->setOverwriteHiveTable(getOverwriteHiveTable());
((FastExtract*)boundUnloadRelExpr)->setSequenceFile(isSequenceFile);
if (getOverwriteHiveTable())
{
RelExpr * newRelExpr = new (bindWA->wHeap())
ExeUtilHiveTruncate(getTableName(), NULL, bindWA->wHeap());
//new root to prevent error 4056 when binding
newRelExpr = new (bindWA->wHeap()) RelRoot(newRelExpr);
RelExpr *blockedUnion = new (bindWA->wHeap()) Union(newRelExpr, boundUnloadRelExpr);
((Union*)blockedUnion)->setBlockedUnion();
((Union*)blockedUnion)->setSerialUnion();
RelExpr *boundBlockedUnion = blockedUnion->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
return boundBlockedUnion;
}
return boundUnloadRelExpr;
}
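// Illustrative rewrite (hypothetical statement): an
//   INSERT INTO hive.hive.t SELECT * FROM trafodion.sch.s;
// is bound above as a FastExtract that writes delimited (or sequence)
// files into the Hive table's HDFS directory; with the overwrite option,
// a blocked union first runs ExeUtilHiveTruncate, then the FastExtract.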
if(!(getOperatorType() == REL_UNARY_INSERT &&
(child(0)->getOperatorType() == REL_TUPLE || // VALUES (1,'b')
child(0)->getOperatorType() == REL_TUPLE_LIST || // VALUES (1,'b'),(2,'Y')
child(0)->getOperatorType() == REL_UNION)) && // VALUES with subquery
(getOperatorType() != REL_LEAF_INSERT))
{
setInsertSelectQuery(TRUE);
}
// if table has a lob column, then fix up any reference to LOBinsert
// function in the source values list.
//
if ((getOperatorType() == REL_UNARY_INSERT) &&
(getTableDesc()->getNATable()->hasLobColumn()) &&
(child(0)->getOperatorType() == REL_TUPLE || // VALUES (1,'b')
child(0)->getOperatorType() == REL_TUPLE_LIST)) // VALUES (1,'b'),(2,'Y')
{
if (child(0)->getOperatorType() == REL_TUPLE_LIST)
{
TupleList * tl = (TupleList*)(child(0)->castToRelExpr());
for (CollIndex x = 0; x < (UInt32)tl->numTuples(); x++)
{
ValueIdList tup;
if (!tl->getTuple(bindWA, tup, x))
{
bindWA->setErrStatus();
return boundExpr; // something went wrong
}
for (CollIndex n = 0; n < tup.entries(); n++)
{
ItemExpr * ie = tup[n].getItemExpr();
if (ie->getOperatorType() == ITM_LOBINSERT)
{
// cannot have this function in a values list with multiple
// tuples. Use a single tuple.
*CmpCommon::diags() << DgSqlCode(-4483);
bindWA->setErrStatus();
return boundExpr;
// Dead code: the statements below can never be reached after the
// return above, so they are commented out.
// LOBinsert * li = (LOBinsert*)ie;
// li->insertedTableObjectUID() =
//   getTableDesc()->getNATable()->objectUid().castToInt64();
// li->lobNum() = n;
// li->insertedTableSchemaName() =
//   getTableDesc()->getNATable()->getTableName().getSchemaName();
}
} // for
} // for
} // if tuplelist
} // if
// Prepare for any IDENTITY column checking later on
NAString identityColumnName;
NABoolean identityColumnGeneratedAlways = FALSE;
identityColumnGeneratedAlways =
getTableDesc()->isIdentityColumnGeneratedAlways(&identityColumnName);
if ((getTableName().isVolatile()) &&
(CmpCommon::context()->sqlSession()->volatileSchemaInUse()) &&
(getTableName().getSpecialType() == ExtendedQualName::NORMAL_TABLE) &&
((ActiveSchemaDB()->getDefaults()).getAsLong(IMPLICIT_UPD_STATS_THRESHOLD) > -1) &&
(bindWA->isInsertSelectStatement()) &&
(NOT getTableDesc()->getNATable()->isVolatileTableMaterialized()))
{
if (NOT Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
// if (NOT Get_SqlParser_Flags(NO_IMPLICIT_VOLATILE_TABLE_UPD_STATS))
{
// treat this insert as a volatile load stmt.
RelExpr * loadVolTab =
new (bindWA->wHeap())
ExeUtilLoadVolatileTable(getTableName(),
this,
bindWA->wHeap());
boundExpr = loadVolTab->bindNode(bindWA);
if (bindWA->errStatus())
return boundExpr;
return boundExpr;
}
else
{
NATable * nat = (NATable*)(getTableDesc()->getNATable());
nat->setIsVolatileTableMaterialized(TRUE);
}
}
// Now we have to create the following three collections:
//
// - newRecExpr()
// An unordered set of Assign nodes of the form
// "col1 = value1, col2 = value2, ..." which is used by Norm/Optimizer.
//
// - newRecExprArray()
// An ordered array of Assign nodes of the same form,
// ordered by column position, which is used by Generator.
// This array must have the following properties:
//
// - All columns not specified in the insert statement must be
// Assign'ed with their default values.
//
// - If this is a key-sequenced table with a (non-RR) SYSKEY column,
// we must create the first entry in the newRecExprArray
// to be "SYSKEY_COL = 0". This is a placeholder where the timestamp
// value will be moved at runtime. Round-robin SYSKEY columns are
// initialized via an expression of the form "SYSKEY_COL =
// ProgDistribKey(..params..)". SYSKEY columns for other table
// organizations are handled by the file system or disk process.
//
// - updateToSelectMap()
// A ValueIdMap that can be used to rewrite value ids of the
// target table in terms of the source table and vice versa.
// The top value ids are target value ids, the bottom value ids
// are those of the source.
//
NABoolean view = bindWA->getNATable(getTableName())->getViewText() != NULL;
ValueIdList tgtColList, userColList, sysColList, *userColListPtr;
CollIndexList colnoList;
CollIndex totalColCount, defaultColCount, i;
getTableDesc()->getSystemColumnList(sysColList);
//
// Detach the column list and bind the columns to the target table.
// Set up "colnoList" to map explicitly specified columns to where
// in the ordered array we will be inserting later.
//
ItemExpr *columnTree = removeInsertColTree();
CMPASSERT(NOT columnTree || someNonDefaultValuesSpecified);
if (columnTree || (view && someNonDefaultValuesSpecified)) {
//
// INSERT INTO t(colx,coly,...) query-expr;
// INSERT INTO v(cola,colb,...) query-expr;
// INSERT INTO v query-expr;
// where query-expr is VALUES..., TABLE..., or SELECT...,
// but not DEFAULT VALUES.
// userColList is the full list of columns in the target table
// colnoList contains, for those columns specified in tgtColList,
// their ordinal position in the target table user column list
// (i.e., not counting system columns, which can't be specified
// in the insert column list); e.g. '(Z,X,Y)' -> [3,1,2]
//
CMPASSERT(NOT columnTree ||
columnTree->getOperatorType() == ITM_REFERENCE ||
columnTree->getOperatorType() == ITM_ITEM_LIST);
getTableDesc()->getUserColumnList(userColList);
userColListPtr = &userColList;
RETDesc *columnLkp;
if (columnTree) {
// bindRowValues will bind using the currently scoped RETDesc left in
// by GenericUpdate::bindNode, which will be that of the naTableTop
// (topmost view or table), *not* that of the base table (getTableDesc()).
columnLkp = bindRowValues(bindWA, columnTree, tgtColList, this, FALSE);
if (bindWA->errStatus()) return boundExpr;
}
else
{
columnLkp = bindWA->getCurrentScope()->getRETDesc();
columnLkp->getColumnList()->getValueIdList(tgtColList);
}
if (GU_DEBUG) {
// LCOV_EXCL_START - dpm
cerr << "columnLkp " << flush;
columnLkp->display();
// LCOV_EXCL_STOP
}
for (i = 0; i < columnLkp->getDegree(); i++) {
// Describes column in the base table:
ValueId source = columnLkp->getValueId(i);
const NAColumn *nacol = source.getNAColumn();
// Gets name of the column in this (possibly view) table:
const ColRefName colName = columnLkp->getColRefNameObj(i);
// solution 10-081114-7315
if (bindWA->inDDL() && bindWA->isInTrigger ())
{
if (!userColListPtr->contains(source))
{
// 4001 column not found
*CmpCommon::diags() << DgSqlCode(-4001)
<< DgColumnName(colName.getColName())
<< DgString0(getTableName().getQualifiedNameObj().getQualifiedNameAsAnsiString())
<< DgString1(bindWA->getDefaultSchema().getSchemaNameAsAnsiString());
bindWA->setErrStatus();
delete columnLkp;
return boundExpr;
}
}
if (columnLkp->findColumn(colName)->isDuplicate()) {
// 4022 column specified more than once
*CmpCommon::diags() << DgSqlCode(-4022)
<< DgColumnName(colName.getColName());
bindWA->setErrStatus();
delete columnLkp;
return boundExpr;
}
colnoList.insert(nacol->getPosition());
// Commented out this assert, as Assign::bindNode below emits nicer errmsg
// CMPASSERT((long)nacol->getPosition() - (long)firstColNumOnDisk >= 0);
}
if (columnTree) {
delete columnLkp;
columnLkp = NULL;
}
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
}
else {
//
// INSERT INTO t query-expr;
// INSERT INTO t DEFAULT VALUES;
// INSERT INTO v DEFAULT VALUES;
// userColListPtr points to tgtColList (which is the full list)
// userColList not used (because tgtColList already is the full list)
// colnoList remains empty (because tgtColList is already in order)
// if no system columns, set to list of user cols otherwise
getTableDesc()->getUserColumnList(tgtColList);
userColListPtr = &tgtColList;
if (sysColList.entries()) {
// set up colnoList to indicate the user columns, to help
// binding DEFAULT clauses in DefaultSpecification::bindNode()
for (CollIndex uc=0; uc<tgtColList.entries(); uc++) {
colnoList.insert(tgtColList[uc].getNAColumn()->getPosition());
}
}
}
// Compute total number of columns. Note that there may be some unused
// entries in newRecExprArray(), in the following cases:
// - For an SQL/MP entry-sequenced table, entry 0 will not be used, as
// the syskey (col 0) is not stored in that type of table
// - For computed columns that are not stored on disk
totalColCount = userColListPtr->entries() + sysColList.entries();
newRecExprArray().resize(totalColCount);
// Make sure children are bound -- GenericUpdate::bindNode defers
// their binding to now if this is an INSERT..VALUES(..),
// because only now do we have target column position info for
// correct binding of INSERT..VALUES(..,DEFAULT,..)
// in DefaultSpecification::bindNode.
//
// Save current RETDesc and XTNM.
// Bind the source in terms of the original RETDesc,
// with target column position info available through
// bindWA->getCurrentScope()->context()->updateOrInsertNode()
// (see DefaultSpecification::bindNode, calls Insert::getColDefaultValue).
// Restore RETDesc and XTNM.
//
RETDesc *currRETDesc = bindWA->getCurrentScope()->getRETDesc();
bindWA->getCurrentScope()->setRETDesc(incomingRETDescForSource);
bindWA->getCurrentScope()->xtnmStack()->createXTNM();
setTargetUserColPosList(colnoList);
// if my child is a TupleList, then all tuples are to be converted/cast
// to the corresponding target type of the tgtColList.
// Pass on the tgtColList to TupleList so it can generate the Cast nodes
// with the target types during the TupleList::bindNode.
TupleList *tl = NULL;
if (child(0)->getOperatorType() == REL_TUPLE_LIST) {
tl = (TupleList *)child(0)->castToRelExpr();
tl->castToList() = tgtColList;
}
if (getTolerateNonFatalError() != RelExpr::UNSPECIFIED_) {
HostArraysWA * arrayWA = bindWA->getHostArraysArea() ;
if (arrayWA && arrayWA->hasHostArraysInTuple()) {
if (getTolerateNonFatalError() == RelExpr::NOT_ATOMIC_)
arrayWA->setTolerateNonFatalError(TRUE);
else
arrayWA->setTolerateNonFatalError(FALSE); // Insert::tolerateNonfatalError == ATOMIC_
}
else if (!arrayWA || NOT arrayWA->getRowwiseRowset()) { // guard against a null arrayWA
// NOT ATOMIC only for rowset inserts
*CmpCommon::diags() << DgSqlCode(-30025) ;
bindWA->setErrStatus();
return boundExpr;
}
}
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
// if this is an insert into native hbase table in _ROW_ format, then
// validate that only REL_TUPLE or REL_TUPLE_LIST is being used.
if ((getOperatorType() == REL_UNARY_INSERT) &&
(getTableDesc()->getNATable()->isHbaseRowTable()))
{
NABoolean isError = FALSE;
if (NOT (child(0)->getOperatorType() == REL_TUPLE || // VALUES (1,'b')
child(0)->getOperatorType() == REL_TUPLE_LIST)) // VALUES (1,'b'),(2,'Y')
{
isError = TRUE;
}
// Also make sure that inserts into column_details field of _ROW_ format
// hbase virtual table are being done through column_create function.
// For ex: insert into hbase."_ROW_".hb values ('1', column_create('cf:a', '100'))
//
if ((NOT isError) && (child(0)->getOperatorType() == REL_TUPLE))
{
ValueIdList &tup = ((Tuple*)(child(0)->castToRelExpr()))->tupleExpr();
if (tup.entries() == 2) // can only have 2 entries
{
ItemExpr * ie = tup[1].getItemExpr();
if (ie && ie->getOperatorType() != ITM_HBASE_COLUMN_CREATE)
{
isError = TRUE;
}
}
else
isError = TRUE;
}
if ((NOT isError) && (child(0)->getOperatorType() == REL_TUPLE_LIST))
{
TupleList * tl = (TupleList*)(child(0)->castToRelExpr());
for (CollIndex x = 0; x < (UInt32)tl->numTuples(); x++)
{
ValueIdList tup;
if (!tl->getTuple(bindWA, tup, x))
{
isError = TRUE;
}
if (NOT isError)
{
if (tup.entries() == 2) // must have 2 entries
{
ItemExpr * ie = tup[1].getItemExpr();
if (ie->getOperatorType() != ITM_HBASE_COLUMN_CREATE)
{
isError = TRUE;
}
}
else
isError = TRUE;
} // if
} // for
} // if
if (isError)
{
*CmpCommon::diags() << DgSqlCode(-1429);
bindWA->setErrStatus();
return boundExpr;
}
}
// the only time that tgtColList.entries() (Insert's colList) != tl->castToList().entries()
// (TupleList's colList) is when DEFAULTS are removed in TupleList::bindNode() for insert
// into table with IDENTITY column, where the system generates the values
// for it using SG (Sequence Generator).
// See TupleList::bindNode() for detailed comments.
// When tgtColList.entries() (Insert's col list) is not
// equal to tl->castToList().entries() (TupleList's column list)
// make sure the correct colList is used during binding.
ValueIdList newTgtColList;
if(tl && (tgtColList.entries() != tl->castToList().entries()))
{
newTgtColList = tl->castToList();
CMPASSERT(newTgtColList.entries() == (tgtColList.entries() -1));
}
else
newTgtColList = tgtColList;
setTargetUserColPosList();
bindWA->getCurrentScope()->xtnmStack()->removeXTNM();
bindWA->getCurrentScope()->setRETDesc(currRETDesc);
NABoolean bulkLoadIndex = bindWA->isTrafLoadPrep() && noIMneeded() ;
if (someNonDefaultValuesSpecified)
// query-expr child specified
{
const RETDesc &sourceTable = *child(0)->getRETDesc();
if ((sourceTable.getDegree() != newTgtColList.entries())&& !bulkLoadIndex) {
// 4023 degree of row value constructor must equal that of target table
*CmpCommon::diags() << DgSqlCode(-4023)
#pragma nowarn(1506) // warning elimination
<< DgInt0(sourceTable.getDegree()) << DgInt1(tgtColList.entries());
#pragma warn(1506) // warning elimination
bindWA->setErrStatus();
return boundExpr;
}
OptSqlTableOpenInfo* stoiInList = NULL;
for (CollIndex ii=0; ii < bindWA->getStoiList().entries(); ii++)
{
if (getOptStoi() && getOptStoi()->getStoi())
{
if (strcmp((bindWA->getStoiList())[ii]->getStoi()->fileName(),
getOptStoi()->getStoi()->fileName()) == 0)
{
stoiInList = bindWA->getStoiList()[ii];
break;
}
}
}
// Combine the ValueIdLists for the column list and value list into a
// ValueIdSet (unordered) of Assign nodes and a ValueIdArray (ordered).
// Maintain a ValueIdMap between the source and target value ids.
CollIndex i2 = 0;
const ColumnDescList *viewColumns = NULL;
if (getBoundView())
viewColumns = getBoundView()->getRETDesc()->getColumnList();
if (bulkLoadIndex) {
setRETDesc(child(0)->getRETDesc());
}
for (i = 0; i < tgtColList.entries() && i2 < newTgtColList.entries(); i++) {
if(tgtColList[i] != newTgtColList[i2])
continue;
ValueId target = tgtColList[i];
ValueId source ;
if (!bulkLoadIndex)
source = sourceTable.getValueId(i2);
else {
ColRefName & cname = ((ColReference *)(baseColRefs()[i2]))->getColRefNameObj();
source = sourceTable.findColumn(cname)->getValueId();
}
CMPASSERT(target != source);
const NAColumn *nacol = target.getNAColumn();
const NAType &sourceType = source.getType();
const NAType &targetType = target.getType();
if ( DFS2REC::isFloat(sourceType.getFSDatatype()) &&
DFS2REC::isNumeric(targetType.getFSDatatype()) &&
(getTableDesc()->getNATable()->getPartitioningScheme() ==
COM_HASH_V1_PARTITIONING ||
getTableDesc()->getNATable()->getPartitioningScheme() ==
COM_HASH_V2_PARTITIONING) )
{
const NAColumnArray &partKeyCols = getTableDesc()->getNATable()
->getClusteringIndex()->getPartitioningKeyColumns();
for (CollIndex j=0; j < partKeyCols.entries(); j++)
{
if (partKeyCols[j]->getPosition() == nacol->getPosition())
{
ItemExpr *ie = source.getItemExpr();
ItemExpr *cast = new (bindWA->wHeap())
Cast(ie, &targetType, ITM_CAST);
cast = cast->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
source = cast->getValueId();
}
}
}
Assign *assign = new (bindWA->wHeap())
Assign(target.getItemExpr(), source.getItemExpr());
assign->bindNode(bindWA);
if(bindWA->errStatus())
return NULL;
if (stoiInList && !getUpdateCKorUniqueIndexKey())
{
if(!getBoundView())
stoiInList->addInsertColumn(nacol->getPosition());
else
{
NABoolean found = FALSE;
for (CollIndex k=0; k < viewColumns->entries(); k++) {
if ((*viewColumns)[k]->getValueId() == target) {
stoiInList->addInsertColumn((Lng32) k);
found = TRUE;
// Updatable views cannot have any underlying basetable column
// appear more than once, so it's safe to break out of the loop.
break;
}
} // loop k
CMPASSERT(found);
}
}
//
// Check for automatically inserted TRANSLATE nodes.
// Such nodes are inserted by the Implicit Casting And Translation feature.
// If this node has a child TRANSLATE node, then that TRANSLATE node
// is the real "source" that we must use from here on.
//
ItemExpr *assign_child = assign->child(1);
if ( assign_child->getOperatorType() == ITM_CAST )
{
const NAType& type = assign_child->getValueId().getType();
if ( type.getTypeQualifier() == NA_CHARACTER_TYPE )
{
ItemExpr *assign_grndchld = assign_child->child(0);
if ( assign_grndchld->getOperatorType() == ITM_TRANSLATE )
{
source = assign_grndchld->getValueId();
CMPASSERT(target != source);
}
}
}
const NAType *colType = nacol->getType();
if (!colType->isSupportedType()) {
*CmpCommon::diags() << DgSqlCode(-4027) // 4027 table not insertable
<< DgTableName(nacol->getNATable()->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
}
if (bindWA->errStatus()) return boundExpr;
newRecExprArray().insertAt(nacol->getPosition(), assign->getValueId());
newRecExpr().insert(assign->getValueId());
const NAType& assignSrcType = assign->getSource().getType();
// if ( <we added some type of conversion> AND
// ( <tgt and src are both character> AND
// (<they are big and errors can occur> OR <charsets differ> OR <difference between tgt and src lengths is large>)))
// OR
// ( <we changed the basic type and we allow incompatible types> )
// )
// <then incorporate this added conversion into the updateToSelectMap>
if ( source != assign->getSource() &&
((assignSrcType.getTypeQualifier() == NA_CHARACTER_TYPE &&
sourceType.getTypeQualifier() == NA_CHARACTER_TYPE &&
((assign->getSource().getItemExpr()->getOperatorType() == ITM_CAST &&
sourceType.errorsCanOccur(assignSrcType) &&
sourceType.getNominalSize() >
CmpCommon::getDefaultNumeric(LOCAL_MESSAGE_BUFFER_SIZE)*1024) ||
// Temporary code to fix QC4395 in M6. For M7, try to set source
// to the right child of the assign after calling assign->bindNode.
// We should then be able to eliminate this entire if statement
// as well as the code to check for TRANSLATE nodes above.
((CharType &) assignSrcType).getCharSet() !=
((CharType &) sourceType).getCharSet() ||
// The optimizer may ask for source data to be partitioned or sorted on original source columns
// This is the reason we need to choose the else branch below unless we have a particular reason
// to do otherwise. Each of the conditions in this if statement reflects one of those particular
// conditions. The bottomValues of updateToSelectMap will be placed in their entirety in the
// characteristic outputs of the source node. Outputs of the source node may be used to allocate
// buffers at runtime and therefore we would like to keep the output as small as possible.
// If the source cannot be partitioned/sorted on a column because we have assign-getSource in the bottomValues
// then the cost is that data will be repartitioned with an additional exchange node. If the difference in
// length between source and assignSrc is large then the cost of repartition is less than the cost of
// allocating and using large buffers.
sourceType.getNominalSize() > (assignSrcType.getNominalSize() +
(ActiveSchemaDB()->getDefaults()).getAsLong(COMP_INT_98)) // default value is 512
))
||
// If we allow incompatible type assignments, also include the
// added cast into the updateToSelectMap
assignSrcType.getTypeQualifier() != sourceType.getTypeQualifier() &&
CmpCommon::getDefault(ALLOW_INCOMPATIBLE_ASSIGNMENT) == DF_ON))
{
updateToSelectMap().addMapEntry(target,assign->getSource());
}
else
{
updateToSelectMap().addMapEntry(target,source);
}
i2++;
}
}
setBoundView(NULL);
// Is the table round-robin (horizontal) partitioned?
PartitioningFunction *partFunc =
getTableDesc()->getClusteringIndex()->getNAFileSet()->
getPartitioningFunction();
NABoolean isRRTable =
partFunc && partFunc->isARoundRobinPartitioningFunction();
// Fill in default values for any columns not explicitly specified.
//
if (someNonDefaultValuesSpecified) // query-expr child specified, set system cols
defaultColCount = totalColCount - newTgtColList.entries();
else // "DEFAULT VALUES" specified
defaultColCount = totalColCount;
if (identityColumnGeneratedAlways)
defaultColCount = totalColCount;
NABoolean isAlignedRowFormat = getTableDesc()->getNATable()->isSQLMXAlignedTable();
NABoolean omittedDefaultCols = FALSE;
NABoolean omittedCurrentDefaultClassCols = FALSE;
if (defaultColCount) {
NAWchar zero_w_Str[2]; zero_w_Str[0] = L'0'; zero_w_Str[1] = L'\0'; // wide version
CollIndex sysColIx = 0, usrColIx = 0;
for (i = 0; i < totalColCount; i++) {
ValueId target;
NABoolean isASystemColumn = FALSE;
const NAColumn *nacol = NULL;
// find column on position i in the system or user column lists
if (sysColIx < sysColList.entries() &&
sysColList[sysColIx].getNAColumn()->getPosition() == i)
{
isASystemColumn = TRUE;
target = sysColList[sysColIx];
}
else
{
CMPASSERT((*userColListPtr)[usrColIx].getNAColumn()->getPosition() == i);
target = (*userColListPtr)[usrColIx];
}
nacol = target.getNAColumn();
// if we need to add the default value, we don't have a new rec expr yet
if (NOT newRecExprArray().used(i)) {
// check for SQL/MP entry sequenced tables omitted above
const char* defaultValueStr = NULL;
ItemExpr * defaultValueExpr = NULL;
NABoolean needToDeallocateColDefaultValueStr = FALSE;
// Used for datetime columns with COM_CURRENT_DEFAULT.
//
NAType *castType = NULL;
if (isASystemColumn) {
if (isRRTable) {
bindInsertRRKey(bindWA, this, sysColList, sysColIx);
if (bindWA->errStatus()) return boundExpr;
}
if (nacol->isComputedColumn())
{
CMPASSERT(target.getItemExpr()->getOperatorType() == ITM_BASECOLUMN);
ValueId defaultExprValId = ((BaseColumn *) target.getItemExpr())->
getComputedColumnExpr();
ValueIdMap updateToSelectMapCopy(updateToSelectMap());
// Use a copy to rewrite the value, to avoid requesting additional
// values from the child. We ask the child for all entries in this
// map in GenericUpdate::pushdownCoveredExpr().
updateToSelectMapCopy.rewriteValueIdDown(defaultExprValId, defaultExprValId);
defaultValueExpr = defaultExprValId.getItemExpr();
}
else
defaultValueStr = (char *)zero_w_Str;
}
else { // a user column (cf. Insert::getColDefaultValue)
CMPASSERT(NOT nacol->isComputedColumn()); // computed user cols not yet supported
defaultValueStr = nacol->getDefaultValue();
}
if (NOT defaultValueStr && NOT defaultValueExpr) {
// 4024 column has neither a default nor an explicit value.
*CmpCommon::diags() << DgSqlCode(-4024) << DgColumnName(nacol->getColName());
bindWA->setErrStatus();
return boundExpr;
}
if (defaultValueStr) {
// If the column has a default class of COM_CURRENT_DEFAULT,
// cast the default value (which is CURRENT_TIMESTAMP) to
// the type of the column. Here we capture the type of the
// column. COM_CURRENT_DEFAULT is only used for Datetime
// columns.
//
if (nacol->getDefaultClass() == COM_CURRENT_DEFAULT) {
castType = nacol->getType()->newCopy(bindWA->wHeap());
omittedCurrentDefaultClassCols = TRUE;
omittedDefaultCols = TRUE;
}
else if ((nacol->getDefaultClass() == COM_IDENTITY_GENERATED_ALWAYS) ||
(nacol->getDefaultClass() == COM_IDENTITY_GENERATED_BY_DEFAULT)) {
setSystemGeneratesIdentityValue(TRUE);
}
else if (nacol->getDefaultClass() != COM_NO_DEFAULT)
omittedDefaultCols = TRUE;
// Bind the default value, make an Assign, etc, as above
Parser parser(bindWA->currentCmpContext());
// save the current parserflags setting
ULng32 savedParserFlags = Get_SqlParser_Flags (0xFFFFFFFF);
Set_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL);
Set_SqlParser_Flags(ALLOW_VOLATILE_SCHEMA_IN_TABLE_NAME);
defaultValueExpr = parser.getItemExprTree(defaultValueStr);
CMPASSERT(defaultValueExpr);
// Restore parser flags settings to what they originally were
Assign_SqlParser_Flags (savedParserFlags);
} // defaultValueStr != NULL
Assign *assign = NULL;
// If the default value string was successfully parsed,
// Create an ASSIGN node and bind.
//
if (defaultValueExpr) {
// If there is a non-NULL castType, then cast the default
// value to the castType. This is used in the case of
// datetime value with COM_CURRENT_DEFAULT. The default
// value will be CURRENT_TIMESTAMP for all datetime types,
// so must cast the CURRENT_TIMESTAMP to the type of the
// column.
//
if(castType) {
defaultValueExpr = new (bindWA->wHeap())
Cast(defaultValueExpr, castType);
}
// system generates value for IDENTITY column.
if (defaultValueExpr->getOperatorType() == ITM_IDENTITY &&
(CmpCommon::getDefault(COMP_BOOL_210) == DF_ON))
{
// SequenceGenerator::createSequenceSubqueryExpression()
// is called for introducing the subquery in
// defaultValueExpr::bindNode() (IdentityVar::bindNode()).
// We bind here to make sure the correct subquery
// is used.
defaultValueExpr = defaultValueExpr->bindNode(bindWA);
}
if (((isUpsertLoad()) ||
((isUpsert()) && (getTableDesc()->getNATable()-> isSQLMXAlignedTable()))) &&
(defaultValueExpr->getOperatorType() != ITM_IDENTITY) &&
(NOT isASystemColumn))
{
// for 'upsert using load' construct, all values must be specified so
// data could be loaded using inserts.
// If some values are missing, then it becomes an update.
*CmpCommon::diags() << DgSqlCode(-4246) ;
bindWA->setErrStatus();
return boundExpr;
}
assign = new (bindWA->wHeap())
Assign(target.getItemExpr(), defaultValueExpr,
FALSE /*Not user Specified */);
if ((nacol->getDefaultClass() != COM_CURRENT_DEFAULT) &&
(nacol->getDefaultClass() != COM_USER_FUNCTION_DEFAULT))
assign->setToBeSkipped(TRUE);
assign->bindNode(bindWA);
}
//
// Note: Parser or Binder errors from MP texts are possible.
//
if (!defaultValueExpr || bindWA->errStatus()) {
// 7001 Error preparing default on <column> for <table>.
*CmpCommon::diags() << DgSqlCode(-7001)
<< DgString0(defaultValueStr)
<< DgString1(nacol->getFullColRefNameAsAnsiString());
bindWA->setErrStatus();
return boundExpr;
}
newRecExprArray().insertAt(i, assign->getValueId());
newRecExpr().insert(assign->getValueId());
updateToSelectMap().addMapEntry(target,defaultValueExpr->getValueId());
if (needToDeallocateColDefaultValueStr && defaultValueStr != NULL)
{
NADELETEBASIC((NAWchar*)defaultValueStr, bindWA->wHeap());
defaultValueStr = NULL;
}
if (--defaultColCount == 0)
break; // tiny performance hack
} // NOT newRecExprArray().used(i)
else
{
if (nacol->getDefaultClass() == COM_IDENTITY_GENERATED_ALWAYS)
{
Assign * assign = (Assign*)newRecExprArray()[i].getItemExpr();
ItemExpr * ie = assign->getSource().getItemExpr();
if (NOT ie->wasDefaultClause())
{
*CmpCommon::diags() << DgSqlCode(-3428)
<< DgString0(nacol->getColName());
bindWA->setErrStatus();
return boundExpr;
}
}
}
if (isASystemColumn)
sysColIx++;
else
usrColIx++;
} // for i < totalColCount
} // defaultColCount
// Now add the default values created as part of the Assigns above
// to the characteristic inputs. The user-specified values are added
// to the characteristic inputs during GenericUpdate::bindNode
// executed earlier as part of this method.
getGroupAttr()->addCharacteristicInputs(bindWA->
getCurrentScope()->
getOuterRefs());
if (isRRTable) {
// LCOV_EXCL_START -
const LIST(IndexDesc *) indexes = getTableDesc()->getIndexes();
for(i = 0; i < indexes.entries(); i++) {
indexes[i]->getPartitioningFunction()->setAssignPartition(TRUE);
}
// LCOV_EXCL_STOP
}
// It is a system generated identity value if
// identityColumn() != NULL_VALUE_ID. The identityColumn()
// is set two places (1) earlier in this method.
// (2) DefaultSpecification::bindNode()
// The IDENTITY column of type GENERATED ALWAYS AS IDENTITY
// must be specified in the values list as (DEFAULT) or
// must be excluded from the values list forcing the default.
if (identityColumnGeneratedAlways &&
NOT systemGeneratesIdentityValue())
{
// The IDENTITY column type of GENERATED ALWAYS AS IDENTITY
// can not be used with user specified values.
// However, if the override CQD is set, then
// allow user specified values to be added
// for a GENERATED ALWAYS AS IDENTITY column.
if (CmpCommon::getDefault(OVERRIDE_GENERATED_IDENTITY_VALUES) == DF_OFF)
{
*CmpCommon::diags() << DgSqlCode(-3428)
<< DgString0(identityColumnName.data());
bindWA->setErrStatus();
return boundExpr;
}
}
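// Illustrative example (hypothetical table): given
//   CREATE TABLE t (id LARGEINT GENERATED ALWAYS AS IDENTITY, a INT);
//   INSERT INTO t VALUES (10, 1);      -- rejected above with error 3428
//   INSERT INTO t VALUES (DEFAULT, 1); -- OK, system generates the value
// unless CQD OVERRIDE_GENERATED_IDENTITY_VALUES is ON.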
ItemExpr *orderByTree = removeOrderByTree();
if (orderByTree) {
bindWA->getCurrentScope()->context()->inOrderBy() = TRUE;
bindWA->getCurrentScope()->setRETDesc(child(0)->getRETDesc());
orderByTree->convertToValueIdList(reqdOrder(), bindWA, ITM_ITEM_LIST);
bindWA->getCurrentScope()->context()->inOrderBy() = FALSE;
if (bindWA->errStatus()) return NULL;
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
}
setInUpdateOrInsert(bindWA);
// Triggers --
NABoolean insertFromValuesList =
getOperatorType() == REL_UNARY_INSERT &&
(child(0)->getOperatorType() == REL_TUPLE || // VALUES (1,'b')
child(0)->getOperatorType() == REL_TUPLE_LIST || // VALUES (1,'b'),(2,'Y')
child(0)->getOperatorType() == REL_UNION); // VALUES with subquery
// Insert from values that gets input from above should not use flow,
// for performance. Cases, other than TUPLE, should be investigated.
if (bindWA->findNextScopeWithTriggerInfo() != NULL
&& (getGroupAttr()->getCharacteristicInputs() != NULL)
&& (insertFromValuesList))
setNoFlow(TRUE);
if (getUpdateCKorUniqueIndexKey())
{
SqlTableOpenInfo * scanStoi = getLeftmostScanNode()->getOptStoi()->getStoi();
short updateColsCount = scanStoi->getColumnListCount();
getOptStoi()->getStoi()->setColumnListCount(updateColsCount);
getOptStoi()->getStoi()->setColumnList(new (bindWA->wHeap()) short[updateColsCount]);
for (short i=0; i<updateColsCount; i++)
getOptStoi()->getStoi()->setUpdateColumn(i,scanStoi->getUpdateColumn(i));
}
if ((getIsTrafLoadPrep()) &&
(getTableDesc()->getCheckConstraints().entries() != 0 ||
getTableDesc()->getNATable()->getRefConstraints().entries() != 0 ))
{
// enabling/disabling constraints is not supported yet
//4486--Constraints not supported with bulk load. Disable the constraints and try again.
*CmpCommon::diags() << DgSqlCode(-4486)
<< DgString0("bulk load") ;
}
if (getIsTrafLoadPrep())
{
PartitioningFunction *pf = getTableDesc()->getClusteringIndex()->getPartitioningFunction();
const NodeMap* np;
Lng32 partns = 1;
if ( pf && (np = pf->getNodeMap()) )
{
partns = np->getNumEntries();
if(partns > 1 && CmpCommon::getDefault(ATTEMPT_ESP_PARALLELISM) == DF_OFF)
// 4490 - BULK LOAD into a salted table is not supported if ESP parallelism is turned off
*CmpCommon::diags() << DgSqlCode(-4490);
}
}
if (isUpsertThatNeedsMerge(isAlignedRowFormat, omittedDefaultCols, omittedCurrentDefaultClassCols)) {
boundExpr = xformUpsertToMerge(bindWA);
return boundExpr;
}
else if (NOT (isMerge() || noIMneeded()))
boundExpr = handleInlining(bindWA, boundExpr);
  // Turn OFF non-atomic inserts for ODBC if we have detected that inlining is
  // needed; the necessary warnings have been generated in the handleInlining method.
if (CmpCommon::getDefault(ODBC_PROCESS) == DF_ON) {
if (bindWA->getHostArraysArea() &&
(NOT bindWA->getHostArraysArea()->getRowwiseRowset()) &&
!(bindWA->getHostArraysArea()->getTolerateNonFatalError()))
setTolerateNonFatalError(RelExpr::UNSPECIFIED_);
}
  // When mtsStatement_ or bulkLoadIndex is set, the Insert needs to return
  // rows; potential outputs are added (note that they are added, not replaced) to
// the Insert node. Currently mtsStatement_ is set
// for MTS queries and embedded insert queries.
if (isMtsStatement() || bulkLoadIndex)
{
if(isMtsStatement())
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, getTableDesc()));
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
ValueIdList outputs;
getRETDesc()->getValueIdList(outputs, USER_AND_SYSTEM_COLUMNS);
ValueIdSet potentialOutputs;
getPotentialOutputValues(potentialOutputs);
potentialOutputs.insertList(outputs);
setPotentialOutputValues(potentialOutputs);
      // this flag is set to tell the optimizer not to pick the
      // TupleFlow operator
setNoFlow(TRUE);
}
return boundExpr;
} // Insert::bindNode()
/* Upsert into a table with an index is converted into a Merge to avoid
the problem described in Trafodion-14. An upsert may overwrite an existing row
in the base table (identical to the update when matched clause of Merge) or
it may insert a new row into the base table (identical to insert when not
matched clause of merge). If the upsert caused a row to be updated in the
base table then the old version of the row will have to be deleted from
indexes, and a new version inserted. Upsert is being transformed to merge
so that we can delete the old version of an updated row from the index.
Upsert is also converted into merge when TRAF_UPSERT_MODE is set to MERGE and
there are omitted cols with default values in case of aligned format table or
omitted current timestamp cols in case of non-aligned row format
*/
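/* A hypothetical example of the index maintenance problem described above:
     create table t (k int primary key, b int);
     create index tix on t(b);
     upsert into t values (1, 20);
   If a row with key 1 already exists with b = 10, the upsert overwrites it in
   the base table, but the index entry for b = 10 must also be deleted and an
   entry for b = 20 inserted; only the merge form can express that. */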
NABoolean Insert::isUpsertThatNeedsMerge(NABoolean isAlignedRowFormat, NABoolean omittedDefaultCols,
NABoolean omittedCurrentDefaultClassCols) const
{
  // The necessary conditions to convert upsert to merge are:
if (isUpsert() &&
(NOT getIsTrafLoadPrep()) &&
(NOT (getTableDesc()->isIdentityColumnGeneratedAlways() && getTableDesc()->hasIdentityColumnInClusteringKey())) &&
(NOT (getTableDesc()->getClusteringIndex()->getNAFileSet()->hasSyskey())) &&
// table has secondary indexes or
(getTableDesc()->hasSecondaryIndexes() ||
// CQD is set to MERGE
((CmpCommon::getDefault(TRAF_UPSERT_MODE) == DF_MERGE) &&
// omitted current default columns with non-aligned row format tables
// or omitted default columns with aligned row format tables
(((NOT isAlignedRowFormat) && omittedCurrentDefaultClassCols) ||
(isAlignedRowFormat && omittedDefaultCols))) ||
// CQD is set to Optimal, for non-aligned row format with omitted
// current columns, it is converted into merge though it is not
// optimal for performance - This is done to ensure that when the
// CQD is set to optimal, non-aligned format would behave like
// merge when any column is omitted
((CmpCommon::getDefault(TRAF_UPSERT_MODE) == DF_OPTIMAL) &&
((NOT isAlignedRowFormat) && omittedCurrentDefaultClassCols))
)
)
return TRUE;
else
return FALSE;
}
// take an insert(src) node and transform it into
// tsj_flow(src, merge_update(input_scan))
// with a newly created input_scan
RelExpr* Insert::xformUpsertToMerge(BindWA *bindWA)
{
NATable *naTable = bindWA->getNATable(getTableName());
if (bindWA->errStatus())
return NULL;
if ((naTable->getViewText() != NULL) && (naTable->getViewCheck()))
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" View with check option not allowed.");
bindWA->setErrStatus();
return NULL;
}
// columns of the target table
const ValueIdList &tableCols = updateToSelectMap().getTopValues();
const ValueIdList &sourceVals = updateToSelectMap().getBottomValues();
NABoolean isAlignedRowFormat = getTableDesc()->getNATable()->isSQLMXAlignedTable();
// Create a new BindScope, to encompass the new nodes merge_update(input_scan)
// and any inlining nodes that will be created. Any values the merge_update
// and children will need from src will be marked as outer references in that
// new BindScope. We assume that "src" is already bound.
ValueIdSet currOuterRefs = bindWA->getCurrentScope()->getOuterRefs();
CMPASSERT(child(0)->nodeIsBound());
bindWA->initNewScope();
BindScope *mergeScope = bindWA->getCurrentScope();
// create a new scan of the target table, to be used in the merge
Scan * inputScan =
new (bindWA->wHeap())
Scan(CorrName(getTableDesc()->getCorrNameObj(), bindWA->wHeap()));
ItemExpr * keyPred = NULL;
ItemExpr * keyPredPrev = NULL;
ItemExpr * setAssign = NULL;
ItemExpr * setAssignPrev = NULL;
ItemExpr * insertVal = NULL;
ItemExpr * insertValPrev = NULL;
ItemExpr * insertCol = NULL;
ItemExpr * insertColPrev = NULL;
BaseColumn* baseCol;
ColReference * targetColRef;
int predCount = 0;
int setCount = 0;
ValueIdSet newOuterRefs;
// loop over the columns of the target table
for (CollIndex i = 0; i<tableCols.entries(); i++)
{
baseCol = (BaseColumn *)(tableCols[i].getItemExpr()) ;
if (baseCol->getNAColumn()->isSystemColumn())
continue;
targetColRef = new(bindWA->wHeap()) ColReference(
new(bindWA->wHeap()) ColRefName(
baseCol->getNAColumn()->getFullColRefName(), bindWA->wHeap()));
if (baseCol->getNAColumn()->isClusteringKey())
{
// create a join/key predicate between source and target table,
// on the clustering key columns of the target table, making
// ColReference nodes for the target table, so that we can bind
// those to the new scan
keyPredPrev = keyPred;
keyPred = new (bindWA->wHeap())
BiRelat(ITM_EQUAL, targetColRef,
sourceVals[i].getItemExpr(),
baseCol->getType().supportsSQLnull());
predCount++;
if (predCount > 1)
{
keyPred = new(bindWA->wHeap()) BiLogic(ITM_AND,
keyPredPrev,
keyPred);
}
}
if (sourceVals[i].getItemExpr()->getOperatorType() != ITM_CONSTANT)
{
newOuterRefs += sourceVals[i];
mergeScope->addOuterRef(sourceVals[i]);
}
// create the INSERT (WHEN NOT MATCHED) part of the merge for this column, again
// with a ColReference that we will then bind to the MergeUpdate target table
insertValPrev = insertVal;
insertColPrev = insertCol ;
insertVal = sourceVals[i].getItemExpr();
insertCol = new(bindWA->wHeap()) ColReference(
new(bindWA->wHeap()) ColRefName(
baseCol->getNAColumn()->getFullColRefName(), bindWA->wHeap()));
if (i > 0)
{
insertVal = new(bindWA->wHeap()) ItemList(insertVal,insertValPrev);
insertCol = new(bindWA->wHeap()) ItemList(insertCol,insertColPrev);
}
}
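  // At this point (assuming at least one clustering key column) keyPred has
  // the shape (tgt.k1 = src.v1) AND (tgt.k2 = src.v2) AND ..., covering the
  // clustering key columns, while insertCol/insertVal hold the column and
  // value lists for the WHEN NOT MATCHED THEN INSERT branch built below.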
inputScan->addSelPredTree(keyPred);
for (CollIndex i = 0 ; i < newRecExprArray().entries(); i++)
{
const Assign *assignExpr = (Assign *)newRecExprArray()[i].getItemExpr();
ValueId tgtValueId = assignExpr->child(0)->castToItemExpr()->getValueId();
NAColumn *col = tgtValueId.getNAColumn( TRUE );
NABoolean copySetAssign = FALSE;
if (col->isSystemColumn())
continue;
else
if (! col->isClusteringKey())
{
// Create the UPDATE (WHEN MATCHED) part of the new MergeUpdate for
// a non-key column. We need to bind in the new = old values
// in GenericUpdate::bindNode. So skip the columns that are not user
// specified. Note that we had a discussion on whether such a transformed
// UPSERT shouldn't update all columns.
//
if (assignExpr->isUserSpecified())
copySetAssign = TRUE;
        // Copy the default values in case of replace mode, or optimal mode
        // with aligned row format tables.
else if ((CmpCommon::getDefault(TRAF_UPSERT_MODE) == DF_REPLACE) ||
(isAlignedRowFormat && CmpCommon::getDefault(TRAF_UPSERT_MODE) == DF_OPTIMAL))
copySetAssign = TRUE;
if (copySetAssign)
{
setAssignPrev = setAssign;
setAssign = (ItemExpr *)assignExpr;
setCount++;
if (setCount > 1)
setAssign = new(bindWA->wHeap()) ItemList(setAssignPrev, setAssign);
}
}
}
MergeUpdate *mu = new (bindWA->wHeap())
MergeUpdate(CorrName(getTableDesc()->getCorrNameObj(), bindWA->wHeap()),
NULL,
REL_UNARY_UPDATE,
inputScan, // USING
setAssign, // WHEN MATCHED THEN UPDATE
insertCol, // WHEN NOT MATCHED THEN INSERT (cols) ...
insertVal, // ... VALUES()
bindWA->wHeap(),
NULL);
mu->setXformedUpsert();
// Use mergeScope, the scope we created here, for the MergeUpdate. We are
// creating some expressions with outer references here in this method, so
// we need to control the scope from here.
mu->setNeedsBindScope(FALSE);
RelExpr *boundMU = mu->bindNode(bindWA);
// remove the BindScope created earlier in this method
bindWA->removeCurrentScope();
// Remove the outer refs from the parent scope, they are provided
// by the left child of the TSJ_FLOW, unless they were already outer refs
// when we started this method. The binder logic doesn't handle
// that well, since they come from a child scope, not the current one,
// so we help a little.
newOuterRefs -= currOuterRefs;
bindWA->getCurrentScope()->removeOuterRefs(newOuterRefs);
Join * jn = new(bindWA->wHeap()) Join(child(0), boundMU, REL_TSJ_FLOW, NULL);
jn->doNotTransformToTSJ();
jn->setTSJForMerge(TRUE);
jn->setTSJForMergeWithInsert(TRUE);
jn->setTSJForMergeUpsert(TRUE);
jn->setTSJForWrite(TRUE);
RelExpr *result = jn->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
  // Copy the userSpecified and canBeSkipped attributes to the merge insert expression array
ValueIdList mergeInsertExprArray = ((MergeUpdate *)mu)->mergeInsertRecExprArray();
for (CollIndex i = 0 ; i < newRecExprArray().entries(); i++)
{
const Assign *assignExpr = (Assign *)newRecExprArray()[i].getItemExpr();
((Assign *)mergeInsertExprArray[i].getItemExpr())->setToBeSkipped(assignExpr->canBeSkipped());
((Assign *)mergeInsertExprArray[i].getItemExpr())->setUserSpecified(assignExpr->isUserSpecified());
}
return result;
}
RelExpr *HBaseBulkLoadPrep::bindNode(BindWA *bindWA)
{
//CMPASSERT((CmpCommon::getDefault(TRAF_LOAD) == DF_ON &&
// CmpCommon::getDefault(TRAF_LOAD_HFILE) == DF_ON));
if (nodeIsBound())
{
return this;
}
Insert * newInsert = new (bindWA->wHeap())
Insert(getTableName(),
NULL,
REL_UNARY_INSERT,
child(0)->castToRelExpr());
newInsert->setInsertType(UPSERT_LOAD);
newInsert->setIsTrafLoadPrep(true);
newInsert->setCreateUstatSample(getCreateUstatSample());
// Pass the flag to bindWA to guarantee that a range partitioning is
// always used for all source and target tables.
bindWA->setIsTrafLoadPrep(TRUE);
RelExpr *boundNewInsert = newInsert->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
return boundNewInsert;
}
// This is a callback from DefaultSpecification::bindNode
// called from Insert::bindNode
// (you need to understand the latter to understand this).
//
const char *Insert::getColDefaultValue(BindWA *bindWA, CollIndex i) const
{
CMPASSERT(canBindDefaultSpecification());
CollIndexList &colnoList = *targetUserColPosList_;
CollIndex pos = colnoList.entries() ? colnoList[i] : i;
const ValueIdList &colList = getTableDesc()->getColumnList();
if (colList.entries() <= pos) {
// 4023 degree of row value constructor must equal that of target table
*CmpCommon::diags() << DgSqlCode(-4023)
#pragma nowarn(1506) // warning elimination
<< DgInt0(++pos)
#pragma warn(1506) // warning elimination
#pragma nowarn(1506) // warning elimination
<< DgInt1(colList.entries());
#pragma warn(1506) // warning elimination
bindWA->setErrStatus();
return NULL;
}
ValueId target = colList[pos];
const NAColumn *nacol = target.getNAColumn();
const char* defaultValueStr = nacol->getDefaultValue();
CharInfo::CharSet mapCS = CharInfo::ISO88591;
NABoolean mapCS_hasVariableWidth = CharInfo::isVariableWidthMultiByteCharSet(mapCS);
size_t defaultValueWcsLen = 0;
NAWchar *defaultValueWcs = (NAWchar *) defaultValueStr;
NABoolean ucs2StrLitPrefix = FALSE;
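  // The block below handles, e.g., a character default stored in the
  // metadata as a UCS2 string literal such as _UCS2'abc' (a hypothetical
  // value): the _UCS2 prefix is stripped, the literal is converted to the
  // ISO_MAPPING character set and back, and an _ISO88591 prefix is prepended
  // so the subsequent CAST sees a literal in the expected character set.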
if (nacol->getDefaultClass() == COM_USER_DEFINED_DEFAULT &&
nacol->getType() &&
nacol->getType()->getTypeQualifier() == NA_CHARACTER_TYPE &&
((CharType*)(nacol->getType()))->getCharSet() == CharInfo::ISO88591 &&
mapCS_hasVariableWidth &&
defaultValueWcs != NULL &&
nacol->getNATable()->getObjectSchemaVersion() >= COM_VERS_2300 &&
(defaultValueWcsLen = NAWstrlen(defaultValueWcs)) > 6 &&
( ( ucs2StrLitPrefix = ( NAWstrncmp(defaultValueWcs, NAWSTR("_UCS2\'"), 6) == 0 )) ||
( defaultValueWcsLen > 10 &&
NAWstrncmp(defaultValueWcs, NAWSTR("_ISO88591\'"), 10) == 0 )) &&
defaultValueWcs[defaultValueWcsLen-1] == NAWCHR('\''))
{
NAWcharBuf *pWcharBuf = NULL;
if (ucs2StrLitPrefix)
{
// Strip the leading _UCS2 prefix.
pWcharBuf =
new (bindWA->wHeap()) NAWcharBuf(&defaultValueWcs[5],
defaultValueWcsLen - 5,
bindWA->wHeap());
}
else
{
// Keep the leading _ISO88591 prefix.
pWcharBuf =
new (bindWA->wHeap()) NAWcharBuf(defaultValueWcs,
defaultValueWcsLen,
bindWA->wHeap());
}
charBuf *pCharBuf = NULL; // must set this variable to NULL so the
// following function call will allocate
// space for the output literal string
Int32 errorcode = 0;
pCharBuf = unicodeTocset(*pWcharBuf, bindWA->wHeap(),
pCharBuf, mapCS, errorcode);
// Earlier releases treated the converted multibyte character
// string, in ISO_MAPPING character set, as if it is a string of
// ISO88591 characters and then convert it back to UCS-2 format;
// i.e., for each byte in the string, insert an extra byte
// containing the binary zero value.
NADELETE(pWcharBuf, NAWcharBuf, bindWA->wHeap());
pWcharBuf = NULL; // must set this variable to NULL to force the
    // following call to allocate space for
    // the output literal string
pWcharBuf = ISO88591ToUnicode(*pCharBuf, bindWA->wHeap(), pWcharBuf);
// Prepare the converted literal string for the following CAST
// function by setting pColDefaultValueStr to point to the string
NAWchar *pWcs = NULL;
if (ucs2StrLitPrefix)
{
pWcs = new (bindWA->wHeap()) NAWchar[10+NAWstrlen(pWcharBuf->data())];
NAWstrcpy(pWcs, NAWSTR("_ISO88591"));
}
else
{
pWcs = new (bindWA->wHeap()) NAWchar[1+NAWstrlen(pWcharBuf->data())];
pWcs[0] = NAWCHR('\0');
}
NAWstrcat(pWcs, pWcharBuf->data());
defaultValueStr = (char *)pWcs;
NADELETE(pWcharBuf, NAWcharBuf, bindWA->wHeap());
NADELETE(pCharBuf, charBuf, bindWA->wHeap());
}
if (NOT defaultValueStr AND bindWA) {
// 4107 column has no default so DEFAULT cannot be specified.
*CmpCommon::diags() << DgSqlCode(-4107) << DgColumnName(nacol->getColName());
bindWA->setErrStatus();
}
return defaultValueStr;
} // Insert::getColDefaultValue()
// -----------------------------------------------------------------------
// member functions for class Update
// -----------------------------------------------------------------------
RelExpr *Update::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Set flag for firstN in context
if (child(0) && child(0)->getOperatorType() == REL_SCAN)
if (child(0)->castToRelExpr() &&
((Scan *)(child(0)->castToRelExpr()))->getFirstNRows() >= 0)
if (bindWA &&
bindWA->getCurrentScope() &&
bindWA->getCurrentScope()->context())
bindWA->getCurrentScope()->context()->firstN() = TRUE;
setInUpdateOrInsert(bindWA, this, REL_UPDATE);
RelExpr * boundExpr = GenericUpdate::bindNode(bindWA);
if (bindWA->errStatus()) return NULL;
setInUpdateOrInsert(bindWA);
if (getTableDesc()->getNATable()->isHbaseCellTable())
{
*CmpCommon::diags() << DgSqlCode(-1425)
<< DgTableName(getTableDesc()->getNATable()->getTableName().
getQualifiedNameAsAnsiString())
<< DgString0("Reason: Cannot update an hbase table in CELL format. Use ROW format for this operation.");
bindWA->setErrStatus();
return this;
}
// QSTUFF
if (getGroupAttr()->isStream() &&
!getGroupAttr()->isEmbeddedUpdateOrDelete()) {
*CmpCommon::diags() << DgSqlCode(-4173);
bindWA->setErrStatus();
return this;
}
// QSTUFF
if (NOT bindWA->errStatus() AND
NOT getTableDesc()->getVerticalPartitions().isEmpty())
{
// 4058 UPDATE query cannot be used on a vertically partitioned table.
*CmpCommon::diags() << DgSqlCode(-4058) <<
DgTableName(getTableDesc()->getNATable()->getTableName().
getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return this;
}
  // make sure a scan done as part of an update runs in serializable mode so a
  // tsj(scan,update) implementation of an update runs as an atomic operation
if (child(0)->getOperatorType() == REL_SCAN) {
Scan *scanNode = (Scan*)(child(0)->castToRelExpr());
if (!scanNode->accessOptions().userSpecified()) {
scanNode->accessOptions().updateAccessOptions
(TransMode::ILtoAT(TransMode::SERIALIZABLE_));
}
}
// if FIRST_N is requested, insert a FirstN node.
if ((getOperatorType() == REL_UNARY_UPDATE) &&
(child(0)->getOperatorType() == REL_SCAN))
{
Scan * scanNode = (Scan *)(child(0)->castToRelExpr());
if ((scanNode->getFirstNRows() != -1) &&
(getGroupAttr()->isEmbeddedUpdateOrDelete()))
{
*CmpCommon::diags() << DgSqlCode(-4216);
bindWA->setErrStatus();
return NULL;
}
if (scanNode->getFirstNRows() >= 0)
{
FirstN * firstn = new(bindWA->wHeap())
FirstN(scanNode, scanNode->getFirstNRows(), NULL);
firstn->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
setChild(0, firstn);
}
}
  // if a rowset is used in the SET clause, a direct rowset that is not in a
  // subquery must be present in the WHERE clause
if ((bindWA->getHostArraysArea()) &&
(bindWA->getHostArraysArea()->hasHostArraysInSetClause()) &&
(!(bindWA->getHostArraysArea()->hasHostArraysInWhereClause()))) {
*CmpCommon::diags() << DgSqlCode(-30021) ;
bindWA->setErrStatus();
return this;
}
NABoolean transformUpdateKey = updatesClusteringKeyOrUniqueIndexKey(bindWA);
if (bindWA->errStatus()) // error occurred in updatesCKOrUniqueIndexKey()
return this;
// To be removed when TRAFODION-1610 is implemented.
NABoolean xnsfrmHbaseUpdate = FALSE;
if ((hbaseOper()) && (NOT isMerge()))
{
if (CmpCommon::getDefault(HBASE_TRANSFORM_UPDATE_TO_DELETE_INSERT) == DF_ON)
{
xnsfrmHbaseUpdate = TRUE;
}
else if (getCheckConstraints().entries())
{
xnsfrmHbaseUpdate = TRUE;
}
}
if (xnsfrmHbaseUpdate)
{
boundExpr = transformHbaseUpdate(bindWA);
}
else
// till here and remove the function transformHbaseUpdate also
if ((transformUpdateKey) && (NOT isMerge()))
{
boundExpr = transformUpdatePrimaryKey(bindWA);
}
else
boundExpr = handleInlining(bindWA, boundExpr);
if (bindWA->errStatus()) // error occurred in transformUpdatePrimaryKey()
return this; // or handleInlining()
return boundExpr;
} // Update::bindNode()
// -----------------------------------------------------------------------
// member functions for class MergeUpdate
// -----------------------------------------------------------------------
RelExpr *MergeUpdate::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
if (needsBindScope_)
bindWA->initNewScope();
  // For an xformed upsert any UDF or subquery is guaranteed to be
  // in the using clause. Upsert will not generate a merge without a using
  // clause. The ON clause, when-matched SET clause and when-not-matched INSERT
  // clauses all use expressions from the using clause (same value id).
  // Therefore any subquery or UDF in the using clause will flow to the
  // rest of the tree through the TSJ and will be available. Each subquery
// will be evaluated only once, and will be evaluated prior to the merge
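  // (A hypothetical illustration: for an upsert whose source computes f(b),
  // the transformed merge references f(b) only through the using clause, so
  // f(b) is evaluated once on the left side of the TSJ and just its result
  // value flows into the ON, SET and INSERT expressions.)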
if (isMerge() && child(0) && !xformedUpsert())
{
ItemExpr *selPred = child(0)->castToRelExpr()->selPredTree();
if (selPred || where_)
{
NABoolean ONhasSubquery = (selPred && selPred->containsSubquery());
NABoolean ONhasAggr = (selPred && selPred->containsAnAggregate());
NABoolean whrHasSubqry = FALSE;
if (ONhasSubquery || ONhasAggr ||
(where_ && ((whrHasSubqry=where_->containsSubquery()) ||
where_->containsAnAggregate())))
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0
(ONhasSubquery ? "Subquery in ON clause not allowed." :
(ONhasAggr ? "aggregate function in ON clause not allowed." :
(whrHasSubqry ?
"subquery in UPDATE ... WHERE clause not allowed." :
"aggregate function in UPDATE ... WHERE clause not allowed.")));
bindWA->setErrStatus();
return this;
}
ItemExpr *ONhasUDF = (selPred ? selPred->containsUDF() : NULL);
ItemExpr *whereHasUDF = (where_ ? where_->containsUDF() : NULL);
if (ONhasUDF || whereHasUDF)
{
*CmpCommon::diags() << DgSqlCode(-4471)
<< DgString0
(((UDFunction *)(ONhasUDF ? ONhasUDF : whereHasUDF))->
getFunctionName().getExternalName());
bindWA->setErrStatus();
return this;
}
}
}
if (isMerge() && recExprTree() && !xformedUpsert())
{
if (recExprTree()->containsSubquery())
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" Subquery in SET clause not allowed.");
bindWA->setErrStatus();
return this;
}
if (recExprTree()->containsUDF())
{
*CmpCommon::diags() << DgSqlCode(-4471)
<< DgString0(((UDFunction *)recExprTree()->containsUDF())->
getFunctionName().getExternalName());
bindWA->setErrStatus();
return this;
}
}
// if insertValues, then this is an upsert stmt.
if (insertValues())
{
if (insertValues()->containsSubquery() && !xformedUpsert())
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" Subquery in INSERT clause not allowed.");
bindWA->setErrStatus();
return this;
}
if (insertValues()->containsUDF() && !xformedUpsert())
{
*CmpCommon::diags() << DgSqlCode(-4471)
<< DgString0(((UDFunction *)insertValues()->containsUDF())->
getFunctionName().getExternalName());
bindWA->setErrStatus();
return this;
}
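    // Build a standalone Insert node over the VALUES tuple so that binding
    // it produces the new-record expressions for the WHEN NOT MATCHED
    // branch; only its bound newRecExpr/newRecExprArray are kept below, the
    // Insert node itself is discarded.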
Tuple * tuple = new (bindWA->wHeap()) Tuple(insertValues());
Insert * ins = new (bindWA->wHeap())
Insert(getTableName(),
NULL,
REL_UNARY_INSERT,
tuple,
insertCols(),
NULL);
ins->setInsertType(Insert::SIMPLE_INSERT);
if (isMergeUpdate())
ins->setIsMergeUpdate(TRUE);
else
ins->setIsMergeDelete(TRUE);
ins->setTableDesc(getTableDesc());
bindWA->getCurrentScope()->xtnmStack()->createXTNM();
ins = (Insert*)ins->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
bindWA->getCurrentScope()->xtnmStack()->removeXTNM();
mergeInsertRecExpr() = ins->newRecExpr();
mergeInsertRecExprArray() = ins->newRecExprArray();
}
NATable *naTable = bindWA->getNATable(getTableName());
if (bindWA->errStatus())
return NULL;
if ((naTable->getViewText() != NULL) && (naTable->getViewCheck()))
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" View with check option not allowed.");
bindWA->setErrStatus();
return NULL;
}
if ((naTable->isHbaseCellTable()) ||
(naTable->isHbaseRowTable()))
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0("Hbase tables not supported.");
bindWA->setErrStatus();
return NULL;
}
if (naTable->isHiveTable())
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0("Hive tables not supported.");
bindWA->setErrStatus();
return NULL;
}
bindWA->setMergeStatement(TRUE);
RelExpr * boundExpr = Update::bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
if (checkForMergeRestrictions(bindWA))
return NULL;
if (where_) {
bindWA->getCurrentScope()->context()->inWhereClause() = TRUE;
where_->convertToValueIdSet(mergeUpdatePred(), bindWA, ITM_AND);
bindWA->getCurrentScope()->context()->inWhereClause() = FALSE;
if (bindWA->errStatus()) return NULL;
// any values added by where_ to Outer References Set should be
// added to input values that must be supplied to this MergeUpdate
getGroupAttr()->addCharacteristicInputs
(bindWA->getCurrentScope()->getOuterRefs());
}
if (needsBindScope_)
bindWA->removeCurrentScope();
bindWA->setMergeStatement(TRUE);
return boundExpr;
} // MergeUpdate::bindNode()
// -----------------------------------------------------------------------
// member functions for class Delete
// -----------------------------------------------------------------------
RelExpr *Delete::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Save the current scope and node for children to peruse if necessary.
BindContext *context = bindWA->getCurrentScope()->context();
if (context) {
context->deleteScope() = bindWA->getCurrentScope();
context->deleteNode() = this;
if (getFirstNRows() >= 0) context->firstN() = TRUE;
}
RelExpr * boundExpr = GenericUpdate::bindNode(bindWA);
if (bindWA->errStatus()) return boundExpr;
if ((csl_) &&
(NOT getTableDesc()->getNATable()->isHbaseRowTable()))
{
*CmpCommon::diags() << DgSqlCode(-1425)
<< DgTableName(getTableDesc()->getNATable()->getTableName().
getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return this;
}
if (getTableDesc()->getNATable()->isHbaseCellTable())
{
*CmpCommon::diags() << DgSqlCode(-1425)
<< DgTableName(getTableDesc()->getNATable()->getTableName().
getQualifiedNameAsAnsiString())
<< DgString0("Reason: Cannot delete from an hbase table in CELL format. Use ROW format for this operation.");
bindWA->setErrStatus();
return this;
}
// QSTUFF
if (getGroupAttr()->isStream() &&
!getGroupAttr()->isEmbeddedUpdateOrDelete()) {
*CmpCommon::diags() << DgSqlCode(-4180);
bindWA->setErrStatus();
return this;
}
// QSTUFF
// Not only are check constraints on a DELETE nonsensical,
// but they can cause VEGReference::replaceVEGReference to assert
// with valuesToBeBound.isEmpty (Genesis 10-980202-0718).
//
// in case we are binding a generic update within a generic update
// due to view expansion we would like to ensure that all constraints
// are checked properly for the update operation performed on the
// underlying base table
if (NOT (bindWA->inViewExpansion() && bindWA->inGenericUpdate())) { // QSTUFF
getTableDesc()->checkConstraints().clear();
checkConstraints().clear();
}
if (getTableDesc()->getClusteringIndex()->getNAFileSet()->isEntrySequenced())
{
// 4018 DELETE query cannot be used against an Entry-Seq table.
*CmpCommon::diags() << DgSqlCode(-4018) <<
DgTableName(getTableDesc()->getNATable()->getTableName().
getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return this;
}
if (NOT getTableDesc()->getVerticalPartitions().isEmpty())
{
// 4029 DELETE query cannot be used on a vertically partitioned table.
*CmpCommon::diags() << DgSqlCode(-4029) <<
DgTableName(getTableDesc()->getNATable()->getTableName().
getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return this;
}
Scan *scanNode = NULL;
  // make sure a scan done as part of a delete runs in serializable mode so a
  // tsj(scan,delete) implementation of a delete runs as an atomic operation
if (child(0)->getOperatorType() == REL_SCAN) {
scanNode = (Scan*)(child(0)->castToRelExpr());
if (!scanNode->accessOptions().userSpecified()) {
scanNode->accessOptions().updateAccessOptions
(TransMode::ILtoAT(TransMode::SERIALIZABLE_));
}
}
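  // Walk up the BindScope stack to determine whether this DELETE sits
  // inside a UNION; that information affects the FIRST N / MTS handling
  // below.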
BindScope *prevScope = NULL;
BindScope *currScope = bindWA->getCurrentScope();
NABoolean inUnion = FALSE;
while (currScope && !inUnion)
{
BindContext *currContext = currScope->context();
if (currContext->inUnion())
{
inUnion = TRUE;
}
prevScope = currScope;
currScope = bindWA->getPreviousScope(prevScope);
}
RelRoot *root = bindWA->getTopRoot();
if (getFirstNRows() >= 0) // First N Delete
{
CMPASSERT(getOperatorType() == REL_UNARY_DELETE);
      // First N Delete on a partitioned table. Not considered an MTS delete.
if (getTableDesc()->getClusteringIndex()->isPartitioned())
{
if (root->getCompExprTree() || inUnion ) // for unions we know there is a select
        { // an outer select is not allowed for a "non-MTS" first N delete
*CmpCommon::diags() << DgSqlCode(-4216);
bindWA->setErrStatus();
return this;
}
RelExpr * childNode = child(0)->castToRelExpr();
FirstN * firstn = new(bindWA->wHeap())
FirstN(childNode, getFirstNRows(), NULL);
firstn->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
setChild(0, firstn);
setFirstNRows(-1);
}
else
{
        // First N delete on a single partition. This is considered an MTS Delete.
if ((bindWA->getHostArraysArea()) &&
((bindWA->getHostArraysArea()->hasHostArraysInWhereClause()) ||
(bindWA->getHostArraysArea()->getHasSelectIntoRowsets())))
{ // MTS delete not supported with rowsets
*CmpCommon::diags() << DgSqlCode(-30037);
bindWA->setErrStatus();
return this;
}
if (scanNode && scanNode->getSelectionPred().containsSubquery())
{
// MTS Delete not supported with subquery in where clause
*CmpCommon::diags() << DgSqlCode(-4138);
bindWA->setErrStatus();
return this;
}
if (root->hasOrderBy())
{ // mts delete not supported with order by
*CmpCommon::diags() << DgSqlCode(-4189);
bindWA->setErrStatus();
return this;
}
if (root->getCompExprTree() || // MTS Delete has an outer select
bindWA->isInsertSelectStatement() || // Delete inside an Insert Select statement, Soln:10-061103-0274
inUnion ) // for unions we know there is a select
{
        if (root->getFirstNRows() < -1 || // the outer select has a LAST 1/0 clause
            inUnion) // for unions we wish to raise a union-specific error later, so set the flag now
        {
setMtsStatement(TRUE);
}
else
{ // raise an error if no Last 1 clause is found.
*CmpCommon::diags() << DgSqlCode(-4136);
bindWA->setErrStatus();
return this;
}
}
}
}
// Triggers --
if ((NOT isFastDelete()) && (NOT noIMneeded()))
boundExpr = handleInlining(bindWA, boundExpr);
else if (hbaseOper() && (getGroupAttr()->isEmbeddedUpdateOrDelete()))
{
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA));
CorrName corrOLDTable (getScanNode(TRUE)->getTableDesc()->getCorrNameObj().getQualifiedNameObj(),
bindWA->wHeap(),"OLD");
// expose OLD table columns
getRETDesc()->addColumns(bindWA, *child(0)->getRETDesc(), &corrOLDTable);
ValueIdList outputs;
getRETDesc()->getValueIdList(outputs, USER_AND_SYSTEM_COLUMNS);
addPotentialOutputValues(outputs);
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
}
if (isMtsStatement())
bindWA->setEmbeddedIUDStatement(TRUE);
if (getFirstNRows() > 0)
{
      // create a FirstN node to delete FIRST N rows, if no such node was created
      // during handleInlining. This occurs when DELETE FIRST N is used on a table
      // with no dependent objects.
FirstN * firstn = new(bindWA->wHeap())
FirstN(boundExpr, getFirstNRows());
if (NOT(scanNode && scanNode->getSelectionPred().containsSubquery()))
firstn->setCanExecuteInDp2(TRUE);
firstn->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
setFirstNRows(-1);
boundExpr = firstn;
}
if (csl())
{
for (Lng32 i = 0; i < csl()->entries(); i++)
{
NAString * nas = (NAString*)(*csl())[i];
bindWA->hbaseColUsageInfo()->insert
((QualifiedName*)&getTableDesc()->getNATable()->getTableName(), nas);
}
}
return boundExpr;
} // Delete::bindNode()
// -----------------------------------------------------------------------
// member functions for class MergeDelete
// -----------------------------------------------------------------------
RelExpr *MergeDelete::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
bindWA->initNewScope();
if ((isMerge()) &&
(child(0)) &&
(child(0)->castToRelExpr()->selPredTree()))
{
if (child(0)->castToRelExpr()->selPredTree()->containsSubquery())
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" Subquery in ON clause not allowed.");
bindWA->setErrStatus();
return this;
}
if (child(0)->castToRelExpr()->selPredTree()->containsUDF())
{
*CmpCommon::diags() << DgSqlCode(-4471)
<< DgString0(((UDFunction *)child(0)->
castToRelExpr()->selPredTree()->
containsUDF())->
getFunctionName().getExternalName());
bindWA->setErrStatus();
return this;
}
}
// if insertValues, then this is an upsert stmt.
if (insertValues())
{
if (insertValues()->containsSubquery())
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" Subquery in INSERT clause not allowed.");
bindWA->setErrStatus();
return this;
}
if (insertValues()->containsUDF())
{
*CmpCommon::diags() << DgSqlCode(-4471)
<< DgString0(((UDFunction *)insertValues()->
containsUDF())->
getFunctionName().getExternalName());
bindWA->setErrStatus();
return this;
}
if (CmpCommon::getDefault(COMP_BOOL_175) == DF_OFF)
{
// MERGE DELETE + INSERT is buggy, so disallow it unless CQD is on. In
// particular, the optimizer sometimes fails to produce a plan in phase 1.
// JIRA TRAFODION-1509 covers completing the MERGE DELETE + INSERT feature.
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" MERGE DELETE not allowed with INSERT.");
}
Tuple * tuple = new (bindWA->wHeap()) Tuple(insertValues());
Insert * ins = new (bindWA->wHeap())
Insert(getTableName(),
NULL,
REL_UNARY_INSERT,
tuple,
insertCols(),
NULL);
ins->setInsertType(Insert::SIMPLE_INSERT);
ins->setIsMergeDelete(TRUE);
ins->setTableDesc(getTableDesc());
bindWA->getCurrentScope()->xtnmStack()->createXTNM();
ins = (Insert*)ins->bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
bindWA->getCurrentScope()->xtnmStack()->removeXTNM();
mergeInsertRecExpr() = ins->newRecExpr();
mergeInsertRecExprArray() = ins->newRecExprArray();
}
NATable *naTable = bindWA->getNATable(getTableName());
if (bindWA->errStatus())
return NULL;
if ((naTable->getViewText() != NULL) && (naTable->getViewCheck()))
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" View with check option not allowed.");
bindWA->setErrStatus();
return NULL;
}
bindWA->setMergeStatement(TRUE);
RelExpr * boundExpr = Delete::bindNode(bindWA);
if (bindWA->errStatus())
return NULL;
if (checkForMergeRestrictions(bindWA))
return NULL;
bindWA->removeCurrentScope();
bindWA->setMergeStatement(TRUE);
return boundExpr;
} // MergeDelete::bindNode()
static const char NEWTable [] = "NEW"; // QSTUFF: corr for embedded d/u
static const char OLDTable [] = "OLD"; // QSTUFF: corr for embedded d/u
// QSTUFF
// this method binds both the set clauses applied to the after
// image and the set clauses applied to the before image.
// the new SET ON ROLLBACK clause allows an application to modify
// the before image, e.g.:
// delete from tab set on rollback x = 1;
// update tab set x = 1 set on rollback x = 2;
#pragma nowarn(770) // warning elimination
void GenericUpdate::bindUpdateExpr(BindWA *bindWA,
ItemExpr *recExpr,
ItemExprList &assignList,
RelExpr *boundView,
Scan *scanNode,
SET(short) &stoiColumnSet,
NABoolean onRollback)
{
RETDesc *origScope = NULL;
ValueIdSet &newRecExpr =
(onRollback == TRUE) ? newRecBeforeExpr() : this->newRecExpr();
ValueIdArray &newRecExprArray =
(onRollback == TRUE) ? newRecBeforeExprArray() : this->newRecExprArray();
if (onRollback &&
((!getTableDesc()->getClusteringIndex()->getNAFileSet()->isAudited()) ||
(getTableDesc()->getNATable()->hasLobColumn()))) {
// SET ON ROLLBACK clause is not allowed on a non-audited table
*CmpCommon::diags() << DgSqlCode(-4214)
<< DgTableName(getTableDesc()->getNATable()->getTableName().
getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return;
}
CollIndex i, j;
CollIndexList colnoList; // map of col nums (row positions)
CollIndex a = assignList.entries();
const ColumnDescList *viewColumns = NULL;
// if this is a view then get the columns of the view
if (boundView) {
viewColumns = boundView->getRETDesc()->getColumnList();
}
// if the GU has a SET ON ROLLBACK clause this method is called
// twice: once to bind the columns in the SET clause and a second
// time to bind the columns in the SET ON ROLLBACK clause.
// Initially the update column list of the stoi_ is empty.
// If this is the second call, store the update column list
// from the first call.
short *stoiColumnList = NULL;
CollIndex currColumnCount = 0;
if (currColumnCount = stoi_->getStoi()->getColumnListCount())
{
stoiColumnList = new (bindWA->wHeap()) short[currColumnCount];
for (i = 0; i < currColumnCount; i++)
stoiColumnList[i] = stoi_->getStoi()->getUpdateColumn(i);
}
stoi_->getStoi()->setColumnList(new (bindWA->wHeap()) short[a + currColumnCount]);
for (i = 0; i < a; i++) {
CMPASSERT(assignList[i]->getOperatorType() == ITM_ASSIGN);
assignList[i]->child(0)->bindNode(bindWA); // LHS
if (bindWA->errStatus()) return;
const NAColumn *nacol = assignList[i]->child(0).getNAColumn();
if(getOperatorType() == REL_UNARY_UPDATE)
{
stoi_->getStoi()->setUpdateColumn(i, (short) nacol->getPosition());
stoi_->getStoi()->incColumnListCount();
stoi_->addUpdateColumn(nacol->getPosition());
}
const NAType *colType = nacol->getType();
if (!colType->isSupportedType()) {
      *CmpCommon::diags() << DgSqlCode(-4028) // 4028 table not updatable
<< DgTableName(nacol->getNATable()->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return;
}
// If this is a sequence generator IDENTITY column
// with a default type of GENERATED ALWAYS,
// then post error -3428. GENERATED ALWAYS
// IDENTITY columns may not be updated.
if(getOperatorType() == REL_UNARY_UPDATE &&
CmpCommon::getDefault(COMP_BOOL_210) == DF_ON &&
nacol->isIdentityColumnAlways())
{
*CmpCommon::diags() << DgSqlCode(-3428)
<< DgString0(nacol->getColName());
bindWA->setErrStatus();
return;
}
colnoList.insert(nacol->getPosition()); // save colno for next loop
    // in case it's not a view we record the column position of the
    // base table, otherwise that of the view
if (NOT boundView)
stoiColumnSet.insert((short) nacol->getPosition());
// if this is a view get the positions of the columns
// within the view that are being updated.
if (boundView) {
ValueId vid = assignList[i]->child(0).getValueId();
NABoolean found = FALSE;
for (CollIndex k=0; k < viewColumns->entries(); k++) {
if ((*viewColumns)[k]->getValueId() == vid) {
stoiColumnSet.insert((short) k);
found = TRUE;
// Updatable views cannot have any underlying basetable column
// appear more than once, so it's safe to break out of the loop.
break;
}
} // loop k
CMPASSERT(found);
} // boundView
} // loop i<a
// If this is the second call to this method, restore the update
// columns bound in the first call
if (currColumnCount)
{
for (i = a; i < (currColumnCount + a); i++)
{
stoi_->getStoi()->setUpdateColumn(i, stoiColumnList[i-a]);
stoi_->addUpdateColumn(stoiColumnList[i-a]);
}
}
// RHS: Bind the right side of the Assigns such that the source expressions
// reference the columns of the source table.
//
//### With a cascade of views, should this be "getRETDesc" as is,
//### or "scanNode->getRETDesc" ? --?
//### Should I set this->setRD to be the target(new)tbl at the beginning,
//### explicitly say "scanNode..." here? --i think not
//
if (GU_DEBUG) GU_DEBUG_Display(bindWA, this, "u");
origScope = bindWA->getCurrentScope()->getRETDesc();
// this sets the scope to the scan table for the before values
// the previous scope was to the "UPDATE" table
// we will reset the scope before returning in order not to introduce
  // hidden side effects but have the generic update explicitly point
// to the scan scope
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
  // this has to be done after binding the LHS because of triggers
  // Soln: 10-050110-3403: Don't side-effect the SET ON ROLLBACK list
  // when we come down to process it the next time over. So process only
  // the assignList.
ItemExpr* tempExpr = assignList.convertToItemExpr();
tempExpr->convertToValueIdSet(newRecExpr, bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) return;
if (NOT onRollback)
{
for (ValueId v = newRecExpr.init(); newRecExpr.next(v);
newRecExpr.advance(v))
{
CMPASSERT(v.getItemExpr()->getOperatorType() == ITM_ASSIGN);
        // remove all the on-rollback expressions
if (((Assign *)v.getItemExpr())->onRollback())
{
newRecExpr.remove(v);
}
}
}
else
{
for (ValueId v = newRecExpr.init(); newRecExpr.next(v);
newRecExpr.advance(v))
{
CMPASSERT(v.getItemExpr()->getOperatorType() == ITM_ASSIGN);
        // remove all the non-on-rollback expressions
if ((getOperatorType() == REL_UNARY_UPDATE) &&
!(((Assign *)v.getItemExpr())->onRollback()))
{
newRecExpr.remove(v);
}
}
if (getOperatorType() == REL_UNARY_DELETE)
{
recExpr->convertToValueIdSet(this->newRecExpr(), bindWA, ITM_ITEM_LIST);
}
}
  // now we have built the RHS
// Now we have our colnoList map with which to build a temporary array
// (with holes) and get the update columns ordered (eliminating dups).
// Actually we store the ids of the bound Assign nodes corresponding
// to the columns, of course.
//
CollIndex totalColCount = getTableDesc()->getColumnList().entries();
#pragma nowarn(1506) // warning elimination
ValueIdArray holeyArray(totalColCount);
#pragma warn(1506) // warning elimination
ValueId assignId; // i'th newRecExpr valueid
for (i = 0, assignId = newRecExpr.init(); // bizarre ValueIdSet iter
newRecExpr.next(assignId);
i++, newRecExpr.advance(assignId)) {
j = colnoList[i];
if (holeyArray.used(j)) {
const NAColumn *nacol = holeyArray[j].getItemExpr()->child(0).getNAColumn();
//4022 target column multiply specified
*CmpCommon::diags() << DgSqlCode(-4022) << DgColumnName(nacol->getColName());
bindWA->setErrStatus();
return;
}
holeyArray.insertAt(j, assignId);
}
//
// Now we have the holey array. The next loop ignores unused entries
// and copies the used entries into newRecExprArray(), with no holes.
// It also builds a list of the columns being updated that contain
// a column on the right side of the SET assignment expression.
//
// Entering this loop, i is the number of specified update columns;
// exiting, j is.
//
CMPASSERT(i == a);
  // we build a map between identical old and new columns, i.e. columns
  // which are not updated and thus identical. We insert the resulting
  // equivalence relationships, e.g. old.a = new.a, during transformation
  // into the respective VEGGIES; this allows the optimizer to select an index
  // scan for satisfying order requirements specified by an order by clause
  // on new columns, e.g.
  // select * from (update t set y = y + 1 return new.a) t order by a;
  // we cannot get the benefit of this VEG for a merge statement when IM is required:
  // allowing a VEG in this case causes corruption of base table key values because
  // we use the "old" value of the key column from fetchReturnedExpr, which can be junk
  // in case there is no row to update/delete, and a brand new row is being inserted
NABoolean mergeWithIndex = isMerge() && getTableDesc()->hasSecondaryIndexes() ;
if ((NOT onRollback) && (NOT mergeWithIndex)){
for (i = 0;i < totalColCount; i++){
if (!(holeyArray.used(i))){
oldToNewMap().addMapEntry(
scanNode->getTableDesc()->
getColumnList()[i].getItemExpr()->getValueId(),
getTableDesc()->
getColumnList()[i].getItemExpr()->getValueId());
}
}
}
// when binding a view which contains an embedded update
// we must map update valueids to scan value ids
// to allow for checking of access rights.
for (i = 0; i < getTableDesc()->getColumnList().entries();i++)
bindWA->getUpdateToScanValueIds().addMapEntry(
getTableDesc()->getColumnList()[i].getItemExpr()->getValueId(),
scanNode->getTableDesc()->getColumnList()[i].getItemExpr()->getValueId());
newRecExprArray.resize(i);
TableDesc *scanDesc = scanNode->getTableDesc();
NABoolean rightContainsColumn = FALSE;
for (i = j = 0; i < totalColCount; i++) {
if (holeyArray.used(i)) {
ValueId assignExpr = holeyArray[i];
newRecExprArray.insertAt(j++, assignExpr);
ItemExpr *right = assignExpr.getItemExpr()->child(1);
// even if a column is set to a constant we mark it
// as updated to prevent indices covering this column from
// being used for access
ItemExpr *left = assignExpr.getItemExpr()->child(0);
scanDesc->addColUpdated(left->getValueId());
if (right->containsColumn())
rightContainsColumn = TRUE;
}
}
  // WITH NO ROLLBACK is not supported if the right side of the update
  // contains a column expression. This feature is also not
  // supported with the SET ON ROLLBACK feature.
if (isNoRollback() ||
(CmpCommon::transMode()->getRollbackMode() == TransMode::NO_ROLLBACK_))
{
if ((rightContainsColumn && CmpCommon::getDefault(ALLOW_RISKY_UPDATE_WITH_NO_ROLLBACK) == DF_OFF) || onRollback)
{
NAString warnMsg = "";
if(rightContainsColumn)
{
warnMsg = "Suggestion: Set ALLOW_RISKY_UPDATE_WITH_NO_ROLLBACK CQD to ON to allow";
if (getOperatorType() == REL_UNARY_DELETE)
warnMsg += " DELETE ";
else
warnMsg += " UPDATE ";
warnMsg += "command with right-hand side SET clause consisting of columns.";
}
if (getOperatorType() == REL_UNARY_DELETE)
*CmpCommon::diags() << DgSqlCode(-3234) << DgString0(warnMsg);
else
*CmpCommon::diags() << DgSqlCode(-3233) << DgString0(warnMsg);
bindWA->setErrStatus();
return ;
}
}
CMPASSERT(j == a);
bindWA->getCurrentScope()->setRETDesc(origScope);
}
#pragma warn(770) // warning elimination
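// Gather the selection predicates from 'start' down the leftmost chain of
// unary operators, stopping once a Scan node is reached; gives up (without
// descending further) at the first node that has more than one child.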
void getScanPreds(RelExpr *start, ValueIdSet &preds)
{
RelExpr *result = start;
while (result) {
preds += result->selectionPred();
if (result->getOperatorType() == REL_SCAN) break;
if (result->getArity() > 1) {
return ;
}
result = result->child(0);
}
return;
}
// Note that this is the R2 compatible way to handle the Halloween problem.
// This update (only insert for now) contains a reference to the
// target in the source. This could potentially run into the so
// called Halloween problem. Determine if this is a case we may be
// able to handle. The cases that we handle are:
//
// -- The reference to the target is in a subquery
// -- There may be any number of references to the target in the source
// -- The subquery cannot be a row subquery.
// -- The subquery must contain only one source (the reference to the target)
// --
//
// Return TRUE if this does represent a Halloween problem and the caller will
// then issue the error message
//
// Return FALSE if this is a case we can handle.  Set the
// 'avoidHalloweenR2' flag in the subquery and this generic Update so
// that the optimizer will pick a plan that is Halloween safe.
//
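// A hypothetical example of such a statement:
//   insert into t select * from s where s.a > (select max(a) from t);
// The scan of t inside the subquery reads the same table being inserted
// into; marking the subquery with avoidHalloweenR2 forces the optimizer to
// pick a plan that evaluates the subquery before the insert modifies t.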
NABoolean GenericUpdate::checkForHalloweenR2(Int32 numScansToFind)
{
// If there are no scans, no problem, return okay (FALSE)
//
if(numScansToFind == 0) {
return FALSE;
}
// Allow any number of scans
  // Not supported for general NEO users.
if (CmpCommon::getDefault(MODE_SPECIAL_1) == DF_OFF)
return TRUE;
// Number of scans of the target table found so far.
//
Int32 numHalloweenScans = 0;
// Get the primary source of the generic update. We are looking for
// the halloween scans in the predicates of this scan node
//
ValueIdSet preds;
getScanPreds(this, preds);
Subquery *subq;
// Search the preds of this scan for subqueries.
//
// ValueIdSet &preds = scanNode->selectionPred();
for(ValueId p = preds.init(); preds.next(p); preds.advance(p)) {
ItemExpr *pred = p.getItemExpr();
// If this pred contains a subquery, find the scans
//
if(pred->containsSubquery()) {
ValueIdSet subqPreds;
subqPreds += pred->getValueId();
// Search all the preds and their children
//
while(subqPreds.entries()) {
ValueIdSet children;
for(ValueId s = subqPreds.init();
subqPreds.next(s);
subqPreds.advance(s)) {
ItemExpr *term = s.getItemExpr();
// Found a subquery, now look for the scan...
//
if(term->isASubquery()) {
subq = (Subquery *)term;
// We don't support row subqueries, keep looking for the scan
// in the next subquery.
if(!subq->isARowSubquery()) {
// Is this the subquery that has the scan of the table
// we are updating?
//
Scan *halloweenScan = subq->getSubquery()->getScanNode(FALSE);
if(halloweenScan) {
// Is this the scan we are looking for?
//
if(halloweenScan->getTableDesc()->getNATable() ==
getTableDesc()->getNATable()) {
subq->setAvoidHalloweenR2(this);
numHalloweenScans++;
}
}
}
}
// Follow all the children as well.
//
for(Int32 i = 0; i < term->getArity(); i++) {
children += term->child(i)->getValueId();
}
}
subqPreds = children;
}
}
}
setAvoidHalloweenR2(numScansToFind);
// If we found and marked all the halloween scans, then return FALSE (allow).
// We have marked the subqueries to avoid the halloween problem. This will
// force the optimizer to pick a plan that will be safe.
//
if(numHalloweenScans == numScansToFind)
return FALSE;
return TRUE;
}
// See ANSI 7.9 SR 12 + 6.3 SR 8 for definition of "updatable" table
// references; in particular, note that one of the requirements for a view's
// being updatable is that ultimately underlying it (passing through a
// whole stack of views) is *exactly one* base table -- i.e., no joins
// allowed.
//
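// For instance (a hypothetical example), "create view v as select * from
// t1, t2" is not updatable because it references two base tables, while a
// view stacked over exactly one base table can be.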
RelExpr *GenericUpdate::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// QSTUFF
// we indicate that we are in a generic update. If we are
// already in a generic update we know that this time we are
// binding a generic update within a view.
// however, be aware of the following scenario. We currently
// reject embedded updates and streams in the source but
// obviously allow a view with embedded updates as a target.
// Since it's already within a generic update we will only
// return the scan node to the insert
//
// insert into select ... from (update/delete ....) t;
//
// but not cause the update to be bound in when doing
//
// insert into viewWithDeleteOrUpdate values(...);
//
// in both cases we got an insert->update/delete->scan
NABoolean inGenericUpdate = FALSE;
if (getOperatorType() != REL_UNARY_INSERT)
inGenericUpdate = bindWA->setInGenericUpdate(TRUE);
NABoolean returnScanNode =
(inGenericUpdate && bindWA->inViewExpansion() &&
( getOperatorType() == REL_UNARY_DELETE ||
getOperatorType() == REL_UNARY_UPDATE ));
// those group attributes should be set only by the topmost
// generic update. Once we are invoked while already binding
// another generic update, we reset those group attributes since we
// already know that we will only return a scan node
if ((returnScanNode) && (child(0))) {
child(0)->getGroupAttr()->setStream(FALSE);
child(0)->getGroupAttr()->setSkipInitialScan(FALSE);
child(0)->getGroupAttr()->setEmbeddedIUD(NO_OPERATOR_TYPE);
}
// if we have no user-specified access options then
// get it from nearest enclosing scope that has one (if any)
if (!accessOptions().userSpecified()) {
StmtLevelAccessOptions *axOpts = bindWA->findUserSpecifiedAccessOption();
if (axOpts) {
accessOptions() = *axOpts;
}
}
// The above code is in Scan::bindNode also.
// It would be nice to refactor this common code; someday.
// Make sure we have the appropriate transaction mode & isolation level
// in order to do the update. Genesis 10-970922-3488.
// Keep this logic in sync with Generator::verifyUpdatableTransMode()!
Lng32 sqlcodeA = 0, sqlcodeB = 0;
// fix case 10-040429-7402 by checking our statement level access options
// first before declaring any error 3140/3141.
TransMode::IsolationLevel il;
ActiveSchemaDB()->getDefaults().getIsolationLevel
(il,
CmpCommon::getDefault(ISOLATION_LEVEL_FOR_UPDATES));
verifyUpdatableTrans(&accessOptions(), CmpCommon::transMode(),
il,
sqlcodeA, sqlcodeB);
if (sqlcodeA || sqlcodeB) {
// 3140 The isolation level cannot be READ UNCOMMITTED.
// 3141 The transaction access mode must be READ WRITE.
if (sqlcodeA) *CmpCommon::diags() << DgSqlCode(sqlcodeA);
if (sqlcodeB) *CmpCommon::diags() << DgSqlCode(sqlcodeB);
bindWA->setErrStatus();
return this;
}
Int64 transId=-1;
if ((isNoRollback() &&
(NOT (Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))) &&
((CmpCommon::transMode()->getAutoCommit() != TransMode::ON_ ) ||
(NAExecTrans(0, transId)))) {
// do not return an error if this is a showplan query being compiled
// in the second arkcmp.
const NAString * val =
ActiveControlDB()->getControlSessionValue("SHOWPLAN");
if (NOT ((val) && (*val == "ON")))
{
*CmpCommon::diags() << DgSqlCode(-3231); // Autocommit must be ON,
bindWA->setErrStatus(); // if No ROLLBACK is specified in IUD statement syntax
return this;
}
}
if (isNoRollback() ||
(CmpCommon::transMode()->getRollbackMode() == TransMode::NO_ROLLBACK_))
{
if ((child(0)->getGroupAttr()->isStream()) ||
(child(0)->getGroupAttr()->isEmbeddedUpdateOrDelete()) ||
(updateCurrentOf()))
{
if (getOperatorType() == REL_UNARY_DELETE)
*CmpCommon::diags() << DgSqlCode(-3234);
else
*CmpCommon::diags() << DgSqlCode(-3233);
bindWA->setErrStatus();
return this;
}
}
// The SQL standard as defined in ISO/IEC JTC 1/SC 32 date: 2009-01-12
// CD 9075-2:200x(E) published by ISO/IEC JTC 1/SC 32/WG 3
// "Information technology -- Database languages -- SQL --
// Part2: Foundation (SQL/Foundation)", page 920, section 14.14,
// page 918, section 14.13, page 900, section 14.9, page 898, section 14.8
// does allow correlation names in update & delete statements.
// Therefore, we delete this unnecessary restriction as part of the fix
// for genesis solution 10-090921-4747:
// Many places in this method assume the specified target table
// has no correlation name -- indeed, Ansi syntax does not allow one --
// this assert is to catch any future syntax-extensions we may do.
//
// E.g., see code marked
// ##SQLMP-SYNTAX-KLUDGE##
// in SqlParser.y + SqlParserAux.cpp,
// which add a non-Ansi corr name to all table refs
// when they really only should add to SELECTed tables.
// So here, in an INSERT/UPDATE/DELETEd table,
// we UNDO that kludge.
//
//if (!getTableName().getCorrNameAsString().isNull()) {
//CMPASSERT(SqlParser_NAMETYPE == DF_NSK ||
// HasMPLocPrefix(getTableName().getQualifiedNameObj().getCatalogName()));
//getTableName().setCorrName(""); // UNDO that kludge!
//}
// Genesis 10-980831-4973
if (((getTableName().isLocationNameSpecified() ||
getTableName().isPartitionNameSpecified()) &&
(!Get_SqlParser_Flags(ALLOW_SPECIALTABLETYPE))) &&
(getOperatorType() != REL_UNARY_DELETE)) {
*CmpCommon::diags() << DgSqlCode(-4061); // 4061 a partn not ins/upd'able
bindWA->setErrStatus();
return this;
}
// -- Triggers
// If this node is part of the action of a trigger,
// then don't count the rows that are affected.
if (bindWA->findNextScopeWithTriggerInfo() != NULL)
{
rowsAffected_ = DO_NOT_COMPUTE_ROWSAFFECTED;
// Does the table name match the name of one of the transition tables?
if (updatedTableName_.isATriggerTransitionName(bindWA))
{
// 11020 Ambiguous or illegal use of transition name $0~string0.
*CmpCommon::diags() << DgSqlCode(-11020)
<< DgString0(getTableName().getQualifiedNameAsString());
bindWA->setErrStatus();
return this;
}
}
// Get the NATable for this object, and an initial ref count.
// Set up stoi.
//
// We do not suppress mixed name checking in getNATable for R1
// from here, because prototype name executes through here. We
// want to check prototype name.
const NATable *naTable = bindWA->getNATable(getTableName());
if (bindWA->errStatus()) return this;
if (naTable && naTable->isHbaseTable())
hbaseOper() = TRUE;
if ((CmpCommon::getDefault(ALLOW_DML_ON_NONAUDITED_TABLE) == DF_OFF) &&
naTable && naTable->getClusteringIndex() &&
(!naTable->getClusteringIndex()->isAudited())
// && !bindWA->isBindingMvRefresh() // uncomment if non-audit MVs are ever supported
)
{
*CmpCommon::diags() << DgSqlCode(-4211)
<< DgTableName(
naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return NULL;
}
// By setting the CQD OVERRIDE_SYSKEY to 'ON', the users
// are allowed to specify a SYSKEY value on an INSERT.
// We achieve this by treating a system column as a user column.
// This support is only provided for key sequenced files
// for MX and MP tables.
if (getOperatorType() == REL_UNARY_INSERT &&
naTable->hasSystemColumnUsedAsUserColumn() &&
naTable->getClusteringIndex()->isEntrySequenced())
{
*CmpCommon::diags() << DgSqlCode(-3410)
<< DgTableName(naTable->getTableName().getQualifiedNameAsString());
bindWA->setErrStatus();
return this;
}
Int32 beforeRefcount = naTable->getReferenceCount();
OptSqlTableOpenInfo *listedStoi
= setupStoi(stoi_, bindWA, this, naTable, getTableName());
if (getOperatorType() == REL_UNARY_INSERT &&
NOT naTable->isInsertable()) {
*CmpCommon::diags() << DgSqlCode(-4027) // 4027 table not insertable
<< DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return this;
}
if (NOT naTable->isUpdatable()) {
*CmpCommon::diags() << DgSqlCode(-4028) // 4028 table not updatable
<< DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return this;
}
if (naTable->isVerticalPartition()) {
// LCOV_EXCL_START - cnu
// On attempt to update an individual VP, say: 4082 table not accessible
*CmpCommon::diags() << DgSqlCode(-4082) <<
DgTableName(naTable->getTableName().getQualifiedNameAsAnsiString());
bindWA->setErrStatus();
return this;
// LCOV_EXCL_STOP
}
if (naTable->isAnMV())
{
// we currently don't allow updating (deleting) MVs in a trigger action
if (bindWA->inDDL() && bindWA->isInTrigger ())
{
*CmpCommon::diags() << DgSqlCode(-11051);
bindWA->setErrStatus();
return this;
}
// This table is a materialized view. Are we allowed to change it?
if ((getTableName().getSpecialType() != ExtendedQualName::MV_TABLE) &&
(getTableName().getSpecialType() != ExtendedQualName::GHOST_MV_TABLE))
{
// The special syntax flag was not used -
      // only an ON REQUEST MV allows direct DELETE operations by the user.
MVInfoForDML *mvInfo = ((NATable *)naTable)->getMVInfo(bindWA);
if (mvInfo->getRefreshType() == COM_ON_REQUEST &&
getOperatorType() == REL_UNARY_DELETE)
{
// Set NOLOG flag.
setNoLogOperation();
}
else
{
// Direct update is only allowed for User Maintainable MVs.
if (mvInfo->getRefreshType() != COM_BY_USER)
{
// A Materialized View cannot be directly updated.
*CmpCommon::diags() << DgSqlCode(-12074);
bindWA->setErrStatus();
return this;
}
}
}
// If this is not an INTERNAL REFRESH command, make sure the MV is
// initialized and available.
// If this is FastDelete using parallel purgedata, do not enforce
// that MV is initialized.
if (!bindWA->isBindingMvRefresh())
{
if (NOT ((getOperatorType() == REL_UNARY_DELETE) &&
(((Delete*)this)->isFastDelete())))
{
if (naTable->verifyMvIsInitializedAndAvailable(bindWA))
return NULL;
}
}
}
if (naTable->isAnMVMetaData() &&
getTableName().getSpecialType() != ExtendedQualName::MVS_UMD)
{
if (getTableName().getPrototype() == NULL ||
getTableName().getPrototype()->getSpecialType() != ExtendedQualName::MVS_UMD)
{ // ERROR 12075: A Materialized View Metadata Table cannot be directly updated.
*CmpCommon::diags() << DgSqlCode(-12075);
bindWA->setErrStatus();
return this;
}
}
if ((naTable->isSeabaseTable()) &&
(naTable->isSeabaseMDTable() ||
naTable->isSeabasePrivSchemaTable()) &&
(NOT naTable->isUserUpdatableSeabaseMDTable()) &&
(NOT Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
// IUD on hbase metadata is only allowed for internal queries.
*CmpCommon::diags() << DgSqlCode(-1391)
<< DgString0(naTable->getTableName().getQualifiedNameAsAnsiString())
<< DgString1("metadata");
bindWA->setErrStatus();
return this;
}
else if ((naTable->isSeabaseTable()) &&
(naTable->getTableName().getSchemaName() == SEABASE_REPOS_SCHEMA) &&
(NOT Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)))
{
// IUD on the repository tables is only allowed for internal queries.
*CmpCommon::diags() << DgSqlCode(-1391)
<< DgString0(naTable->getTableName().getQualifiedNameAsAnsiString())
<< DgString1("repository");
bindWA->setErrStatus();
return this;
}
if ((naTable->isHbaseTable()) &&
(naTable->isHbaseCellTable() || naTable->isHbaseRowTable()) &&
(CmpCommon::getDefault(HBASE_NATIVE_IUD) == DF_OFF))
{
*CmpCommon::diags() << DgSqlCode(-4223)
<< DgString0("Insert/Update/Delete on native hbase tables or in CELL/ROW format is");
bindWA->setErrStatus();
return this;
}
if (naTable->isHiveTable() &&
(getOperatorType() != REL_UNARY_INSERT) &&
(getOperatorType() != REL_LEAF_INSERT))
{
*CmpCommon::diags() << DgSqlCode(-4223)
<< DgString0("Update/Delete on Hive table is");
bindWA->setErrStatus();
return this;
}
NABoolean insertFromValuesList =
(getOperatorType() == REL_UNARY_INSERT &&
(child(0)->getOperatorType() == REL_TUPLE || // VALUES(1,'b')
child(0)->getOperatorType() == REL_TUPLE_LIST || // VALUES(1,'b'),(2,'Y')
child(0)->getOperatorType() == REL_UNION)) || // VALUES..(with subquery inside the list)
getOperatorType() == REL_LEAF_INSERT; // index type of inserts
if((!insertFromValuesList) && (getOperatorType() == REL_UNARY_INSERT))
bindWA->setInsertSelectStatement(TRUE);
// An update/delete node is created by the parser as an update/delete
// with a scan node as its child. If this is the case, then no security
// checks are needed on the child Scan node.
if ((getOperatorType() == REL_UNARY_UPDATE ||
getOperatorType() == REL_UNARY_DELETE) &&
(child(0) && (child(0)->getOperatorType() == REL_SCAN))) {
Scan * scanNode = (Scan *)(child(0)->castToRelExpr());
scanNode->setNoSecurityCheck(TRUE);
}
// Set the begin index for TableViewUsageList to zero, instead of
// bindWA->tableViewUsageList().entries(), because entries() points at
// the current end of the list, which would exclude entries recorded by
// previous statements executed in a CS.
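// Illustrative example (assumed scenario, CS = compound statement): in
//   BEGIN INSERT INTO t1 ...; UPDATE t2 ...; END;
// usage entries recorded while binding the INSERT are already in the
// list when the UPDATE is bound; starting at entries() would skip them,
// while starting at zero keeps them visible to the checks below.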
CollIndex begSrcUsgIx = 0;
if (!insertFromValuesList) {
//
// Create a new table name scope for the source table (child node).
// Bind the source.
// Reset scope context/naming.
//
bindWA->getCurrentScope()->xtnmStack()->createXTNM();
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
bindWA->getCurrentScope()->xtnmStack()->removeXTNM();
// QSTUFF
// we currently don't support streams and embedded updates
// for "insert into select from" statements.
if (getOperatorType() == REL_UNARY_INSERT){
if (child(0)->getGroupAttr()->isStream()){
*CmpCommon::diags() << DgSqlCode(-4170);
bindWA->setErrStatus();
return this;
}
if (child(0)->getGroupAttr()->isEmbeddedUpdateOrDelete() ||
child(0)->getGroupAttr()->isEmbeddedInsert()){
*CmpCommon::diags() << DgSqlCode(-4171)
<< DgString0(getGroupAttr()->getOperationWithinGroup());
bindWA->setErrStatus();
return this;
}
}
// Binding a generic update within a generic update
// can only occur when binding an updatable view containing
// an embedded delete or embedded update. We don't continue
// binding the generic update but return the bound scan node.
// The scan node may be either a base table scan or a RenameTable
// node in case we are updating a view.
// Since an embedded generic update may have referred to the OLD
// and NEW table, we set a binder flag causing the table name to
// be changed to the name of the underlying scan table in the
// RelRoot on top of the generic update. Since we
// know that the normalizer has checked, before allowing an update
// on the view, that not both new and old column values have been
// referred to, this is a safe operation.
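// Illustrative example (assumed syntax for an embedded delete in a view):
//   CREATE VIEW v AS SELECT a FROM (DELETE FROM t WHERE b > 0) AS x;
//   UPDATE v SET a = ...;
// Binding the UPDATE reaches the embedded DELETE -- a generic update
// within a generic update -- so we stop here and return the bound scan
// node instead of continuing to bind this node.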
if (returnScanNode){
// this line is a hack to get through Update::bindNode on the return
setTableDesc(getScanNode()->getTableDesc());
bindWA->setInGenericUpdate(inGenericUpdate);
bindWA->setRenameToScanTable (TRUE);
NATable *nTable = bindWA->getNATable(getTableName());
// decr once for just getting it here
// and again to compensate for the reference taken out
// previously which becomes obsolete since we just return a scan node
nTable->decrReferenceCount();
nTable->decrReferenceCount();
return getViewScanNode();
}
// QSTUFF
}
else {
// else, Insert::bindNode does VALUES(...) in its Assign::bindNode loop
// in particular, it does VALUES(..,DEFAULT,..)
}
#ifndef NDEBUG
GU_DEBUG_Display(bindWA, this, "incoming", NULL, TRUE);
#endif
// QSTUFF
// in case of an insert operation we don't set it initially in order
// to prevent that an embedded update or delete may be accidentially
// removed from a source view. However we need it for binding the
// target because it may be a view and its embedded updates have to
// be removed.
if (getOperatorType() == REL_UNARY_INSERT)
inGenericUpdate = bindWA->setInGenericUpdate(TRUE);
CMPASSERT(NOT(updateCurrentOf() &&
getGroupAttr()->isEmbeddedUpdateOrDelete()));
// this is a patch to allow for embedded updates in view definitions
ParNameLocList * pLoc = NULL;
if (getGroupAttr()->isEmbeddedUpdate()) {
pLoc = bindWA->getNameLocListPtr();
bindWA->setNameLocListPtr(NULL);
}
// QSTUFF
// Allocate a TableDesc and attach it to the node.
//
// Note that for Update/Delete, which always have a Scan node attached
// (see below), we cannot reuse the Scan's TableDesc:
// GenMapTable.C doesn't find the proper ValueIds when processing an
// update/delete on a table with an index.
// So we must always create a new (target) TableDesc, always a base table.
//
// Note that bindWA->getCurrentScope()->setRETDesc() is implicitly called:
// 1) by createTableDesc, setting it to this new (target) base table;
// 2) by bindView (if called), resetting it to the view's RenameTable RETDesc
// atop the new (target) table.
//
const NATable *naTableTop = naTable;
NABoolean isView = naTable->getViewText() != NULL;
RelExpr *boundView = NULL; // ## delete when done with it?
Scan *scanNode = NULL;
if (getOperatorType() == REL_UNARY_INSERT ||
getOperatorType() == REL_LEAF_INSERT) {
if (isView) { // INSERT into a VIEW:
//
// Expand the view definition as if it were a Scan child of the Insert
// (like all children, must have its own table name scope).
//
bindWA->getCurrentScope()->xtnmStack()->createXTNM();
boundView = bindWA->bindView(getTableName(),
naTable,
accessOptions(),
removeSelPredTree(),
getGroupAttr());
#ifndef NDEBUG
GU_DEBUG_Display(bindWA, this, "bv1", boundView);
#endif
if (bindWA->errStatus()) return this;
scanNode = boundView->getScanNode();
bindWA->getCurrentScope()->xtnmStack()->removeXTNM();
}
}
else if (getOperatorType() == REL_UNARY_UPDATE ||
getOperatorType() == REL_UNARY_DELETE) {
scanNode = getScanNode();
}
if (updateCurrentOf()) {
CMPASSERT(scanNode);
scanNode->bindUpdateCurrentOf(bindWA,
(getOperatorType() == REL_UNARY_UPDATE));
if (bindWA->errStatus()) return this;
}
// As previous comments indicated, we're creating a TableDesc for the target,
// the underlying base table. Here we go and do it:
NABoolean isScanOnDifferentTable = FALSE;
if (isView) {
// This binding of the view sets up the target RETDesc.
// This is the first bindView for UPDATE and DELETE on a view,
// and the second for INSERT into a view (yes, we *do* need to do it again).
boundView = bindWA->bindView(getTableName(),
naTable,
accessOptions(),
removeSelPredTree(),
getGroupAttr(),
TRUE); // QSTUFF
setTableDesc(boundView->getScanNode()->getTableDesc());
if ((getOperatorType() == REL_INSERT)||
(getOperatorType() == REL_UNARY_INSERT) ||
(getOperatorType() == REL_LEAF_INSERT))
{
((Insert *)this)->setBoundView(boundView);
}
// for triggers
if (scanNode)
{
const NATable *naTableLocal = scanNode->getTableDesc()->getNATable();
if ((naTableLocal != naTable) && (naTableLocal->getSpecialType() == ExtendedQualName::TRIGTEMP_TABLE))
isScanOnDifferentTable = TRUE;
}
} else if (NOT (getUpdateCKorUniqueIndexKey() && (getOperatorType() == REL_UNARY_INSERT))) {
// An insert that is introduced to implement a phase of an update of the
// primary key already has the right TableDesc (obtained from the update
// it replaces), so do not create another TableDesc for such an insert.
if (scanNode)
naTable = scanNode->getTableDesc()->getNATable();
CorrName tempName(naTableTop->getTableName(),
bindWA->wHeap(),
"",
getTableName().getLocationName(),
getTableName().getPrototype());
tempName.setUgivenName(getTableName().getUgivenName());
tempName.setSpecialType(getTableName().getSpecialType());
// tempName.setIsVolatile(getTableName().isVolatile());
TableDesc * naTableToptableDesc = bindWA->createTableDesc(
naTableTop,
tempName);
if(naTableToptableDesc)
{
naTableToptableDesc->setSelectivityHint(NULL);
naTableToptableDesc->setCardinalityHint(NULL);
}
setTableDesc(naTableToptableDesc);
// Now naTable has the Scan's table, and naTableTop has the GU's table.
isScanOnDifferentTable = (naTable != naTableTop);
}
if (bindWA->errStatus())
return this;
// QSTUFF
// In case of a delete or update we may have to bind set clauses.
// First we bind the left target column, then we bind the right hand
// side. We also have to separate out the set on rollback clauses into
// their own list: the set clauses generate a newRecExpr list, the set
// on rollback clauses generate a newRecBeforeExpr list.
// We add the old-to-new valueid map, as it allows us to generate
// a subset operator in the presence of ORDER BY; the compiler
// needs to understand that the old and new valueids are identical.
// An inlined trigger may update and scan different tables.
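// Illustrative example (assumed form of an embedded delete): for
//   SELECT * FROM (DELETE FROM t) AS x ORDER BY k;
// mapping the scan's clustering key valueids (skeys) to the delete's
// (dkeys) below lets the compiler treat them as identical and still
// generate a subset delete despite the ORDER BY.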
if ((getOperatorType() == REL_UNARY_DELETE) &&
(!isScanOnDifferentTable && !getUpdateCKorUniqueIndexKey())) {
const ValueIdList &dkeys =
getTableDesc()->getClusteringIndex()->getClusteringKeyCols();
const ValueIdList &skeys =
scanNode->getTableDesc()->getClusteringIndex()->getClusteringKeyCols();
CollIndex j = skeys.entries();
for (CollIndex i = 0; i < j; i++) {
oldToNewMap().addMapEntry(skeys[i].getItemExpr()->getValueId(),
dkeys[i].getItemExpr()->getValueId());
}
}
ItemExpr *recExpr = removeNewRecExprTree();
if (recExpr &&
(getOperatorType() == REL_UNARY_DELETE ||
getOperatorType() == REL_UNARY_UPDATE)) {
ItemExprList recList(recExpr, bindWA->wHeap());
ItemExprList recBeforeList(bindWA->wHeap());
SET(short) stoiColumnSet(bindWA->wHeap());
// In case a delete statement has a recExpr, set on rollback
// clauses have been defined and need to be bound.
// As part of binding any set on rollback clause we have to check
// that no constraints are defined for the specific columns; otherwise
// the statement is rejected.
// The target columns are bound to the update table, the source
// columns are bound to the scan table.
if (getOperatorType() == REL_UNARY_DELETE){
recBeforeList.insert(recList);
bindUpdateExpr(bindWA,recExpr,recBeforeList,boundView,scanNode,stoiColumnSet,TRUE);
if (bindWA->errStatus()) return this;
}
// in case of an update operator we have to separate the set and
// set on rollback clauses
if (getOperatorType() == REL_UNARY_UPDATE) {
CMPASSERT(recList.entries());
NABoolean leftIsList = FALSE;
NABoolean rightIsList = FALSE;
NABoolean legalSubqUdfExpr = FALSE;
for (CollIndex i = 0;i < recList.entries(); i++){
CMPASSERT(recList[i]->getOperatorType() == ITM_ASSIGN);
if (recList[i]->child(0)->getOperatorType() == ITM_ITEM_LIST)
leftIsList = TRUE;
if (recList[i]->child(1)->getOperatorType() == ITM_ITEM_LIST)
rightIsList = TRUE;
if (((Assign *)recList[i])->onRollback()){
// On rollback clause currently not allowed with update lists.
if ((leftIsList) || (rightIsList))
{
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0(" ON ROLLBACK not supported with SET lists.");
bindWA->setErrStatus();
return this;
}
// CMPASSERT((NOT leftIsList) && (NOT rightIsList))
recBeforeList.insert(recList[i]);
recList.removeAt(i);
i--;
}
}
if ((leftIsList) &&
(NOT rightIsList) &&
(recList.entries() == 1) &&
((recList[0]->child(1)->getOperatorType() == ITM_ROW_SUBQUERY) ||
(recList[0]->child(1)->getOperatorType() == ITM_USER_DEF_FUNCTION)))
{
ItemExpr * expr = NULL;
// Both Subqueries and UDFs are now using the ValueIdProxy
// to carry each of the valueIds representing the select list
// or UDF outputs. The transformation of the ValueIdProxy will do the
// right thing, and we don't need setSubqInUpdateAssign() anymore.
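// Illustrative example: this branch handles a single list-valued
// assignment such as
//   UPDATE t SET (a, b) = (SELECT x, y FROM s WHERE s.k = t.k);
// or SET (a, b) = udf(...), where the row subquery's select list (or
// the UDF's outputs) supplies one value per target column.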
// Bind the subquery
if (recList[0]->child(1)->getOperatorType() == ITM_ROW_SUBQUERY)
{
RowSubquery * rs =
(RowSubquery*)(recList[0]->child(1)->castToItemExpr());
// Not sure that we ever have a subquery without a REL_ROOT;
// left this additional check from the old code.
if (rs->getSubquery()->getOperatorType() == REL_ROOT)
{
rs = (RowSubquery *) rs->bindNode(bindWA);
if (bindWA->errStatus())
return this;
legalSubqUdfExpr = TRUE;
expr = (ItemExpr *) rs;
}
}
else
{
UDFunction * rudf =
(UDFunction*)(recList[0]->child(1)->castToItemExpr());
// Need to bind the UDFunction to get its outputs.
rudf = (UDFunction *) rudf->bindNode(bindWA);
if (bindWA->errStatus())
return this;
legalSubqUdfExpr = TRUE;
expr = (ItemExpr *) rudf;
}
// Update the recList with the bound itemExpr
recList[0]->child(1) = expr;
// Use the ItemExprList to flatten the Subquery or UDF
ItemExprList *exprList = (ItemExprList *) new(bindWA->wHeap())
ItemExprList(expr,bindWA->wHeap());
// Convert the ItemExprList to a Tree
ItemExpr * ie = exprList->convertToItemExpr();
ie = ie->bindNode(bindWA);
if (bindWA->errStatus())
return this;
Assign * assignNode = (Assign *)recList[0];
assignNode->child(1) = ie;
rightIsList = TRUE;
}
if ((leftIsList) || (rightIsList)) // some elements as lists
{
ItemExprList newRecList(bindWA->wHeap());
for (CollIndex i = 0; i < recList.entries(); i++)
{
Assign * assignNode = (Assign *)recList[i];
// Need to bind any UDFs or Subqueries in the expression
// so that we know the degree before we expand the list.
assignNode->child(0) =
assignNode->child(0)->bindUDFsOrSubqueries(bindWA);
if (bindWA->errStatus())
return this;
// Need to bind any UDFs or Subqueries in the expression
// so that we know the degree before we expand the list.
assignNode->child(1) =
assignNode->child(1)->bindUDFsOrSubqueries(bindWA);
if (bindWA->errStatus())
return this;
ItemExprList leftList(assignNode->child(0), bindWA->wHeap());
ItemExprList rightList(assignNode->child(1), bindWA->wHeap());
Lng32 numLeftElements = (Lng32) leftList.entries();
Lng32 numRightElements = (Lng32) rightList.entries();
// See if ALLOW_SUBQ_IN_SET is enabled. It is enabled if
// the default is ON or SYSTEM.
NABoolean allowSubqInSet_Enabled = FALSE;
DefaultToken allowSubqTok =
CmpCommon::getDefault(ALLOW_SUBQ_IN_SET);
if ((allowSubqTok == DF_ON) ||
(allowSubqTok == DF_SYSTEM))
allowSubqInSet_Enabled = TRUE;
if (!allowSubqInSet_Enabled)
{
for (CollIndex j = 0; j < rightList.entries(); j++)
{
if (((numLeftElements > 1) ||
(numRightElements > 1)) &&
(((rightList[j]->getOperatorType() == ITM_ROW_SUBQUERY) ||
(rightList[j]->getOperatorType() == ITM_VALUEID_PROXY)) &&
(legalSubqUdfExpr == FALSE)))
{
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0(" Multiple elements or multiple subqueries are not allowed in this SET clause.");
bindWA->setErrStatus();
return this;
}
}
}
if (numLeftElements != numRightElements)
{
*CmpCommon::diags() << DgSqlCode(-4023)
<< DgInt0(numRightElements)
<< DgInt1(numLeftElements);
bindWA->setErrStatus();
return this;
}
// create newRecList with one Assign node for each element.
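// E.g., SET (a, b) = (e1, e2) is expanded into the two single-column
// assignments a = e1 and b = e2 before bindUpdateExpr is called.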
for (CollIndex k = 0; k < leftList.entries(); k++)
{
ItemExpr * leftIE = leftList[k];
ItemExpr * rightIE = rightList[k];
Assign *assign = new (bindWA->wHeap())
Assign(leftIE, rightIE);
// We do not bind the above Assign as it will be done
// in bindUpdateExpr below. (bug #1893)
newRecList.insert(assign);
}
} // for
bindUpdateExpr(bindWA,recExpr,newRecList,boundView,scanNode,stoiColumnSet);
if (bindWA->errStatus())
return this;
} // some elements as lists
else
{ // no elements as lists
if (recList.entries()){
bindUpdateExpr(bindWA,recExpr,recList,boundView,scanNode,stoiColumnSet);
if (bindWA->errStatus()) return this;
}
}
if (recBeforeList.entries()){
bindUpdateExpr(bindWA,recExpr,recBeforeList,boundView,scanNode,stoiColumnSet,TRUE);
if (bindWA->errStatus()) return this;
}
} // UNARY_UPDATE
// now we record the columns updated for the SqlTableOpenInfo
if (listedStoi) {
listedStoi->getStoi()->setColumnListCount((short)stoiColumnSet.entries());
short *stoiColumnList = new (bindWA->wHeap())
short[stoiColumnSet.entries()];
for (CollIndex i = 0; i < stoiColumnSet.entries(); i++)
{
stoiColumnList[i] = stoiColumnSet[i];
listedStoi->addUpdateColumn(stoiColumnSet[i]);
}
listedStoi->getStoi()->setColumnList(stoiColumnList);
}
// The previous implementation assumed that the scope points
// to the scan table; we don't want to disturb that code, so
// we make it so here --
#ifndef NDEBUG
GU_DEBUG_Display(bindWA, this, "u");
#endif
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
}
// QSTUFF
CollIndex endSrcUsgIx = bindWA->tableViewUsageList().entries();
if ((!isScanOnDifferentTable) &&
(((getOperatorType() == REL_UNARY_INSERT) &&
!insertFromValuesList && !getGroupAttr()->isEmbeddedInsert()) ||
(getOperatorType() == REL_UNARY_UPDATE) ||
(getOperatorType() == REL_UNARY_DELETE))){
// Special handling of statements that could suffer the
// Halloween problem, e.g., "insert into t select from t"
// or "insert into v select from t", if v references t
DBG( if (getenv("TVUSG_DEBUG")) bindWA->tableViewUsageList().display(); )
const NATable *naTableBase = naTable;
const QualifiedName *viewName = NULL;
if (isView) {
// Currently, per Ansi rules, we can only insert through a view if
// there is a single underlying base table without joins or unions.
// Since we are binding the view twice for INSERTS,
// the variable beforeRefcount for the *single* base table has to be 2.
//
beforeRefcount = beforeRefcount + 1;
naTableBase = getTableDesc()->getNATable();
viewName = &naTable->getTableName();
}
if ((getOperatorType() == REL_UNARY_UPDATE ||
getOperatorType() == REL_UNARY_DELETE) &&
(child(0)->getOperatorType() == REL_SCAN)) {
// The table is referenced twice; once for the update/delete and
// the second time for the scan below it.
beforeRefcount = beforeRefcount + 1;
}
const QualifiedName &tableBaseName = naTableBase->getTableName();
Int32 afterRefcount = naTableBase->getReferenceCount();
NABoolean isSGTableType = getTableName().getSpecialType() == ExtendedQualName::SG_TABLE;
NAString viewFmtdList(bindWA->wHeap());
Int32 baseSeenInSrc = 0;
// The views on the table do not need to be obtained
// if the table type is a SEQUENCE GENERATOR
if (!isSGTableType)
baseSeenInSrc = bindWA->tableViewUsageList().getViewsOnTable(
begSrcUsgIx, endSrcUsgIx,
bindWA->viewCount(),
tableBaseName,
getTableName().getSpecialType(),
viewName,
viewFmtdList);
NABoolean halloween = FALSE;
if (CmpCommon::getDefault(R2_HALLOWEEN_SUPPORT) == DF_ON) {
if (beforeRefcount != afterRefcount) {
// Check to see if we can support this update.
//
if(checkForHalloweenR2(afterRefcount - beforeRefcount)) {
halloween = TRUE;
}
}
else {
Scan *scanSrc = getScanNode(FALSE/*no assert*/);
if ((baseSeenInSrc > beforeRefcount) &&
((scanSrc && scanSrc->getTableName().isLocationNameSpecified())||
(getTableName().isLocationNameSpecified()))) {
halloween = TRUE;
}
if (Get_SqlParser_Flags(ALLOW_SPECIALTABLETYPE)) {
if ((scanSrc && scanSrc->getTableName().isLocationNameSpecified())||
(getTableName().isLocationNameSpecified())){
// Do not enforce the Halloween check if it is a
// partition-only operation.
// We assume the programmer knows what he's doing
// -- hopefully, by doing insert/update/delete
// operations as part of Partition Management
// (Move Partition Boundary, Split Partition, or
// Merge Partition; see TEST057 and TEST058).
halloween = FALSE;
}
}
}
if (halloween) {
CMPASSERT(!(isView && viewFmtdList.isNull()));
*CmpCommon::diags() << DgSqlCode(viewFmtdList.isNull() ? -4026 : -4060)
<< DgTableName(
tableBaseName.getQualifiedNameAsAnsiString())
<< DgString0(viewFmtdList);
bindWA->setErrStatus();
return this;
}
}
else {
// Support for self-referencing updates/Halloween problem.
if (beforeRefcount != afterRefcount) {
setAvoidHalloween(TRUE);
bindWA->getTopRoot()->setAvoidHalloween(TRUE);
// Decide if access mode (default or specified) is compatible
// with the use of DP2 locks. If access mode was specified,
// it is a property of the naTableBase.
NABoolean cannotUseDP2Locks =
naTableBase->getRefsIncompatibleDP2Halloween();
// Now check the transaction isolation level, which can override
// the access mode. Note that il was initialized above for the
// check for an updatable trans, i.e., errors 3140 and 3141.
if((CmpCommon::transMode()->ILtoAT(il) == REPEATABLE_ ) ||
(CmpCommon::transMode()->ILtoAT(il) == STABLE_ ) ||
(CmpCommon::transMode()->ILtoAT(il) == BROWSE_ ))
cannotUseDP2Locks = TRUE;
// Save the result with this GenericUpdate object. It will be
// used when the nextSubstitute methods of TSJFlowRule or TSJRule
// call GenericUpdate::configTSJforHalloween.
if (NOT getHalloweenCannotUseDP2Locks())
setHalloweenCannotUseDP2Locks(cannotUseDP2Locks);
// Keep track of which table in the query is the self-ref table.
// This is a part of the fix for solution 10-071204-9253.
((NATable *)naTableBase)->setIsHalloweenTable();
}
else {
Scan *scanSrc = getScanNode(FALSE/*no assert*/);
if ((baseSeenInSrc > beforeRefcount) &&
((scanSrc && scanSrc->getTableName().isLocationNameSpecified())||
(getTableName().isLocationNameSpecified()))) {
halloween = TRUE;
}
if (Get_SqlParser_Flags(ALLOW_SPECIALTABLETYPE)) {
if ((scanSrc && scanSrc->getTableName().isLocationNameSpecified())||
(getTableName().isLocationNameSpecified())){
// Do not enforce the Halloween check if it is a
// partition-only operation.
// We assume the programmer knows what he's doing
// -- hopefully, by doing insert/update/delete
// operations as part of Partition Management
// (Move Partition Boundary, Split Partition, or
// Merge Partition; see TEST057 and TEST058).
halloween = FALSE;
}
}
if (halloween) {
CMPASSERT(!(isView && viewFmtdList.isNull()));
*CmpCommon::diags() << DgSqlCode(viewFmtdList.isNull() ? -4026 : -4060)
<< DgTableName(
tableBaseName.getQualifiedNameAsAnsiString())
<< DgString0(viewFmtdList);
bindWA->setErrStatus();
return this;
}
}
}
}
// Bind the base class.
// Allocate an empty RETDesc and attach it to this node, *but* leave the
// currently scoped RETDesc (that of naTableTop) as is, for further binding
// in caller Insert::bindNode or LeafInsert/LeafDelete::bindNode.
//
RelExpr *boundExpr = bindSelf(bindWA);
CMPASSERT(boundExpr == this); // assumed by RETDesc/RI/IM code below
if (bindWA->errStatus()) return boundExpr;
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA));
// Copy the check constraints to the private memory of the GenericUpdate.
//
checkConstraints() = getTableDesc()->getCheckConstraints();
// Create a key expression for the table to be updated.
// The code specific to the Insert node is handled in Insert::bindNode.
//
if (getOperatorType() == REL_UNARY_UPDATE ||
getOperatorType() == REL_UNARY_DELETE) {
// SQL syntax requires (and the parser ensures) that a direct descendant
// (passing thru views) of an update/delete node is a scan node on the
// same table that is being updated (note that normalizer transformations
// may change this at a later time).
// An exception to this rule happens when before triggers are inlined.
// In this case, the update/delete on the subject table is driven by
// a Scan on a temp table. The primary key columns of the subject table
// are a subset of the primary key columns of the temp table, using the
// same column names, but not necessarily in the same order.
//
// Update/Delete nodes require expressions in their newRecExpr that can
// be used to form the primary key of the table to update/delete.
//
const NAColumnArray &keyColArray =
getTableDesc()->getNATable()->getClusteringIndex()->getIndexKeyColumns();
CollIndex numKeyCols = keyColArray.entries();
const NAColumnArray &scanColArray =
scanNode->getTableDesc()->getNATable()->getNAColumnArray();
for (CollIndex i = 0; i < numKeyCols; i++) {
// The scan node and the update/delete node both use the SAME table,
// so their column names are also the same.
//
Lng32 colPos = keyColArray[i]->getPosition();
ItemExpr *guCol = getTableDesc()->getColumnList()[colPos].getItemExpr();
ItemExpr *scanCol; // - Triggers
if (!isScanOnDifferentTable)
scanCol = scanNode->getTableDesc()->getColumnList()[colPos].getItemExpr();
else
{
// Make sure this is a BaseColumn.
CMPASSERT(guCol->getOperatorType() == ITM_BASECOLUMN);
// Find the column name.
const NAString& colName = ((BaseColumn *)guCol)->getColName();
// Find a column with the same name, in the table from the Scan node.
// SYSKEY is an exception since its name in the temp table is "@SYSKEY"
ExtendedQualName::SpecialTableType tableType =
scanNode->getTableDesc()->getCorrNameObj().getSpecialType();
NAColumn *scanNaCol = NULL;
if (ExtendedQualName::TRIGTEMP_TABLE == tableType && colName == "SYSKEY")
{
scanNaCol = scanColArray.getColumn("@SYSKEY");
}
else
{
scanNaCol = scanColArray.getColumn(colName);
}
CMPASSERT(scanNaCol != NULL);
// Get the position of this column in the Scan table.
Lng32 scanColPos = scanNaCol->getPosition();
// Get the Scan BaseColumn.
scanCol = scanNode->getTableDesc()->getColumnList()[scanColPos].getItemExpr();
}
ItemExpr *newKeyPred = new (bindWA->wHeap())
BiRelat(ITM_EQUAL, guCol, scanCol);
newKeyPred->bindNode(bindWA);
beginKeyPred().insert(newKeyPred->getValueId());
updateToSelectMap().addMapEntry(
newKeyPred->child(0)->getValueId(),
newKeyPred->child(1)->getValueId());
} // loop over key columns
// All of the indexes also require expressions that can be used to
// form the primary key of the index to update/delete. Create these
// item expressions here.
// (From here to the end of the loop over indexes structurally resembles
// GenericUpdate::imBindAllIndexes(), but has significant differences.)
//
// Remember the value ID's of the scan node index columns for
// code generation time.
//
if ((this->getOperatorType() == REL_UNARY_UPDATE) && isScanOnDifferentTable)
{
setScanIndexDesc(NULL); // for triggers
}
else
{
setScanIndexDesc(scanNode->getTableDesc()->getClusteringIndex());
}
} // REL_UNARY_UPDATE or REL_UNARY_DELETE
// QSTUFF
// We need to check whether this code is executed as part of a create
// view DDL operation, using bindWA->inDDL(), and prevent indices,
// constraints, and triggers from being added, as the catalog manager
// binding functions cannot handle them right now.
// QSTUFF
// QSTUFF hack !
if (getGroupAttr()->isEmbeddedUpdate())
bindWA->setNameLocListPtr(pLoc);
bindWA->setInGenericUpdate(inGenericUpdate);
// QSTUFF
// set flag that we are binding an Insert/Update/Delete operation
// Used to disable Join optimization when necessary
bindWA->setBindingIUD();
return boundExpr;
} // GenericUpdate::bindNode()
NABoolean GenericUpdate::checkForMergeRestrictions(BindWA *bindWA)
{
if (!isMerge())
return FALSE;
ValueIdList tempVIDlist;
getTableDesc()->getIdentityColumn(tempVIDlist);
NAColumn *identityCol = NULL;
if (tempVIDlist.entries() > 0)
{
ValueId valId = tempVIDlist[0];
identityCol = valId.getNAColumn();
}
// MERGE on a table with BLOB columns is not supported
if (getTableDesc()->getNATable()->hasLobColumn())
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" LOB column not allowed.");
bindWA->setErrStatus();
return TRUE;
}
if (getTableDesc()->hasUniqueIndexes() &&
(CmpCommon::getDefault(MERGE_WITH_UNIQUE_INDEX) == DF_OFF))
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" unique indexes not allowed.");
bindWA->setErrStatus();
return TRUE;
}
if ((accessOptions().accessType() == SKIP_CONFLICT_) ||
(getGroupAttr()->isStream()) ||
(newRecBeforeExprArray().entries() > 0)) // set on rollback
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" Stream, skip conflict or SET ON ROLLBACK not allowed.");
bindWA->setErrStatus();
return TRUE;
}
if (getGroupAttr()->isEmbeddedUpdateOrDelete())
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" Embedded update/deletes not allowed.");
bindWA->setErrStatus();
return TRUE;
}
if ((getInliningInfo().hasInlinedActions()) ||
(getInliningInfo().isEffectiveGU()))
{
if (getInliningInfo().hasTriggers())
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" Triggers not allowed.");
bindWA->setErrStatus();
return TRUE;
}
}
return FALSE;
}
// The class LeafInsert and its companion LeafDelete
// are currently used only by Index Maintenance,
// but we ought not make any assumptions.
// ##IM: It might be useful to add a flag such as GenericUpdate::isIndexTable_
// ##IM: and set it to TRUE in createIMNode().
//
RelExpr *LeafInsert::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
#ifndef NDEBUG
if (GU_DEBUG) cerr << "\nLeafInsert " << getUpdTableNameText() << endl;
#endif
setInUpdateOrInsert(bindWA, this, REL_INSERT);
if (getPreconditionTree()) {
ValueIdSet pc;
getPreconditionTree()->convertToValueIdSet(pc, bindWA, ITM_AND);
if (bindWA->errStatus())
return this;
setPreconditionTree(NULL);
setPrecondition(pc);
}
RelExpr *boundExpr = GenericUpdate::bindNode(bindWA);
if (bindWA->errStatus()) return boundExpr;
// Make newRecExprArray_ be an ordered set of assign nodes of the form
// "ixcol1 = basetblcol1, ixcol2 = basecol2, ..." (for Index Maintenance)
// Note: For SQL/MP tables, ixcol0 is the keytag, and will need to be
// handled differently from other columns.
const ValueIdList &tgtcols = getTableDesc()->getColumnList();
CMPASSERT(tgtcols.entries() == baseColRefs().entries());
for (CollIndex i = 0; i < tgtcols.entries(); i++) {
Assign *assign;
assign = new (bindWA->wHeap())
Assign(tgtcols[i].getItemExpr(), baseColRefs()[i], FALSE);
assign->bindNode(bindWA);
if (bindWA->errStatus()) return NULL;
newRecExprArray().insertAt(i, assign->getValueId());
newRecExpr().insert(assign->getValueId());
updateToSelectMap().addMapEntry(assign->getTarget(), assign->getSource());
}
// RelExpr::bindSelf (in GenericUpdate::bindNode) has done this line, but
// now any outer refs discovered by the bindNode calls in the above loop
// must be added.
// For Index Maintenance, these must be exactly the set of baseColRefs vids
// (all the target index cols are from the locally-scoped RETDesc left by
// the GenericUpdate::bindNode).
getGroupAttr()->addCharacteristicInputs(bindWA->getCurrentScope()->getOuterRefs());
// The NATable of getTableName() had been set to INDEX_TABLE so that
// getNATable would search the right namespace.
// Now we make the Optimizer treat this as a regular table, not an index
// (in particular, don't have it choose VSBB sidetree-insert).
//
// The TableDesc setting may be redundant/unnecessary, but we do it
// for completeness and safety.
//
// -- Triggers
// If it is NOT an index table (like maybe a TRIGTEMP_TABLE), leave it alone
if (getTableName().getSpecialType() == ExtendedQualName::INDEX_TABLE)
{
getTableName().setSpecialType(ExtendedQualName::NORMAL_TABLE);
getTableDesc()->getCorrNameObj().setSpecialType(ExtendedQualName::NORMAL_TABLE);
}
setInUpdateOrInsert(bindWA);
return boundExpr;
} // LeafInsert::bindNode()
RelExpr *LeafDelete::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
#ifndef NDEBUG
if (GU_DEBUG) cerr << "\nLeafDelete " << getUpdTableNameText() << endl;
#endif
if (getPreconditionTree()) {
ValueIdSet pc;
getPreconditionTree()->convertToValueIdSet(pc, bindWA, ITM_AND);
if (bindWA->errStatus())
return this;
setPreconditionTree(NULL);
setPrecondition(pc);
}
RelExpr *boundExpr = GenericUpdate::bindNode(bindWA);
if (bindWA->errStatus()) return boundExpr;
// Set the beginKeyPred.
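// For the trigger temp table case the key layout is assumed to be
// (UniqueExecuteId, UniqueIudNum, <subject-table key columns...>):
// the first key column is matched against the UniqueExecuteId builtin,
// the second against the current UniqueIudNum constant, and the rest
// against the corresponding base column references.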
if (TriggersTempTable *tempTableObj = getTrigTemp())
{
const ValueIdList &keycols = getTableDesc()->getClusteringIndex()->getIndexKey();
ItemExpr *keyExpr;
// Normal case - use the UniqueExecuteId builtin function.
keyExpr = new(bindWA->wHeap()) UniqueExecuteId();
ItemExpr *tempKeyPred = new(bindWA->wHeap()) BiRelat(ITM_EQUAL, keycols[0].getItemExpr(), keyExpr);
tempKeyPred->bindNode(bindWA);
if (bindWA->errStatus()) return NULL;
beginKeyPred().insert(tempKeyPred->getValueId());
// Create the ItemExpr for the constant UniqueIudNum
ItemExpr *col2 = new(bindWA->wHeap())
ColReference(new(bindWA->wHeap()) ColRefName(UNIQUEIUD_COLUMN));
// Compare it to the correct offset.
BindWA::uniqueIudNumOffset offset = BindWA::uniqueIudNumForInsert ;
ItemExpr *iudConst = new(bindWA->wHeap()) ConstValue(bindWA->getUniqueIudNum(offset));
ItemExpr *predIudId = new(bindWA->wHeap()) BiRelat(ITM_EQUAL, keycols[1].getItemExpr(), iudConst);
predIudId->bindNode(bindWA);
if (bindWA->errStatus()) return NULL;
beginKeyPred().insert(predIudId->getValueId());
for (CollIndex i = 2; i<keycols.entries(); i++)
{
ItemExpr *keyPred = NULL;
ItemExpr *keyItemExpr = keycols[i].getItemExpr();
ItemExpr *baseItemExpr = NULL;
Lng32 keyColPos = keycols[i].getNAColumn()->getPosition();
baseItemExpr = baseColRefs()[keyColPos];
keyPred = new (bindWA->wHeap())
BiRelat(ITM_EQUAL, keyItemExpr, baseItemExpr);
keyPred->bindNode(bindWA);
if (bindWA->errStatus()) return NULL;
beginKeyPred().insert(keyPred->getValueId());
}
}
else
{
const ValueIdList &keycols = getTableDesc()->getClusteringIndex()->getIndexKey();
for (CollIndex i = 0; i < keycols.entries() ; i++)
{
ItemExpr *keyPred = 0;
ItemExpr *keyItemExpr = keycols[i].getItemExpr();
Lng32 keyColPos = keycols[i].getNAColumn()->getPosition();
ItemExpr *baseItemExpr = NULL;
// For a unique index (for undo) we are passing in all the index
// columns in baseColRefs. So we need to find the index key col
// position in the index col list and compare the key columns with
// their corresponding columns in the index column list.
if (isUndoUniqueIndex())
baseItemExpr = baseColRefs()[keyColPos];
else
baseItemExpr = baseColRefs()[i];
keyPred = new (bindWA->wHeap())
BiRelat(ITM_EQUAL, keyItemExpr, baseItemExpr);
keyPred->bindNode(bindWA);
if (bindWA->errStatus()) return NULL;
beginKeyPred().insert(keyPred->getValueId());
}
}
if (isUndoUniqueIndex())
{
setUpExecPredForUndoUniqueIndex(bindWA) ;
}
if (getTrigTemp())
{
setUpExecPredForUndoTempTable(bindWA);
}
// See LeafInsert::bindNode for comments on remainder of this method.
getGroupAttr()->addCharacteristicInputs(bindWA->getCurrentScope()->getOuterRefs());
getTableName().setSpecialType(ExtendedQualName::NORMAL_TABLE);
getTableDesc()->getCorrNameObj().setSpecialType(ExtendedQualName::NORMAL_TABLE);
return boundExpr;
} // LeafDelete::bindNode()
void LeafDelete::setUpExecPredForUndoUniqueIndex(BindWA *bindWA)
{
// Set up the executor predicate. Used in the case of Undo, to undo the
// exact row that caused an error. Note that if we used only the key
// columns to undo, we might end up undoing existing rows.
// This is done only for unique indexes.
ItemExpr *execPred = NULL;
const ValueIdList &indexCols = getTableDesc()->getClusteringIndex()->getIndexColumns();
for ( CollIndex i = 0; i < indexCols.entries(); i++)
{
execPred = new (bindWA->wHeap())
BiRelat(ITM_EQUAL, indexCols[i].getItemExpr(), baseColRefs()[i]);
execPred->bindNode(bindWA);
if (bindWA->errStatus()) return ;
executorPred() += execPred->getValueId();
}
return;
}
void LeafDelete::setUpExecPredForUndoTempTable(BindWA *bindWA)
{
ItemExpr *execPred = NULL;
const ValueIdList &tempCols = getTableDesc()->getClusteringIndex()->getIndexColumns();
for ( CollIndex i = 0; i < tempCols.entries(); i++)
{
NAString colName(tempCols[i].getNAColumn()->getColName());
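// Skip internal '@'-prefixed temp-table columns, except @SYSKEY
// (compareTo() returns nonzero when the name differs from "@SYSKEY").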
if (colName.data()[0] == '@' && colName.compareTo("@SYSKEY"))
continue;
execPred = new (bindWA->wHeap())
BiRelat(ITM_EQUAL, tempCols[i].getItemExpr(), baseColRefs()[i]);
execPred->bindNode(bindWA);
if (bindWA->errStatus()) return;
executorPred() += execPred->getValueId();
}
return;
}
// -----------------------------------------------------------------------
// RelRoutine
// -----------------------------------------------------------------------
// LCOV_EXCL_START - rfi
RelExpr *RelRoutine::bindNode(BindWA *bindWA)
{
CMPASSERT(0); // For the time being, all classes above implement their own.
//
// Allocate an RETDesc and attach it to this and the BindScope.
// Needs to occur in later classes when we know if we are at table
// type or not.
// XXX setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, getTableDesc()));
// bindWA->getCurrentScope()->setRETDesc(getRETDesc());
//
// Bind the base class.
//
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus()) return boundExpr;
//
// Assign the set of columns that belong to the virtual table
// as the output values that can be produced by this node.
//
// XXX done in later classes
// getGroupAttr()->addCharacteristicOutputs(getTableDesc()->getColumnList());
return boundExpr;
} // RelRoutine::bindNode()
// LCOV_EXCL_STOP
// -----------------------------------------------------------------------
// BuiltinTableValuedFunction
// Will be called by ExplainFunc and StatisticsFunc.
// Relies on the function implementation in TableValuedFunction.
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
// Explain/Statistics/HiveMD Func
// -----------------------------------------------------------------------
RelExpr *BuiltinTableValuedFunction::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
//
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
//
// Check if there is already an NATable for the Explain/Statistics table.
//
if (getOperatorType() == REL_EXPLAIN ||
getOperatorType() == REL_STATISTICS ||
getOperatorType() == REL_HIVEMD_ACCESS ||
getOperatorType() == REL_HBASE_ACCESS)
{
NATable *naTable = NULL;
if (getOperatorType() == REL_HBASE_ACCESS)
{
// should not reach here
CMPASSERT(0);
}
else
{
CorrName corrName(getVirtualTableName());
corrName.setSpecialType(ExtendedQualName::VIRTUAL_TABLE);
naTable = bindWA->getSchemaDB()->getNATableDB()->
get(&corrName.getExtendedQualNameObj());
if (NOT naTable)
{
desc_struct *tableDesc = createVirtualTableDesc();
if (tableDesc)
naTable = bindWA->getNATable(corrName, FALSE/*catmanUsages*/, tableDesc);
if ( ! tableDesc || bindWA->errStatus() )
return this;
}
// Allocate a TableDesc and attach it to this.
//
TableDesc * td = bindWA->createTableDesc(naTable, corrName);
if (! td || bindWA->errStatus())
return this;
setTableDesc(td);
if (bindWA->errStatus())
return this;
}
if (getProcAllParamsTree())
{
((ItemExpr *)getProcAllParamsTree())->convertToValueIdList(getProcAllParamsVids(), bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus())
return this;
// Clear the Tree since we now have gotten vids for all the parameters.
setProcAllParamsTree(NULL);
Lng32 sqlcode = 0;
if (getProcAllParamsVids().entries() != numParams())
{
sqlcode = -4067;
// 4067 Explain/Statistics requires two operands, of type character.
*CmpCommon::diags() << DgSqlCode(sqlcode) << DgString0(getTextForError());
bindWA->setErrStatus();
return NULL;
}
// type any param arguments to fixed char since runtime explain
// expects arguments to be fixed char.
Lng32 len = (Lng32)CmpCommon::getDefaultNumeric(VARCHAR_PARAM_DEFAULT_SIZE);
SQLChar c(len);
for (Lng32 i = 0; i < numParams(); i++)
{
getProcAllParamsVids()[i].coerceType(c, NA_CHARACTER_TYPE);
if (getProcAllParamsVids()[i].getType().getTypeQualifier() != NA_CHARACTER_TYPE)
{
sqlcode = -4067;
// 4067 Explain/Statistics requires two operands, of type character.
*CmpCommon::diags() << DgSqlCode(sqlcode) << DgString0(getTextForError());
bindWA->setErrStatus();
return NULL;
}
const NAType &typ = getProcAllParamsVids()[i].getType();
CharInfo::CharSet chld_cs = ((const CharType&)typ).getCharSet();
ItemExpr *ie;
if ( chld_cs == CharInfo::UNICODE )
{
ie = new (bindWA->wHeap()) Translate(
getProcAllParamsVids()[i].getItemExpr(),
Translate::UNICODE_TO_ISO88591);
ie = ie->bindNode(bindWA);
getProcAllParamsVids()[i] = ie->getValueId();
}
if (bindWA->errStatus())
return NULL;
} // for
// For Explain and Statistics all parameters are inputs.
// (Insert once, after the loop, so the list is not duplicated
// for each parameter.)
getProcInputParamsVids().insert(getProcAllParamsVids());
}
} // if
return TableValuedFunction::bindNode(bindWA);
}
// -----------------------------------------------------------------------
// TableValuedFunction
// -----------------------------------------------------------------------
RelExpr *TableValuedFunction::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
//
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
//
// Allocate an RETDesc and attach it to this and the BindScope.
//
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, getTableDesc()));
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
//
// Bind the base class.
//
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus()) return boundExpr;
//
// Assign the set of columns that belong to the virtual table
// as the output values that can be produced by this node.
//
getGroupAttr()->addCharacteristicOutputs(getTableDesc()->getColumnList());
return boundExpr;
} // TableValuedFunction::bindNode()
// -----------------------------------------------------------------------
// Member functions for classes Control*
// must be written allowing for a NULL BindWA to be passed in!
//
// This happens when called from the SQLC/SQLCO Preprocessor,
// which needs to bind certain "static-only" statements --
// those which evaluate to STATIC_ONLY_WITH_WORK_FOR_PREPROCESSOR --
// see ControlAbstractClass::isAStaticOnlyStatement().
// -----------------------------------------------------------------------
RelExpr * ControlAbstractClass::bindNode(BindWA *bindWA)
{
if (nodeIsBound()) return this;
// Early return if called by SQLC/SQLCO Preprocessor
if (!bindWA) return this;
// Allocate an empty RETDesc and attach it to this node and the BindScope.
setRETDesc(new(bindWA->wHeap()) RETDesc(bindWA));
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return bindSelf(bindWA);
} // ControlAbstractClass::bindNode()
RelExpr * ControlQueryShape::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// remember the required shape in the control table
if (alterArkcmpEnvNow())
{
if (getShape())
ActiveControlDB()->setRequiredShape(this);
else
{
// no shape passed in. Hold or Restore.
if (holdShape())
ActiveControlDB()->saveCurrentCQS();
else
ActiveControlDB()->restoreCurrentCQS();
if (ActiveControlDB()->getRequiredShape())
ActiveControlDB()->getRequiredShape()->holdShape() = holdShape();
}
}
return ControlAbstractClass::bindNode(bindWA);
} // ControlQueryShape::bindNode()
RelExpr * ControlQueryDefault::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Alter the current Defaults settings if this is a static CQD.
//
// "AffectYourself" is coming to you courtesy of the Staple Singers:
// 'Affect yourself, na na na, na na na na, affect yourself, re re re re.'
// It's neat to find such Binder-relevant lyrics, eh?
//
NABoolean affectYourself = alterArkcmpEnvNow();
assert(!bindWA || bindWA->getSchemaDB() == ActiveSchemaDB());
NADefaults &defs = ActiveSchemaDB()->getDefaults();
defs.setState(NADefaults::SET_BY_CQD);
if ( defs.isReadonlyAttribute(token_) == TRUE )
{
Int32 attrNum = defs.lookupAttrName(token_);
if (stricmp(value_, defs.getValue(attrNum)) != 0 )
{
if (CmpCommon::getDefault(DISABLE_READ_ONLY) == DF_OFF)
{
if (bindWA) bindWA->setErrStatus();
*CmpCommon::diags() << DgSqlCode(-4130) << DgString0(token_);
return NULL;
}
}
}
if (holdOrRestoreCQD_ == 0)
{
attrEnum_ = affectYourself ? defs.validateAndInsert(token_, value_, reset_)
: defs.validate (token_, value_, reset_);
if (attrEnum_ < 0)
{
if (bindWA) bindWA->setErrStatus();
return NULL;
}
// remember this control in the control table
if (affectYourself)
ActiveControlDB()->setControlDefault(this);
}
else if ((holdOrRestoreCQD_ > 0) && (affectYourself))
{
attrEnum_ = defs.holdOrRestore(token_, holdOrRestoreCQD_);
if (attrEnum_ < 0)
{
if (bindWA) bindWA->setErrStatus();
return NULL;
}
}
return ControlAbstractClass::bindNode(bindWA);
} // ControlQueryDefault::bindNode()
RelExpr * ControlTable::bindNode(BindWA *bindWA)
{
if (nodeIsBound()) return this;
CMPASSERT(bindWA); // can't handle it yet if called from SQLC Preprocessor
// remember this control in the control table
tableName_->applyDefaults(bindWA, bindWA->getDefaultSchema());
NABoolean ok = alterArkcmpEnvNow() ?
ActiveControlDB()->setControlTableValue(this) :
ActiveControlDB()->validate(this);
if (NOT ok)
{
if (bindWA) bindWA->setErrStatus();
return NULL;
}
return ControlAbstractClass::bindNode(bindWA);
} // ControlTable::bindNode()
RelExpr * ControlSession::bindNode(BindWA *bindWA)
{
if (nodeIsBound()) return this;
// remember this control in the control session
NABoolean ok = alterArkcmpEnvNow() ?
ActiveControlDB()->setControlSessionValue(this) :
ActiveControlDB()->validate(this);
if (NOT ok)
{
if (bindWA) bindWA->setErrStatus();
return NULL;
}
return ControlAbstractClass::bindNode(bindWA);
} // ControlSession::bindNode()
RelExpr * SetSessionDefault::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
if (getOperatorType() == REL_SET_SESSION_DEFAULT)
{
// trim leading and trailing spaces from token_ and value_
// and upcase token
token_ = token_.strip(NAString::both);
value_ = value_.strip(NAString::both);
token_.toUpper();
// TBD: perhaps add a component privilege that allows others
// to set parserflags
if ((token_ == "SET_PARSERFLAGS") ||
(token_ == "RESET_PARSERFLAGS"))
{
if (!ComUser::isRootUserID())
{
*CmpCommon::diags() << DgSqlCode(-1017);
bindWA->setErrStatus();
return this;
}
}
}
return ControlAbstractClass::bindNode(bindWA);
} // SetSessionDefault::bindNode()
// -----------------------------------------------------------------------
// member function for class RelSetTimeout
// -----------------------------------------------------------------------
RelExpr * RelSetTimeout::bindNode(BindWA *bindWA)
{
if (nodeIsBound()) return this;
// Allocate an empty RETDesc and attach it to this node and the BindScope.
setRETDesc(new(bindWA->wHeap()) RETDesc(bindWA));
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
if (timeoutValueExpr_) { // bind the timeout-value expression
timeoutValueExpr_->bindNode(bindWA);
if (bindWA->errStatus()) return this;
}
if ( ! strcmp("*", userTableName_.getCorrNameAsString()) )
isForAllTables_ = TRUE ;
HostVar *proto = userTableName_.getPrototype() ;
// Check for the not-supported "SET STREAM TIMEOUT" on a specific stream
if ( isStream_ && ! isForAllTables_ ) {
*CmpCommon::diags() << DgSqlCode(-3187);
bindWA->setErrStatus();
return this;
}
if ( isForAllTables_ ) { /* do nothing */ }
else if ( proto ) { // it is a HOSTVAR or DEFINE
userTableName_.applyDefaults(bindWA, bindWA->getDefaultSchema());
CMPASSERT ( proto->isPrototypeValid() ) ;
userTableName_.getPrototype()->bindNode(bindWA);
} else { // i.e., an explicit table name was specified
// Get the NATable for this table.
NATable *naTable = bindWA->getNATable(userTableName_, FALSE);
if (bindWA->errStatus()) return this; // e.g. error: table does not exist
if ( naTable->getViewText() ) { // cannot set lock timeout on a view
*CmpCommon::diags() << DgSqlCode(-3189);
bindWA->setErrStatus();
return this;
}
// Extract and keep the physical file name
const NAFileSet * clstInd = naTable->getClusteringIndex() ;
setPhysicalFileName( clstInd->getFileSetName().getQualifiedNameAsString().data() );
}
// Bind the base class.
return bindSelf(bindWA);
}
// -----------------------------------------------------------------------
// member functions for class Describe
// (see sqlcomp/CmpDescribe.cpp for execution of the request)
// -----------------------------------------------------------------------
RelExpr *Describe::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// SHOWCONTROL DEFAULT "magic string"; -- see ShowSchema.h and ExSqlComp.cpp
if (getFormat() == CONTROL_DEFAULTS_) {
if (getDescribedTableName().getQualifiedNameObj().getObjectName() ==
ShowSchema::ShowControlDefaultSchemaMagic())
{
// Return info in an error message (a warning msg doesn't cut it).
const SchemaName &catsch = bindWA->getDefaultSchema();
NAString cat(catsch.getCatalogNameAsAnsiString(),bindWA->wHeap());
NAString sch(catsch.getUnqualifiedSchemaNameAsAnsiString(),bindWA->wHeap());
//
if (SqlParser_NAMETYPE == DF_NSK) {
// LCOV_EXCL_START - nsk
// The cat & sch from the BindWA are really from MPLOC.
// Get the real ANSI cat & sch, prepending them to the strings
// and put the MPLOC info in parens.
const SchemaName &csAnsi = ActiveSchemaDB()->getDefaultSchema();
NAString cAnsi(csAnsi.getCatalogNameAsAnsiString(),bindWA->wHeap());
NAString sAnsi(csAnsi.getUnqualifiedSchemaNameAsAnsiString(),bindWA->wHeap());
cat.prepend(cAnsi + " (");
cat += ")";
sch.prepend(sAnsi + " (");
sch += ")";
// LCOV_EXCL_STOP
}
*CmpCommon::diags() << DgSqlCode(-ABS(ShowSchema::DiagSqlCode()))
<< DgCatalogName(cat) << DgSchemaName (sch);
bindWA->setErrStatus();
return this;
}
if (getDescribedTableName().getQualifiedNameObj().getObjectName() ==
GetControlDefaults::GetExternalizedDefaultsMagic())
{
// Return info in an error message (a warning msg doesn't cut it).
NAString cqdPairs(bindWA->wHeap());
size_t lenN, lenV;
char lenbufN[10], lenbufV[10];
const char *nam, *val;
NADefaults &defs = bindWA->getSchemaDB()->getDefaults();
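// Each externalized default is appended as a length-prefixed pair:
// a 3-character name length, the name, a 3-character value length,
// then the value (see the "%3d" formatting below).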
for (CollIndex i = 0; i < defs.numDefaultAttributes(); i++ ) {
if (defs.getCurrentDefaultsAttrNameAndValue(i, nam, val, TRUE)) {
lenN = strlen(nam);
lenV = strlen(val);
CMPASSERT(lenN <= 999 && lenV <= 999); // %3d coming up next
sprintf(lenbufN, "%3d", (UInt32)lenN);
sprintf(lenbufV, "%3d", (UInt32)lenV);
cqdPairs += NAString(lenbufN) + nam + lenbufV + val;
}
}
*CmpCommon::diags()
<< DgSqlCode(-ABS(GetControlDefaults::DiagSqlCode()))
<< DgString0(cqdPairs);
bindWA->setErrStatus();
return this;
}
}
// Create a descriptor for a virtual table to look like this:
//
// CREATE TABLE DESCRIBE__ (DESCRIBE__COL VARCHAR(3000) NOT NULL);
// For SeaQuest Unicode:
// CREATE TABLE DESCRIBE__ (DESCRIBE__COL VARCHAR(3000 BYTES) CHARACTER SET UTF8 NOT NULL);
//
#define MAX_DESCRIBE_LEN 3000 // e.g., SQL/MP Views.ViewText column
// readtabledef_allocate_desc requires that HEAP (STMTHEAP) be used for new's!
desc_struct * table_desc = readtabledef_allocate_desc(DESC_TABLE_TYPE);
table_desc->body.table_desc.tablename = new HEAP char[strlen("DESCRIBE__")+1];
strcpy(table_desc->body.table_desc.tablename, "DESCRIBE__");
// see nearly identical code below for indexes file desc
desc_struct * files_desc = readtabledef_allocate_desc(DESC_FILES_TYPE);
table_desc->body.table_desc.files_desc = files_desc;
files_desc->body.files_desc.fileorganization = KEY_SEQUENCED_FILE;
Lng32 colnumber = 0, offset = 0;
desc_struct * column_desc = readtabledef_make_column_desc(
table_desc->body.table_desc.tablename,
"DESCRIBE__COL",
colnumber, // INOUT
REC_BYTE_V_ASCII,
MAX_DESCRIBE_LEN,
offset); // INOUT
column_desc->body.columns_desc.character_set = CharInfo::UTF8;
column_desc->body.columns_desc.encoding_charset = CharInfo::UTF8;
table_desc->body.table_desc.colcount = colnumber;
table_desc->body.table_desc.record_length = offset;
desc_struct * index_desc = readtabledef_allocate_desc(DESC_INDEXES_TYPE);
index_desc->body.indexes_desc.tablename = table_desc->body.table_desc.tablename;
index_desc->body.indexes_desc.indexname = table_desc->body.table_desc.tablename;
index_desc->body.indexes_desc.ext_indexname = table_desc->body.table_desc.tablename;
index_desc->body.indexes_desc.keytag = 0; // primary index
index_desc->body.indexes_desc.record_length = table_desc->body.table_desc.record_length;
index_desc->body.indexes_desc.colcount = table_desc->body.table_desc.colcount;
index_desc->body.indexes_desc.blocksize = 4096; // anything > 0
// Cannot simply point to same files desc as the table one,
// because then ReadTableDef::deleteTree frees same memory twice (error)
desc_struct * i_files_desc = readtabledef_allocate_desc(DESC_FILES_TYPE);
index_desc->body.indexes_desc.files_desc = i_files_desc;
i_files_desc->body.files_desc.fileorganization = KEY_SEQUENCED_FILE;
desc_struct * key_desc = readtabledef_allocate_desc(DESC_KEYS_TYPE);
key_desc->body.keys_desc.indexname = index_desc->body.indexes_desc.indexname;
key_desc->body.keys_desc.keyseqnumber = 1;
key_desc->body.keys_desc.tablecolnumber = 0;
key_desc->body.keys_desc.ordering= 0;
index_desc->body.indexes_desc.keys_desc = key_desc;
table_desc->body.table_desc.columns_desc = column_desc;
table_desc->body.table_desc.indexes_desc = index_desc;
//
// Get the NATable for this object.
//
CorrName corrName("DESCRIBE__");
corrName.setSpecialType(ExtendedQualName::VIRTUAL_TABLE);
NATable *naTable = bindWA->getNATable(corrName, FALSE/*CatBind*/, table_desc);
if (bindWA->errStatus())
return this;
//
// Allocate a TableDesc (which is not the table_desc we just constructed)
// and attach it to the Scan node.
//
setTableDesc(bindWA->createTableDesc(naTable, corrName));
if (bindWA->errStatus())
return this;
//
// Allocate an RETDesc and attach it to the Scan node and the BindScope.
//
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, getTableDesc()));
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
//
// Bind the described table CorrName member, the children, and the base class.
//
if (! describedTableName_.getQualifiedNameObj().getObjectName().isNull())
{
if ((getFormat() >= CONTROL_FIRST_) &&
(getFormat() <= CONTROL_LAST_))
{
describedTableName_.applyDefaults(bindWA, bindWA->getDefaultSchema());
}
else
{
// do not override schema for showddl
bindWA->setToOverrideSchema(FALSE);
// if this is a showlabel command on a resource fork,
// but the describedTableName
// is not a fully qualified rfork name, then get the rfork name
// for the specified table.
if ((getFormat() == Describe::LABEL_) &&
(describedTableName_.getSpecialType() == ExtendedQualName::RESOURCE_FORK) &&
(describedTableName_.getLocationName().isNull()))
{
describedTableName_.setSpecialType(ExtendedQualName::NORMAL_TABLE);
NATable *naTable = bindWA->getNATable(describedTableName_);
if (NOT bindWA->errStatus())
{
// replace the describedTableName with its rfork name.
describedTableName_.setSpecialType(ExtendedQualName::RESOURCE_FORK);
NAString rforkName = naTable->getClusteringIndex()->getFileSetName().getQualifiedNameAsString();
char * rforkNameData = (char*)(rforkName.data());
rforkNameData[rforkName.length()-1] += 1;
describedTableName_.setLocationName(rforkName);
}
}
// check if we need to consider public schema before
// describedTableName_ is qualified by getNATable
if (describedTableName_.getQualifiedNameObj().getSchemaName().isNull())
setToTryPublicSchema(TRUE);
bindWA->getNATable(describedTableName_);
if (bindWA->errStatus())
{
// if volatile related error, return it.
// Otherwise, clear diags and let this error be caught
// when describe is executed.
if ((CmpCommon::diags()->mainSQLCODE() == -4190) ||
(CmpCommon::diags()->mainSQLCODE() == -4191) ||
(CmpCommon::diags()->mainSQLCODE() == -4192) ||
(CmpCommon::diags()->mainSQLCODE() == -4193) ||
(CmpCommon::diags()->mainSQLCODE() == -4155) || // define not supported
(CmpCommon::diags()->mainSQLCODE() == -4086) || // catch Define Not Found error
(CmpCommon::diags()->mainSQLCODE() == -30044)) // default schema access error
return this;
CmpCommon::diags()->clear();
bindWA->resetErrStatus();
}
}
if (pUUDFName_ NEQ NULL AND NOT pUUDFName_->getObjectName().isNull())
{
pUUDFName_->applyDefaults(bindWA->getDefaultSchema());
}
}
bindChildren(bindWA);
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus()) return boundExpr;
//
// Assign the set of columns that belong to the table to be scanned
// as the output values that can be produced by this scan.
//
getGroupAttr()->addCharacteristicOutputs(getTableDesc()->getColumnList());
return boundExpr;
} // Describe::bindNode()
// -----------------------------------------------------------------------
// member functions for class RelLock
// -----------------------------------------------------------------------
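// RelLock binds an explicit lock request, e.g. (an illustrative syntax
// sketch; names are hypothetical):
//
//   LOCK TABLE cat.sch.t IN EXCLUSIVE MODE;
//
// Each distinct base table named, directly or through a view, gets its
// own TableDesc in tabIds_ below.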
RelExpr * RelLock::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// do not do override schema for this
bindWA->setToOverrideSchema(FALSE);
// Get the NATable for this object.
NATable *naTable = bindWA->getNATable(getTableName());
if (bindWA->errStatus())
return this;
NABoolean isView = !!naTable->getViewText();
if (isView && !naTable->isAnMV())
{
*CmpCommon::diags() << DgSqlCode(-4222)
<< DgString0("Views");
bindWA->setErrStatus();
return this;
}
else
{
baseTableNameList_.insert((CorrName *)getPtrToTableName());
}
Int32 locSpec = 0;
NAString tabNames(bindWA->wHeap());
for (CollIndex i = 0; i < baseTableNameList_.entries(); i++) {
naTable = bindWA->getNATable(*baseTableNameList_[i]);
if (bindWA->errStatus())
return this;
// Genesis 10-990212-6908:
// Ignore the user-specified correlation name --
// use just the 3-part tblname (and any LOCATION clause, etc).
// Then, insert only unique names into tabIds_ --
// to prevent XTNM duplicates (errmsg 4056)
// when multiple layered views reference the same table or corr-name.
CorrName bt(*baseTableNameList_[i]);
bt.setCorrName("");
NABoolean haveTDforThisBT = FALSE;
for (CollIndex j = 0; j < tabIds_.entries(); j++) {
if (bt == tabIds_[j]->getCorrNameObj()) {
haveTDforThisBT = TRUE;
break;
}
}
if (!haveTDforThisBT) {
if (bt.isLocationNameSpecified()) locSpec++;
tabNames += NAString(", ") +
bt.getQualifiedNameObj().getQualifiedNameAsAnsiString();
tabIds_.insert(bindWA->createTableDesc(naTable, bt));
if (bindWA->errStatus()) return this;
}
}
if (tabIds_.entries() > 1) {
CMPASSERT(locSpec == 0);
tabNames.remove(0, 2); // remove leading ", "
// Warning 4124: More than one table will be locked: $0~String0.
// (warning, so user realizes the effects of this command
// when run on a view which joins tables...).
*CmpCommon::diags() << DgSqlCode(+4124) << DgString0(tabNames);
}
if ((isView) ||
(tabIds_.entries() > 1) ||
(baseTableNameList_.entries() > 1) ||
(CmpCommon::getDefault(ATTEMPT_ESP_PARALLELISM) == DF_OFF))
{
parallelExecution_ = FALSE;
}
// Allocate an empty RETDesc and attach it to this node and the BindScope.
setRETDesc(new(bindWA->wHeap()) RETDesc(bindWA));
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
// Bind the base class.
return bindSelf(bindWA);
} // RelLock::bindNode()
// -----------------------------------------------------------------------
// member functions for class RelTransaction
// -----------------------------------------------------------------------
RelExpr * RelTransaction::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Allocate an empty RETDesc and attach it to this node and the BindScope.
setRETDesc(new(bindWA->wHeap()) RETDesc(bindWA));
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
if (diagAreaSizeExpr_) {
diagAreaSizeExpr_->bindNode(bindWA);
if (bindWA->errStatus()) return this;
}
// "mode_" is NULL if BEGIN/COMMIT/ROLLBACK WORK, nonNULL if SET TRANSACTION.
if (mode_) {
if ((mode_->autoCommit() != TransMode::AC_NOT_SPECIFIED_) ||
(mode_->getAutoBeginOn() != 0) ||
(mode_->getAutoBeginOff() != 0))
{
CMPASSERT(mode_->isolationLevel() == TransMode::IL_NOT_SPECIFIED_ &&
mode_->accessMode() == TransMode::AM_NOT_SPECIFIED_);
}
else
{
// See Ansi 14.1, especially SR 4.
// Similar code must be maintained in
// comexe/ExControlArea::addControl() and NADefaults::validateAndInsert().
// SET TRANSACTION w/o specifying ISOLATION LEVEL reverts TransMode to
// the NADefaults setting of ISOLATION_LEVEL
// (which the user should set to SERIALIZABLE if they want
// SET TRANSACTION to be Ansi conformant).
if (mode_->isolationLevel() == TransMode::IL_NOT_SPECIFIED_)
{
if (CmpCommon::getDefault(ISOLATION_LEVEL_FOR_UPDATES) == DF_NONE)
bindWA->getSchemaDB()->getDefaults().getIsolationLevel(
mode_->isolationLevel()); // short int
else
bindWA->getSchemaDB()->getDefaults().getIsolationLevel(
mode_->isolationLevel(), // short int
CmpCommon::getDefault(ISOLATION_LEVEL_FOR_UPDATES));
}
if (mode_->accessMode() == TransMode::AM_NOT_SPECIFIED_)
mode_->updateAccessModeFromIsolationLevel(
mode_->getIsolationLevel()); // enum
// 3114 Transaction access mode RW is incompatible with isolation level RU
if (mode_->accessMode() == TransMode::READ_WRITE_ &&
mode_->isolationLevel() == TransMode::READ_UNCOMMITTED_) {
*CmpCommon::diags() << DgSqlCode(-3114);
bindWA->setErrStatus();
return this;
}
if (mode_->rollbackMode() == TransMode::ROLLBACK_MODE_NOT_SPECIFIED_)
mode_->rollbackMode() = TransMode::ROLLBACK_MODE_WAITED_ ;
// 4352 -
if (mode_->multiCommit() == TransMode::MC_ON_)
{
if (mode_->invalidMultiCommitCompatibility())
{
*CmpCommon::diags() << DgSqlCode(-4352);
bindWA->setErrStatus();
return this;
}
}
}
} // SET TRANSACTION stmt
// Bind the base class.
return bindSelf(bindWA);
}
// Transpose::bindNode - Bind the transpose node.
// Coming into the node (from the parser) there are two ItemExpr Trees:
//
// keyCol_: The ItemExpr contains a ColReference to the key column which
// is added by the transpose node. This pointer is set to NULL by bindNode.
// If keyCol_ is NULL coming into the bindNode, then no key Column is
// generated for this transpose.
//
// transValsTree_: This ItemExpr tree contains a list of pairs which is
// NULL terminated (for ease of processing). Each pair contains in child(0),
// a list of transpose items for a given transpose set and in child(1), a
// list of ColReferences to the new value columns associated with this
// transpose set. A transpose item is a list of value expressions.
// This pointer is set to NULL by bindNode.
//
// For Example:
//
// SELECT *
// FROM Table
// TRANSPOSE A,B AS C1
// X,Y,Z as C2
// (1,'hello'),(2,'world') AS (C3, C4)
// KEY BY K1
//
// For the above query, after parsing, the TRANSPOSE node will look like:
//
// TRANSPOSE
// keyCol_ transValsTree_
// | |
// K1 O------O---------O---NULL
// | | |
// O O O--O
// |\ |\ | |\
// O C1 O C2 | C3 C4
// |\ |\ O---------O---NULL
// A O X O | |
// |\ |\ O O
// B NULL Y O |\ |\
// |\ 1 'hello' 2 'world'
// Z NULL
//
// O - represent ITM_LIST nodes.
//
// bindNode binds this structure to form a new structure contained in
// the vector of ValueIdLists, transUnionVector_.
//
// transUnionVector_: This is a vector of ValueIdLists. There is one entry
// for each transpose set, plus one entry for the key values. Each entry
// contains a list of ValueIdUnion Nodes. The first entry contains a list
// with one ValueIdUnion node. This node is for the Const. Values (1 - N)
// representing the Key Values. The other entries contain lists of
// ValueIdUnion nodes for the Transposed Values. Each of these entries of
// the vector represent a transpose set. If the transpose set contains a
// list of values, then there will be only one ValueIdUnion node in the
// list. If the transpose set contains a list of lists of values, then
// there will be as many ValueIdUnion nodes as there are items in the
// sublists. (see example below.)
// transUnionVector_ is generated in bindNode().
//
// transUnionVectorSize_: This is the number of entries in transUnionVector_.
//
// For the above query, after binding, the TRANSPOSE node will look like:
//
// TRANSPOSE
// transUnionVectorSize_: 4
// transUnionVector_:
// ValueIdUnion(1,2,3,4,5,6,7)
// ValueIdUnion(A,B)
// ValueIdUnion(X,Y,Z)
// ValueIdUnion(1,2) , ValueIdUnion('hello','world')
//
//
RelExpr *Transpose::bindNode(BindWA *bindWA)
{
// If this node has already been bound, we are done.
//
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
BindContext *curContext = bindWA->getCurrentScope()->context();
curContext->inTransposeClause() = TRUE;
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
// At this point the Transpose relational operator has two or three
// expressions:
// keyCol_ --- A ColReference to the new keyCol. (possibly NULL)
// transValsTree_ --- expressions for the transposed values and their
// ColReferences.
//
// transpose::bindNode() performs the following steps:
//
// 1 - Construct a list of transpose set expressions
// and a list of ColReferences associated with each transpose set
// expression.
//
// 2 - Allocate a return descriptor and add the columns from the
// child's descriptor to it.
//
// 3 - Allocate the transUnionVector_
//
// 4 - Construct a ValueIdUnion node for the Key Values. Bind this node.
// Add the keyColName to the return descriptor with the valueId of this
// node. Add the valueId of this node as the first entry of
// a ValueIdList in the first entry of transUnionVector_.
//
// 5 - For each transpose set, Construct as many ValueIdUnion nodes as
// there are values in each item of the transpose set. Within a
// given transpose set, the number of values per item must be the
// same. In the example above, the third transpose set contains the
// items (1, 'hello') and (2, 'world'). These both have two values per
// item. The others all have 1 value per item. The ValueIdUnions
// generated will contain the i'th value from each item. Bind each
// of these ValueUnionId nodes. Add the value column name to the
// return descriptor with the valueId of this node. Add the valueId
// of this node the ValueIdList in the proper entry of
// transUnionVector_.
//
// 6 - Set the return descriptor, and bindSelf.
//
CollIndex i, j, k;
CollIndex numTransSets = 0;
// Get a pointer to the head of this list of pairs.
// This is the last time we will have to reference this tree.
//
ItemExpr *transTree = (ItemExpr *)removeTransValsTree();
// Allocate two ItemExpr Lists. One for the list of lists of (lists of)
// expressions. And the other for the list of (lists of) ColReferences.
//
ItemExprList transSetsList(bindWA->wHeap());
ItemExprList newColsList(bindWA->wHeap());
// Populate these lists and
// determine how many transpose sets there are in this tree.
// In the example above, there should be three.
//
while (transTree) {
transSetsList.insert(transTree->child(0)->child(0));
newColsList.insert(transTree->child(0)->child(1));
numTransSets++;
transTree = transTree->child(1);
}
// Must have at least one value expression in the transpose values list.
//
CMPASSERT(numTransSets > 0);
// Using the example above, at this point:
//
// transSetsList newColsList
// | | | | | |
// O O O---------O---NULL C1 C2 O
// |\ |\ | | |\
// A O X O O O C3 C4
// |\ |\ |\ |\
// B NULL Y O 1 'hello' 2 'world'
// |\
// Z NULL
//
// Allocate the return descriptor. This will contain the
// columns of the child node as well as the new columns added
// by the transpose operator. The column order is:
//
// [child's columns][keyCol][valCol1][valCol2] ...
//
// Using the example, this would be:
//
// [child's columns], K1, C1, C2, C3, C4
//
RETDesc *resultTable = new(bindWA->wHeap()) RETDesc(bindWA);
// Add the columns from the child to the RETDesc.
//
const RETDesc &childTable = *child(0)->getRETDesc();
resultTable->addColumns(bindWA, childTable);
transUnionVectorSize_ = numTransSets + 1;
transUnionVector() = new(bindWA->wHeap())
ValueIdList[transUnionVectorSize_];
// If there is a LOB column, return an error. Transpose is not allowed on LOB columns.
for (i = 0; i < resultTable->getDegree(); i++)
{
if ((resultTable->getType(i)).getFSDatatype() == REC_BLOB ||
(resultTable->getType(i)).getFSDatatype() == REC_CLOB)
{
*CmpCommon::diags() << DgSqlCode(-4322);
bindWA->setErrStatus();
return this;
}
}
// Get the key column reference
// This is the last time we need this ItemExpr.
//
ColReference *keyColumn = (ColReference *)removeKeyCol();
// If no key column has been specified, then no key col will be
// generated.
//
if (keyColumn) {
//Get the key column name.
//
NAString keyColName(keyColumn->getColRefNameObj().getColName(), bindWA->wHeap());
// Construct and Bind the ValueIdUnion node as the union of constants
// from 1 to the total number of transpose expressions. In the above
// example this will be from 1 to 9, since there are 3 transpose sets
// and each set has 3 expressions.
//
ValueIdList constVals;
ItemExpr *constExpr;
CollIndex keyVal;
// For each expression in each transpose set.
//
for (i = 0, keyVal = 1; i < numTransSets; i++) {
// Determine how many expressions are in each transpose set.
//
CollIndex numTransItems = 0;
ItemExpr *transSet = transSetsList[i];
while (transSet) {
numTransItems++;
transSet = transSet->child(1);
}
for (j = 0; j < numTransItems; j++, keyVal++) {
// Construct the constant value
//
#pragma nowarn(1506) // warning elimination
constExpr = new(bindWA->wHeap()) SystemLiteral(keyVal);
#pragma warn(1506) // warning elimination
// Bind the constant value.
//
constExpr->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// Insert the valueId into the list
//
constVals.insert(constExpr->getValueId());
}
}
// Construct the ValueIdUnion node which will represent the key Col.
//
ValueIdUnion *keyVidu = new(bindWA->wHeap())
ValueIdUnion(constVals, NULL_VALUE_ID);
// Bind the ValueIdUnion node.
//
keyVidu->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// Add the key column to the RETDesc (as the union of all the constants)
//
resultTable->addColumn(bindWA, keyColName, keyVidu->getValueId());
// The ValueIdUnion for the Key Values is the first entry in
// the ValueIdList of the first entry of transUnionVector_.
//
transUnionVector()[0].insert(keyVidu->getValueId());
}
// For each transpose set,
// - bind the list of expressions.
// - Construct a ValueIdUnion node containing the resulting valueIds.
// - Bind this ValueIdUnion node
// - Add the associate column name to the return descriptor with the
// valueId of the ValueIdUnion node.
//
ValueIdList transVals;
for (i = 0; i < numTransSets; i++) {
// The column(s) associated with this transpose set.
// (will be used below, within the inner loop)
//
ItemExprList newCols(newColsList[i], bindWA->wHeap());
// Determine how many expressions are in each transpose set.
//
CollIndex numTransItems = 0;
ItemExpr *transSet = transSetsList[i];
ItemExprList transItemList(bindWA->wHeap());
// Populate this list.
//
while (transSet) {
transItemList.insert(transSet->child(0));
numTransItems++;
transSet = transSet->child(1);
}
ItemExprList transItem(transItemList[0], bindWA->wHeap());
CollIndex numTransVals = transItem.entries();
// For a given transpose set, the number of new columns declared
// must be the same as the number of values per item. In the example
// above, the third transpose set contains the item (1, 'hello') and
// the columns (C3, C4); both have two entries.
//
if (numTransVals != newCols.entries()) {
*CmpCommon::diags() << DgSqlCode(-4088);
bindWA->setErrStatus();
return this;
}
for (k = 0; k < numTransVals; k++) {
ItemExpr *transValueUnionExpr = NULL;
for (j = 0; j < numTransItems; j++) {
transItem.clear();
transItem.insertTree(transItemList[j], ITM_ITEM_LIST);
// Within a given transpose set, the number of values per item
// must be the same. In the example above, the third transpose
// set contains the items (1, 'hello') and (2, 'world'). These
// both have two values per item. The others all have 1 value
// per item.
//
if (numTransVals != transItem.entries()) {
*CmpCommon::diags() << DgSqlCode(-4088);
bindWA->setErrStatus();
return this;
}
if (transValueUnionExpr == NULL) {
transValueUnionExpr = transItem[k];
}
else
{
transValueUnionExpr = new (bindWA->wHeap())
ItemList(transValueUnionExpr, transItem[k]);
}
}
// Bind the Transpose Values expressions. Get the expression value Id's
//
transVals.clear();
if(transValueUnionExpr != NULL )
transValueUnionExpr->convertToValueIdList(transVals,
bindWA,
ITM_ITEM_LIST);
if (bindWA->errStatus()) return this;
// If there is more than one transpose set, the value columns
// generated by transpose can be NULL. So, make sure the typing is
// done properly. This is done by setting the first in the list to
// be nullable; then the ValueIdUnion will be nullable and the new
// column will be nullable. This is not done on the ValueIdUnion
// node itself, since it would add a Null Instantiate node, and
// we later assume that this node will always be a ValueIdUnion
// node.
//
if (numTransSets > 1) {
ValueId valId = transVals[0];
transVals[0] = valId.nullInstantiate(bindWA, FALSE);
}
// Construct and Bind the ValueIdUnion node for the transpose vals.
//
ValueIdUnion *valVidu = new(bindWA->wHeap())
ValueIdUnion(transVals, NULL_VALUE_ID);
valVidu->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// Insert this valueIdUnion node into the list of valueIdUnions
// in the proper entry in transUnionVector_
//
transUnionVector()[i + 1].insert(valVidu->getValueId());
// Get the val column reference
//
ColReference *valCol = (ColReference *)newCols[k];
// Must have Column Refs to val column.
//
CMPASSERT(valCol);
//Get the val column name.
//
NAString valColName( valCol->getColRefNameObj().getColName(), bindWA->wHeap());
// Add the transpose column
// (as the union of all of the transposed value columns)
//
resultTable->addColumn(bindWA, valColName, valVidu->getValueId());
}
}
// Set the return descriptor
//
setRETDesc(resultTable);
bindWA->getCurrentScope()->setRETDesc(resultTable);
//
// Bind the base class.
//
return bindSelf(bindWA);
} // Transpose::bindNode()
// -----------------------------------------------------------------------
// The Pack node binds itself by composing its packing expression from
// all the columns available in its child's RETDesc. The packed columns
// produced by the packing expression are then made available in the Pack
// node's own RETDesc.
// -----------------------------------------------------------------------
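// For example (an illustrative sketch, not taken from the code below):
// with a packing factor of 4, four incoming single-column rows (1), (2),
// (3), (4) would be packed into one physical row carrying the packed
// column [1,2,3,4].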
RelExpr* Pack::bindNode(BindWA* bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
// ---------------------------------------------------------------------
// The Pack node has a packing expression stored as packingExprTree_
// before binding. If packingExprTree_ is NULL, we are just going to
// pick up all the columns from the output of its child. During binding,
// this tree is converted into a value id list.
// ---------------------------------------------------------------------
// Create and bind the packing factor item expression.
#pragma nowarn(1506) // warning elimination
ItemExpr* pfie = new (bindWA->wHeap()) SystemLiteral(packingFactorLong());
#pragma warn(1506) // warning elimination
pfie->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// Insert vid of bound constant into packingFactor valueIdSet.
packingFactor().clear();
packingFactor().insert(pfie->getValueId());
// Create my RETDesc to hold the packed columns.
RETDesc* resultTable = new (bindWA->wHeap()) RETDesc (bindWA);
// Bind the tree if it's present.
if (packingExprTree_)
{
ItemExpr* packExprTree = removePackingExprTree();
packExprTree->convertToValueIdList(packingExpr(), bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) return this;
for (CollIndex i = 0; i < packingExpr().entries(); i++)
{
// Add all columns to result table.
NAString packedColName( "PACKEDCOL_", bindWA->wHeap());
packedColName += bindWA->fabricateUniqueName();
#pragma nowarn(1506) // warning elimination
Int32 length = packedColName.length();
#pragma warn(1506) // warning elimination
char * colName = new (bindWA->wHeap()) char[length + 1];
colName[length] = 0;
#pragma nowarn(1506) // warning elimination
str_cpy_all(colName, packedColName, packedColName.length());
#pragma warn(1506) // warning elimination
ColRefName colRefName(colName);
resultTable->addColumn(bindWA,
colRefName,
packingExpr().at(i),
USER_COLUMN,
colName);
}
}
else // no packing expr tree, get all the columns from child.
{
// Get the RETDesc from the child, which is assumed to be a RelRoot. Too strict?
const RETDesc& childTable = *child(0)->getRETDesc();
ValueIdList childTableVidList;
// These are only the user columns. Are SYS columns important?
childTable.getValueIdList(childTableVidList);
// Initialize packing expression.
packingExpr().clear();
// For each column in child's RETDesc, put a PackFunc() on top of it.
for (CollIndex i = 0; i < childTableVidList.entries(); i++)
{
ItemExpr* childItemExpr = childTableVidList[i].getItemExpr();
PackFunc* packedItemExpr = new (bindWA->wHeap())
PackFunc(childItemExpr,pfie);
// Bind the packed column.
packedItemExpr->bindNode(bindWA);
if (bindWA->errStatus()) return this;
// Insert into both the result table and my packingExpr_.
packingExpr().insert(packedItemExpr->getValueId());
// $$$ Any implications of this? Remains to be seen.
// Use the original column name as the packed column name. The index
// is on USER columns only. SYS columns matter?
ColRefName colRefName = childTable.getColRefNameObj(i);
const char* heading = childTable.getHeading(i);
// Insert into RETDesc for RelRoot above it to pick up as select-list.
resultTable->addColumn(bindWA,
colRefName,
packedItemExpr->getValueId(),
USER_COLUMN,
heading);
// $$$
// OR: start with making a copy of child's RETDesc and change each col
// to point to the vid for the packed column instead of the original.
}
}
// Set the result table, bind self and return.
setRETDesc(resultTable);
bindWA->getCurrentScope()->setRETDesc(resultTable);
bindSelf(bindWA);
// To test packing, add an unpack node on top of this pack node to check.
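// (Illustrative: e.g. setting PACKING_FACTOR=8 in the compiler's
// environment enables this test path.)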
char* env = getenv("PACKING_FACTOR");
if (env && atol(env) > 0)
{
Lng32 pf = atol(env);
ItemExpr* unPackExpr = NULL;
ItemExpr* rowFilter = NULL;
ItemExpr* unPackItem;
ItemExpr* numRows;
const NAType* typeInt = new(bindWA->wHeap()) SQLInt(TRUE,FALSE);
ValueIdList packedCols;
resultTable->getValueIdList(packedCols);
NAString hostVarName("_sys_UnPackIndex", bindWA->wHeap());
hostVarName += bindWA->fabricateUniqueName();
ItemExpr* indexHostVar = new(bindWA->wHeap())
HostVar(hostVarName,new(bindWA->wHeap()) SQLInt(TRUE,FALSE),TRUE);
indexHostVar->synthTypeAndValueId();
for (CollIndex i=0; i < packedCols.entries(); i++)
{
const NAType* colType =
&(packedCols[i].getItemExpr()->child(0)->getValueId().getType());
Lng32 width = colType->getNominalSize();
#pragma nowarn(1506) // warning elimination
Lng32 base = (colType->supportsSQLnullPhysical() ? (pf-1)/CHAR_BIT +1 : 0)
+ sizeof(Int32);
#pragma warn(1506) // warning elimination
// $$$ Some duplicate code to be moved to PackColDesc later.
ColRefName colRefName;
colRefName = resultTable->getColRefNameObj(i);
unPackItem = new(bindWA->wHeap())
UnPackCol(packedCols[i].getItemExpr(),
indexHostVar,
width,
base,
colType->supportsSQLnull(),
colType);
numRows = new(bindWA->wHeap())
UnPackCol(packedCols[i].getItemExpr(),
new(bindWA->wHeap()) SystemLiteral(0),
typeInt->getNominalSize(),
0,
FALSE,
typeInt);
unPackExpr = (unPackExpr ?
new(bindWA->wHeap()) ItemList(unPackExpr,unPackItem) :
unPackItem);
rowFilter = (rowFilter ?
new(bindWA->wHeap()) ItemList(rowFilter,numRows) :
numRows);
}
RelExpr* unpack =
new(bindWA->wHeap()) UnPackRows(pf,unPackExpr,rowFilter,NULL,
this, indexHostVar->getValueId());
return unpack->bindNode(bindWA);
}
return this;
} // Pack::bindNode()
RelExpr * Rowset::bindNode(BindWA* bindWA)
{
// If this node has already been bound, we are done.
if (nodeIsBound())
return this->transformRelexpr_;
if (bindWA->getHostArraysArea()) {
bindWA->getHostArraysArea()->done() = TRUE;
}
//
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
// Transform current node into a new subtree which performs access to
// RowSet based on the unpacking and tuple node expression operators.
// The formed tuple is composed of all input RowSet host variables:
// Rowset-tuple: array_hv1, array_hv2, ... array_hvN.
// The Unpack expression is used to retrieve the elements of the Rowset
// with an indexed operator. For example, retrieve values for index two
// for each Rowset host variable.
// The transformed subtree has the following structure
//
// UNPACK
// |
// TUPLE
//
// Note that the original Rowset relational expression has a rename node
// on top.
//
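// For example (illustrative; names are hypothetical): with host variable
// arrays :a and :b each declared with dimension 100,
//   INSERT INTO t VALUES (:a, :b)
// is rewritten so that the Tuple node supplies the rowset host variables
// and the UnPackRows node extracts element i of each array on iteration i.
//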
// First find the maxRowSetSize and its rowsetSizeExpr. The rowset size is
// the smallest declared dimension of the arrays composing the rowset.
// If a constant rowset size was given in the SQL statement, it must be
// smaller than the computed value.
NABoolean hasDifferentSizes = FALSE;
Lng32 maxRowsetSize = 0; /* Maximum number of Rows in Rowset */
ItemExpr *rowsetSizeExpr;
ItemExpr *hostVarTree;
// We get the list of input host vars, which is stored in the root of the
// parse tree
HostArraysWA *arrayArea = bindWA->getHostArraysArea();
RelRoot *root = bindWA->getTopRoot();
// Do any extra checking at this moment.
for (hostVarTree = inputHostvars_;
hostVarTree;
hostVarTree = hostVarTree->child(1)) {
CMPASSERT(hostVarTree->getOperatorType() == ITM_ITEM_LIST);
HostVar *hostVar = (HostVar *)hostVarTree->getChild(0);
if (hostVar->getOperatorType() != ITM_HOSTVAR ||
hostVar->getType()->getTypeQualifier() != NA_ROWSET_TYPE) {
// 30001 A rowset must be composed of host variable arrays
*CmpCommon::diags() << DgSqlCode(-30001);
bindWA->setErrStatus();
return NULL;
}
// Get the smallest dimension for rowset size
SQLRowset* hostVarType = (SQLRowset *)hostVar->getType();
if (hostVarType->getNumElements() <= 0) {
// 30004 The dimension of the arrays composing the RowSet must be greater
// than zero. A value of $0~Int0 was given
*CmpCommon::diags() << DgSqlCode(-30004)
<< DgInt0((Int32)hostVarType->getNumElements());
bindWA->setErrStatus();
return NULL;
}
if (maxRowsetSize == 0)
maxRowsetSize = hostVarType->getNumElements();
else if (hostVarType->getNumElements() != maxRowsetSize) {
// 30005 The dimensions of the arrays composing the RowSet are
// different. The smallest dimension is assumed.
// This is just a warning
// Give the warning only once
if (hasDifferentSizes == FALSE) {
if (arrayArea->hasDynamicRowsets()) {
// 30015 The dimension of the arrays composing the RowSet must be the same
// in dynamic SQL
*CmpCommon::diags() << DgSqlCode(-30015) ;
bindWA->setErrStatus();
return NULL;
} // for static SQL this is only a warning.
hasDifferentSizes = TRUE;
*CmpCommon::diags() << DgSqlCode(30005);
}
// Pick the smallest one
if (hostVarType->getNumElements() < maxRowsetSize)
maxRowsetSize = hostVarType->getNumElements();
}
// Make sure that the element type and the corresponding rowset array's
// null indicator are both nullable or not nullable. That is, force the
// element type's nullability to match the presence of a null indicator.
NAType* hostVarElemType = hostVarType->getElementType();
NABoolean hostVarElemNullInd = !(hostVar->getIndName().isNull());
// If hostVarType is Unknown then this is a dynamic param that has been
// converted into a hostvar. For dynamic params there is no null
// indicator variable/param specified in the query text, so the previous
// check will always return FALSE. We will set all dynamic params to be
// nullable and let type synthesis infer nullability later on.
if (hostVarElemType->getTypeQualifier() == NA_UNKNOWN_TYPE)
hostVarElemNullInd = TRUE;
hostVarElemType->setNullable(hostVarElemNullInd);
}
// If a rowset size expression was produced during parsing, it is used
// to restrict the rowset size during execution. The expression must be
// a numeric literal (known at compile time) or an integer host variable
// (known at execution time). We do not allow other types of expression
// since the rowset size must be known before the statement is executed, to
// avoid copying a lot when the host variable arrays are sent down the
// execution queue.
// If there is no size specification of the form ROWSET <size> ( <list> ) then
// we take the size from ROWSET FOR INPUT SIZE <size>
if (!sizeExpr_ && bindWA->getHostArraysArea()) {
sizeExpr_ = bindWA->getHostArraysArea()->inputSize();
if ((bindWA->getHostArraysArea()->getInputArrayMaxSize() > 0) &&
(!sizeExpr_ )) {
// ODBC process is performing a bulk insert and we need to create
// an input parameter to simulate the functionality of ROWSET FOR INPUT
// SIZE ... syntax.
NAString name = "__arrayinputsize" ;
HostVar *node = new (bindWA->wHeap())
HostVar(name,
new(bindWA->wHeap()) SQLInt(TRUE,FALSE),
TRUE);
node->setHVRowsetForInputSize();
root->addAtTopOfInputVarTree(node);
sizeExpr_ = (ItemExpr *) node ;
}
}
if (sizeExpr_) {
if (sizeExpr_->getOperatorType() == ITM_CONSTANT) {
if (((ConstValue *)sizeExpr_)->getType()->getTypeQualifier()
!= NA_NUMERIC_TYPE) {
// 30003 Rowset size must be an integer host variable or an
// integer constant
*CmpCommon::diags() << DgSqlCode(-30003);
bindWA->setErrStatus();
return NULL;
}
if (((ConstValue *)sizeExpr_)->getExactNumericValue() <= 0) {
// 30004 The dimension of the arrays composing the RowSet must be
// greater than zero. A value of $0~Int0 was given
*CmpCommon::diags() << DgSqlCode(-30004)
<< DgInt0((Int32) (((ConstValue *)sizeExpr_)
->getExactNumericValue()));
bindWA->setErrStatus();
return NULL;
}
if (((ConstValue *)sizeExpr_)->getExactNumericValue() > maxRowsetSize) {
// 30002 The given RowSet size ($0~Int0) must be smaller or
// equal to the smallest dimension ($1Int1) of the
// arrays composing the rowset
*CmpCommon::diags() << DgSqlCode(-30002)
<< DgInt0((Int32)
((ConstValue *)sizeExpr_)
->getExactNumericValue())
<< DgInt1(maxRowsetSize);
bindWA->setErrStatus();
return NULL;
}
else {
maxRowsetSize = (Lng32)((ConstValue *)sizeExpr_)->getExactNumericValue() ;
}
}
else
if (!((sizeExpr_->getOperatorType() == ITM_HOSTVAR &&
((HostVar *)sizeExpr_)->getType()->getTypeQualifier()
== NA_NUMERIC_TYPE) ||
(sizeExpr_->getOperatorType() == ITM_DYN_PARAM ) ||
((sizeExpr_->getOperatorType() == ITM_CAST) &&
(sizeExpr_->child(0)->getOperatorType() == ITM_DYN_PARAM))))
{
// 30003 Rowset size must be an integer host variable or an
// integer constant
*CmpCommon::diags() << DgSqlCode(-30014);
bindWA->setErrStatus();
return NULL;
}
// We return a -1 if the execution time rowset size exceeds the maximum
// declared size
ItemExpr *maxSize = new (bindWA->wHeap()) SystemLiteral(maxRowsetSize);
ItemExpr *neg = new (bindWA->wHeap()) SystemLiteral(-1);
ItemExpr *constrPred = new (bindWA->wHeap())
BiRelat(ITM_GREATER, sizeExpr_, maxSize);
rowsetSizeExpr = new (bindWA->wHeap())
IfThenElse(constrPred, neg, sizeExpr_);
// IfThenElse only works if Case is its parent.
rowsetSizeExpr = new (bindWA->wHeap()) Case (NULL, rowsetSizeExpr);
// At code generation time, it is assumed that the size expression
// is of size integer, so we do this cast. We do not allow null
// values.
rowsetSizeExpr = new (bindWA->wHeap())
Cast(rowsetSizeExpr, new (bindWA->wHeap()) SQLInt(TRUE,FALSE));
// For dynamic rowsets, the parameter specifying rowset for input size
// must be typed as a non-nullable integer.
if (sizeExpr_->getOperatorType() == ITM_DYN_PARAM ) {
sizeExpr_->synthTypeAndValueId();
SQLInt intType(TRUE,FALSE); // TRUE -> allow negative values, FALSE -> not nullable
(sizeExpr_->getValueId()).coerceType(intType, NA_NUMERIC_TYPE);
}
}
else
{
rowsetSizeExpr = new (bindWA->wHeap()) SystemLiteral(maxRowsetSize);
}
// Construct an index host variable to iterate over the elements of the
// rowset. The name of the host variable must be unique (fabricated
// by calling fabricateUniqueName). This host variable is bound since it
// is not an input of the parse tree. Call synthTypeAndValueId()
// which does the minimum binding.
NAString indexName(bindWA->wHeap());
if (indexExpr_) {
// Get the name.
indexName = ((ColReference *)indexExpr_)->getColRefNameObj().getColName();
} else {
indexName = "_sys_rowset_index" + bindWA->fabricateUniqueName();
}
const NAType *indexType = new (bindWA->wHeap()) SQLInt(TRUE, FALSE);
ItemExpr *indexHostVar = new (bindWA->wHeap())
HostVar(indexName, indexType,
TRUE // is system-generated
);
indexHostVar->synthTypeAndValueId();
// Generate the RowsetArrayScan expressions which are used to extract
// an element value of the rowset array given an index.
ItemExpr *unPackExpr = NULL;
for (hostVarTree = inputHostvars_;
hostVarTree;
hostVarTree = hostVarTree->child(1)) {
HostVar *hostVar = (HostVar *)hostVarTree->getChild(0);
SQLRowset* hostVarType = (SQLRowset *)hostVar->getType();
NAType* hostVarElemType = hostVarType->getElementType();
Lng32 hostVarElemSize = hostVarElemType->getTotalSize();
NABoolean hostVarElemNullInd = !(hostVar->getIndName().isNull());
// Force all host variables to have the same number of elements, which was
// found previously.
hostVarType->setNumElements(maxRowsetSize);
// The element size must be aligned.
hostVarElemSize = ALIGN(hostVarElemSize,
hostVarElemType->getDataAlignment());
// Assign a valueId for this Host variable. UnPackRows node will need
// this valueId during its binding.
//hostVar->synthTypeAndValueId();
hostVar->bindNode(bindWA);
ItemExpr *unPackCol =
new (bindWA->wHeap())
RowsetArrayScan(hostVar, // Rowset Host Var array
indexHostVar, // Index
maxRowsetSize, // Cannot go over this size
hostVarElemSize, // Element size in bytes
hostVarElemNullInd,
hostVarElemType
);
// Construct a list of expressions to extract the Data value from
// the packed row. During normalization, this list (or a ValueIdList
// representing this list) will be reduced to the minimum required.
// This should be a NULL-terminated list; unfortunately, there are
// many parts of the SQL/MX code that loop over the arity instead
// of checking for a NULL-terminated list... the effect is a
// segmentation violation.
unPackExpr = (unPackExpr
? new (bindWA->wHeap()) ItemList(unPackExpr, unPackCol)
: unPackCol);
}
// Enable rowsetrowcount for rowset updates and deletes
// if the user has not turned the feature OFF.
// If we have rowsets in the WHERE clause and are not in a SELECT,
// then we have either a rowset update or a rowset delete, for direct rowsets.
if (arrayArea &&
(!(arrayArea->getHasDerivedRowsets())) &&
arrayArea->hasHostArraysInWhereClause() &&
(arrayArea->hasInputRowsetsInSelectPredicate() != HostArraysWA::YES_) &&
(CmpCommon::getDefault(ROWSET_ROW_COUNT) == DF_ON)) {
arrayArea->setRowsetRowCountArraySize(maxRowsetSize);
}
if (indexExpr_) {
/*
* Create an item expression to obtain the index
*/
ItemExpr *unPackCol =
new (bindWA->wHeap())
RowsetArrayScan(indexHostVar, // Index
indexHostVar, // Index
maxRowsetSize, // Cannot go over this size
indexType->getTotalSize(),
0,
indexType,
ITM_ROWSETARRAY_ROWID
);
unPackExpr = (unPackExpr
? new (bindWA->wHeap()) ItemList(unPackExpr, unPackCol)
: unPackCol);
}
// Now create a Tuple node to hang the children and input values of the
// actual Rowset Node to it. Make sure to copy the RelExpr part of Rowset
// to tuple.
// Kludge up a dummy child for the index
ItemExpr *inputs = ((indexExpr_)
? new (bindWA->wHeap()) ItemList(inputHostvars_,
indexHostVar)
: inputHostvars_);
Tuple *tupleExpr = new (bindWA->wHeap()) Tuple(inputs);
tupleExpr->setBlockStmt(isinBlockStmt());
copyTopNode(tupleExpr);
// Construct the replacement tree for the Rowset operator.
RelExpr *newSubTree = (new (bindWA->wHeap())
UnPackRows(maxRowsetSize,
unPackExpr,
rowsetSizeExpr,
NULL,
tupleExpr,
indexHostVar->getValueId()));
newSubTree->setBlockStmt(isinBlockStmt());
// Do not set this flag for derived rowsets. This flag is used in the
// generator to determine whether the ONLJ and TF TDBs must set the row
// number when encountering an execution-time rowset error.
if (arrayArea &&
(!(arrayArea->getHasDerivedRowsets())) &&
(arrayArea->hasInputRowsetsInSelectPredicate() != HostArraysWA::YES_))
{
newSubTree->setRowsetIterator(TRUE);
}
// Move any predicate on the packed table to be on the result
// of unpacking.
newSubTree->addSelPredTree(removeSelPredTree());
// Remember the transform tree, just in case someone is trying to bind this
// node again.
transformRelexpr_ = newSubTree;
// Bind the new generated subtree.
return newSubTree->bindNode(bindWA);
} // Rowset::bindNode()
RelExpr * RowsetRowwise::bindNode(BindWA* bindWA)
{
// If this node has already been bound, we are done.
if (nodeIsBound())
return this->transformRelexpr_;
if (bindWA->getHostArraysArea()) {
bindWA->getHostArraysArea()->done() = TRUE;
}
//
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
// Transform current node into a new subtree which performs access to
// RowSet based on the unpacking.
// UNPACK
// |
// TUPLE
//
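// (Rowwise rowsets ship one contiguous client-side buffer of rows; the
// buffer address, maximum input row length, and input size used below all
// come from the HostArraysWA.)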
// We get the list of input host vars, which is stored in the root of the
// parse tree
HostArraysWA *arrayArea = bindWA->getHostArraysArea();
if ((arrayArea->rwrsMaxSize()->getOperatorType() != ITM_CONSTANT) ||
(((ConstValue *)arrayArea->rwrsMaxSize())->getType()->getTypeQualifier()
!= NA_NUMERIC_TYPE))
{
// 30003 Rowset size must be an integer host variable or an
// integer constant
*CmpCommon::diags() << DgSqlCode(-30003);
bindWA->setErrStatus();
return NULL;
}
// if partition number has been specified, then we don't unpack
// rows. The whole buffer is shipped to the specified partition.
if (arrayArea->partnNum())
return child(0)->castToRelExpr();
Lng32 maxRowsetSize =
(Lng32)((ConstValue *)arrayArea->rwrsMaxSize())->getExactNumericValue() ;
NAType * typ = new(bindWA->wHeap()) SQLInt(FALSE, FALSE);
ItemExpr * rwrsInputSizeExpr =
new(bindWA->wHeap()) Cast(arrayArea->inputSize(), typ);
if (bindWA->errStatus())
return this;
ItemExpr * rwrsMaxInputRowlenExpr =
new(bindWA->wHeap()) Cast(arrayArea->rwrsMaxInputRowlen(), typ);
if (bindWA->errStatus())
return this;
ItemExpr * rwrsBufferAddrExpr = arrayArea->rwrsBuffer();
if (bindWA->errStatus())
return this;
// Construct the replacement tree for the Rowset operator.
RelExpr *newSubTree = (new (bindWA->wHeap())
UnPackRows(maxRowsetSize,
rwrsInputSizeExpr,
rwrsMaxInputRowlenExpr,
rwrsBufferAddrExpr,
child(0)));
// Remember the transform tree, just in case someone is trying to bind this
// node again.
transformRelexpr_ = newSubTree;
// Bind the new generated subtree.
return newSubTree->bindNode(bindWA);
} // RowsetRowwise::bindNode()
// LCOV_EXCL_START - rfi
RelExpr * RowsetFor::bindNode(BindWA* bindWA)
{
// Binding of this node should not happen. It should have been eliminated
// by now by the pre-binding step. Its content is used to populate the
// RowSet node with options.
CMPASSERT(0);
return NULL;
}
// LCOV_EXCL_STOP
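// RowsetInto binds the output side of rowsets, e.g. (illustrative; names
// are hypothetical):
//
//   SELECT c1, c2 INTO :a, :b FROM t;
//
// where :a and :b are host variable arrays. The node is replaced by a
// Pack node that appends each fetched row to the output arrays.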
RelExpr * RowsetInto::bindNode(BindWA* bindWA)
{
// If this node has already been bound, we are done.
if (nodeIsBound())
return this->transformRelexpr_;
//
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus()) return this;
NABoolean hasDifferentSizes = FALSE;
Lng32 maxRowsetSize = 0; /* Maximum number of Rows in Rowset */
ULng32 numOutputHostvars = 0;
ItemExpr *rowsetSizeExpr;
ItemExpr *hostVarTree;
// Do any extra checking at this moment.
for (hostVarTree = outputHostvars_;
hostVarTree;
hostVarTree = hostVarTree->child(1)) {
numOutputHostvars++;
CMPASSERT(hostVarTree->getOperatorType() == ITM_ITEM_LIST);
HostVar *hostVar = (HostVar *)hostVarTree->getChild(0);
if (hostVar->getOperatorType() != ITM_HOSTVAR ||
hostVar->getType()->getTypeQualifier() != NA_ROWSET_TYPE) {
// 30001 A rowset must be composed of host variable arrays
*CmpCommon::diags() << DgSqlCode(-30001);
bindWA->setErrStatus();
return NULL;
}
// Get the smallest dimension for rowset size
SQLRowset* hostVarType = (SQLRowset *)hostVar->getType();
if (hostVarType->getNumElements() <= 0) {
// 30004 The dimension of the arrays composing the RowSet must be greater
// than zero. A value of $0~Int0 was given
*CmpCommon::diags() << DgSqlCode(-30004)
<< DgInt0((Int32)hostVarType->getNumElements());
bindWA->setErrStatus();
return NULL;
}
if (maxRowsetSize == 0)
maxRowsetSize = hostVarType->getNumElements();
else if (hostVarType->getNumElements() != maxRowsetSize) {
// 30005 Warning: the dimensions of the arrays composing the RowSet are
// different. The smallest dimension is assumed.
// This is just a warning
// Give the warning only once
if (hasDifferentSizes == FALSE) {
hasDifferentSizes = TRUE;
*CmpCommon::diags() << DgSqlCode(30005);
}
// Pick the smallest one
if (hostVarType->getNumElements() < maxRowsetSize)
maxRowsetSize = hostVarType->getNumElements();
}
// Make sure that the element type and the corresponding rowset array's
// null indicator are both nullable or not nullable. That is, force the
// element type's nullability to match the presence of a null indicator.
NAType* hostVarElemType = hostVarType->getElementType();
NABoolean hostVarElemNullInd = !(hostVar->getIndName().isNull());
hostVarElemType->setNullable(hostVarElemNullInd);
}
// If a rowset size expression was produced during parsing, it is used
// to restrict the rowset size during execution. The expression must be
// a numeric literal (known at compile time) or an integer host variable
// (known at execution time). We do not allow other types of expression
// since the rowset size must be known before the statement is executed, to
// avoid copying a lot when the host variable arrays are sent down the
// execution queue.
if (sizeExpr_) {
if (sizeExpr_->getOperatorType() == ITM_CONSTANT) {
if (((ConstValue *)sizeExpr_)->getType()->getTypeQualifier()
!= NA_NUMERIC_TYPE) {
// 30003 Rowset size must be an integer host variable or an
// integer constant
*CmpCommon::diags() << DgSqlCode(-30003);
bindWA->setErrStatus();
return NULL;
}
if (((ConstValue *)sizeExpr_)->getExactNumericValue() > maxRowsetSize) {
// 30002 The given RowSet size ($0~Int0) must be smaller or
// equal to the smallest dimension ($1Int1) of the
// arrays composing the rowset
*CmpCommon::diags() << DgSqlCode(-30002)
<< DgInt0((Int32)
((ConstValue *)sizeExpr_)
->getExactNumericValue())
<< DgInt1(maxRowsetSize);
bindWA->setErrStatus();
return NULL;
}
}
else
if (!(sizeExpr_->getOperatorType() == ITM_HOSTVAR &&
((HostVar *)sizeExpr_)->getType()->getFSDatatype()
== REC_BIN32_SIGNED)) {
// 30003 Rowset size must be an integer host variable or an
// integer constant
*CmpCommon::diags() << DgSqlCode(-30003);
bindWA->setErrStatus();
return NULL;
}
rowsetSizeExpr = sizeExpr_;
}
else
rowsetSizeExpr = new (bindWA->wHeap()) SystemLiteral(maxRowsetSize);
if (getGroupAttr()->isEmbeddedUpdateOrDelete()){
// 30020 Embedded update/delete cannot be used with SELECT...INTO and rowset.
*CmpCommon::diags() << DgSqlCode(-30020);
bindWA->setErrStatus();
return NULL;
}
// Generate the RowsetArrayInto expressions which are used to append
// an element value to the rowset array.
// Get the RETDesc from its only child, which must be of RelRoot type.
const RETDesc& childTable = *child(0)->getRETDesc();
ValueIdList childTableVidList;
childTable.getValueIdList(childTableVidList);
if (numOutputHostvars != childTableVidList.entries()) {
// 4094 The number of output host vars ($0) must equal the number of cols
*CmpCommon::diags() << DgSqlCode(-4094)
#pragma nowarn(1506) // warning elimination
<< DgInt0(numOutputHostvars) << DgInt1(childTableVidList.entries());
#pragma warn(1506) // warning elimination
bindWA->setErrStatus();
return NULL;
}
ItemExpr *packExpr = NULL;
Lng32 i;
for (hostVarTree = outputHostvars_, i = 0;
hostVarTree;
hostVarTree = hostVarTree->child(1), i++) {
HostVar *hostVar = (HostVar *)hostVarTree->getChild(0);
SQLRowset* hostVarType = (SQLRowset *)hostVar->getType();
NAType* hostVarElemType = hostVarType->getElementType();
// hostVarElemType->setNullable(TRUE);
Lng32 hostVarElemSize = hostVarElemType->getTotalSize();
NABoolean hostVarElemNullInd = !(hostVar->getIndName().isNull());
ItemExpr* sourceExpr = childTableVidList[i].getItemExpr();
ValueId sourceId = childTableVidList[i];
const NAType& targetType = *hostVarElemType;
sourceId.coerceType(targetType);
const NAType& sourceType = sourceId.getType();
NABoolean relaxCharTypeMatchingRule = FALSE;
// We make sure that the types that are coming from below this
// node match properly with the types it has
if (NOT targetType.isCompatible(sourceType)) {
// JQ
// Relaxing the character data type mismatching rule.
if ( targetType.getTypeQualifier() == NA_CHARACTER_TYPE &&
sourceType.getTypeQualifier() == NA_CHARACTER_TYPE &&
((const CharType&)targetType).getCharSet() == CharInfo::UNICODE &&
((const CharType&)sourceType).getCharSet() == CharInfo::ISO88591
)
{
relaxCharTypeMatchingRule = TRUE;
}
if ( !relaxCharTypeMatchingRule ) {
// Incompatible assignment from type $0~String0 to type $1~String1
*CmpCommon::diags() << DgSqlCode(-30007)
<< DgString0(sourceType.getTypeSQLname(TRUE /*terse*/))
<< DgString1(targetType.getTypeSQLname(TRUE /*terse*/));
bindWA->setErrStatus();
return NULL;
}
}
// Force all host variables to have the same number of elements, which was
// found previously.
hostVarType->setNumElements(maxRowsetSize);
// The element size must be aligned.
hostVarElemSize = ALIGN(hostVarElemSize,
hostVarElemType->getDataAlignment());
// Preserve the length that is coming from the node below this one
if (hostVarElemType->getTypeQualifier() == NA_CHARACTER_TYPE &&
sourceType.getTypeQualifier() == NA_CHARACTER_TYPE) {
Int32 sourceSize = ((CharType *) &sourceType)->getDataStorageSize();
Int32 targetSize = ((CharType *) hostVarElemType)->getDataStorageSize();
if (sourceSize > targetSize ) {
// Adjust the layout size instead of changing the element size?
((CharType *) hostVarElemType)->setDataStorageSize(sourceSize);
}
}
if ( relaxCharTypeMatchingRule == TRUE )
sourceExpr = new (bindWA->wHeap())
Translate(sourceExpr, Translate::ISO88591_TO_UNICODE);
// If the type is external (for instance, decimal or varchar), we must first
// convert to our internal equivalent type
if (hostVarElemType->isExternalType()) {
NAType *internalType = hostVarElemType->equivalentType();
sourceExpr = new (bindWA->wHeap()) Cast(sourceExpr, internalType);
}
sourceExpr = new (bindWA->wHeap()) Cast(sourceExpr, hostVarElemType);
ItemExpr *packCol =
new (bindWA->wHeap())
RowsetArrayInto(sourceExpr,
rowsetSizeExpr, // Runtime size
maxRowsetSize, // Cannot go over this size
hostVarElemSize, // Element size in bytes
hostVarElemNullInd,
hostVarType
);
// Construct a list of expressions to append the data value to the
// RowSet array. This should be a NULL-terminated list;
// unfortunately, there are many parts of the SQL/MX code that
// loop over the arity instead of checking for a NULL-terminated
// list... the effect is a segmentation violation.
packExpr = (packExpr
? new (bindWA->wHeap()) ItemList(packExpr, packCol)
: packCol);
}
// Construct the replacement tree for the RowsetInto operator.
RelExpr *newSubTree = (new (bindWA->wHeap())
Pack(maxRowsetSize,
child(0)->castToRelExpr(),
packExpr));
newSubTree->setFirstNRows(getFirstNRows());
// If we have an ORDER BY when there is an INTO :array, then we
// add the requirement that the tuples that this Pack node will
// receive must be sorted
ValueIdList *ptrReqOrder;
ptrReqOrder = new (bindWA->wHeap())
ValueIdList(((RelRoot *) (RelExpr *) newSubTree->child(0))->reqdOrder());
((Pack *) newSubTree)->setRequiredOrder(*ptrReqOrder);
// Remember the transform tree, just in case someone is trying to bind this
// node again.
transformRelexpr_ = newSubTree;
// Bind the new generated subtree.
return newSubTree->bindNode(bindWA);
} // RowsetInto::bindNode
RelExpr *
IsolatedScalarUDF::bindNode (BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// If we have a RoutineDesc, it means we got transformed from
// a UDFunction ItemExpr, and do NOT need to check all the metadata
// params etc.
if (getRoutineDesc() == NULL )
{
// If we get here, we created an IsolatedScalarUDF some other way
// than through the transformation of UDFunction. Either that or
// we have someone walking over our memory...
CMPASSERT(0);
bindWA->setErrStatus();
return this;
}
else
{
markAsBound();
}
return this;
} // IsolatedScalarUDF::bindNode ()
/*
* This method performs binder functions for the CALLSP node
* It performs semantic checks on the called stored procedure
* creates a Tuple child and allocates ValueIds for the parameters
* It also provides support for the CLI layer processing for OUT
* parameter processing.
*/
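// For example (illustrative; names are hypothetical):
//
//   CALL cat.sch.myproc(:inParam, :outParam);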
RelExpr *CallSP::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
if (bindWA->getHoldableType() == SQLCLIDEV_ANSI_HOLDABLE)
{
*CmpCommon::diags() << DgSqlCode(-4382);
bindWA->setErrStatus();
bindWA->setBindingCall (FALSE);
return this;
}
bindWA->setBindingCall (TRUE);
bindWA->setCurrOrdinalPosition (1);
bindWA->setCurrParamMode (COM_UNKNOWN_DIRECTION);
bindWA->clearHVorDPinSPDups ();
bindWA->setDupWarning (FALSE);
bindWA->setMaxResultSets(0);
// try PUBLIC SCHEMA only when no schema was specified
// and CQD PUBLIC_SCHEMA_NAME is specified
NAString pSchema =
ActiveSchemaDB()->getDefaults().getValue(PUBLIC_SCHEMA_NAME);
ComSchemaName pubSchema(pSchema);
NAString pubSchemaIntName = "";
if ( (getRoutineName().getSchemaName().isNull()) &&
(!pubSchema.getSchemaNamePart().isEmpty()) )
{
pubSchemaIntName = pubSchema.getSchemaNamePart().getInternalName();
}
// Invoke GetNARoutine () to retrieve the corresponding NARoutine from
// NARoutineDB_
QualifiedName name = getRoutineName();
const SchemaName &defaultSchema =
bindWA->getSchemaDB ()->getDefaultSchema();
name.applyDefaults(defaultSchema);
setRoutineName(name);
bindWA->setCurrSPName(&name);
// in open source, only the SEABASE catalog is allowed.
// Return an error if some other catalog is being used.
if ((NOT name.isSeabase()) && (NOT name.isHive()))
{
*CmpCommon::diags()
<< DgSqlCode(-1002)
<< DgCatalogName(name.getCatalogName())
<< DgString0("");
bindWA->setErrStatus();
return NULL;
}
CmpSeabaseDDL cmpSBD((NAHeap*)bindWA->wHeap());
desc_struct *catRoutine =
cmpSBD.getSeabaseRoutineDesc(
name.getCatalogName(),
name.getSchemaName(),
name.getObjectName());
// try public schema
if ( !catRoutine &&
!pubSchemaIntName.isNull() )
{
getRoutineName().setSchemaName(pubSchemaIntName);
if ( !pubSchema.getCatalogNamePart().isEmpty() )
{
getRoutineName().setCatalogName(pubSchema.getCatalogNamePart().getInternalName());
}
// in open source, only the SEABASE catalog is allowed.
// Return an error if some other catalog is being used.
if ((NOT getRoutineName().isSeabase()) && (NOT getRoutineName().isHive()))
{
*CmpCommon::diags()
<< DgSqlCode(-1002)
<< DgCatalogName(getRoutineName().getCatalogName())
<< DgString0("");
bindWA->setErrStatus();
return NULL;
}
bindWA->resetErrStatus();
catRoutine =
cmpSBD.getSeabaseRoutineDesc(
getRoutineName().getCatalogName(),
getRoutineName().getSchemaName(),
getRoutineName().getObjectName());
if ( !bindWA->errStatus() && catRoutine )
{ // if found in public schema, do not show previous error
CmpCommon::diags()->clear();
}
}
if (bindWA->violateAccessDefaultSchemaOnly(getRoutineName()))
return NULL;
if ( NULL == catRoutine )
{
// Diagnostic error is set by the readRoutineDef, we just need to
// make sure the rest of the compiler knows that an error occurred.
bindWA->setBindingCall (FALSE);
bindWA->setErrStatus ();
return this;
}
// Create a new NARoutine object
Int32 error = FALSE;
NARoutine *routine = new (bindWA->wHeap()) NARoutine ( getRoutineName(),
catRoutine,
bindWA,
error );
if ( bindWA->errStatus () )
{
// Error
bindWA->setBindingCall (FALSE);
bindWA->setErrStatus ();
return this;
}
NABoolean createRETDesc=TRUE;
RoutineDesc *rDesc = new (bindWA->wHeap()) RoutineDesc(bindWA, routine);
if (rDesc == NULL || bindWA->errStatus ())
{
// Error
bindWA->setBindingCall (FALSE);
bindWA->setErrStatus ();
return this;
}
if (rDesc->populateRoutineDesc(bindWA, createRETDesc) == FALSE )
{
bindWA->setBindingCall (FALSE);
bindWA->setErrStatus ();
return this;
}
setRoutineDesc(rDesc);
//
// Semantic checks
//
// if in a trigger and during DDL, make sure to fix up the name
// location list so that the name is fully qualified when stored
// in the TEXT metadata table
if ( bindWA->inDDL() && bindWA->isInTrigger () )
{
ParNameLocList *pNameLocList = bindWA->getNameLocListPtr();
if (pNameLocList)
{
ParNameLoc * pNameLoc
= pNameLocList->getNameLocPtr(getRoutineName().getNamePosition());
CMPASSERT(pNameLoc);
pNameLoc->setExpandedName(getRoutineName().getQualifiedNameAsAnsiString());
}
}
// Cannot support result sets or out params when
// SP is invoked within a trigger
if ( bindWA->isInTrigger () &&
getNARoutine()->hasOutParams ())
{
*CmpCommon::diags() << DgSqlCode(-UDR_BINDER_OUTPARAM_IN_TRIGGER)
<< DgTableName (getRoutineName().getQualifiedNameAsString());
bindWA->setErrStatus ();
bindWA->setBindingCall (FALSE);
return this;
}
if ( bindWA->isInTrigger () &&
getNARoutine()->hasResultSets ())
{
*CmpCommon::diags() << DgSqlCode(-UDR_BINDER_RESULTSETS_IN_TRIGGER)
<< DgTableName (getRoutineName().getQualifiedNameAsString());
bindWA->setErrStatus ();
bindWA->setBindingCall (FALSE);
return this;
}
const NAColumnArray ¶ms = getNARoutine()->getParams ();
CollIndex i = 0;
CollIndex numParams = getNARoutine()->getParamCount ();
CollIndex numSuppliedParams = countSuppliedParams (getRWProcAllParamsTree());
if (numSuppliedParams != numParams)
{
*CmpCommon::diags() << DgSqlCode(-UDR_BINDER_INCORRECT_PARAM_COUNT)
<< DgTableName(getRoutineName().getQualifiedNameAsString())
<< DgInt0((Lng32) numParams)
<< DgInt1((Lng32) numSuppliedParams);
bindWA->setErrStatus ();
bindWA->setBindingCall (FALSE);
return this;
}
short numResultSets = (short) getNARoutine()->getMaxResults();
bindWA->setMaxResultSets(numResultSets);
// On to the binding
// Invoke populateAndBindItemExpr, set up needed data structures
// Set up a RETDesc if we don't already have one.
RETDesc *resultTable = getRETDesc();
if (resultTable == NULL)
{
resultTable = new (bindWA->wHeap()) RETDesc(bindWA);
setRETDesc(resultTable);
}
populateAndBindItemExpr ( getRWProcAllParamsTree(),
bindWA );
if ( bindWA->errStatus ())
{
bindWA->setBindingCall (FALSE);
return this;
}
// Clear the Tree since we now have gotten vids for all the parameters.
setProcAllParamsTree(NULL);
// Now fix the param index value of the dynamic params or host vars
LIST (ItemExpr *) &bWA_HVorDPs = bindWA->getSpHVDPs();
CollIndex numHVorDPs = bWA_HVorDPs.entries();
ARRAY(ItemExpr *) local_HVorDPs(numHVorDPs);
CollIndex idx, idx1, idx2;
// Sort the ItemExpr in the order they appeared in the stmt
for (idx = 0; idx < numHVorDPs; idx++)
{
// Copy ItemExpr ptrs to a sorted Array.
local_HVorDPs.insertAt(bWA_HVorDPs[idx]->getHVorDPIndex() - 1,
bWA_HVorDPs[idx]);
}
// The following code goes through the list of Exprs and
// sets index values. The rules are:
// 1. When a DP or HV is repeated, all of them get the same
// index value which is equal to the index of the first occurrence
// 2. Two DPs or HVs are the same if their names and modes are the same.
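// For example (illustrative): in CALL p(?p1, ?p2, ?p1), the occurrences
// receive index values 1, 2, 1; the repeated ?p1 reuses the index of its
// first occurrence.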
Int32 currParamIndex = 1;
for (idx1 = 0; idx1 < numHVorDPs; idx1++)
{
ItemExpr *src = local_HVorDPs[idx1];
const NAString &name1 = (src->getOperatorType() == ITM_HOSTVAR) ?
((HostVar *)src)->getName() : ((DynamicParam *)src)->getName();
ComColumnDirection mode1 = src->getParamMode();
NABoolean encounteredElement = FALSE;
for (idx2 = idx1; idx2 < numHVorDPs; idx2++)
{
ItemExpr *dest = local_HVorDPs[idx2];
if (!encounteredElement && dest->getHVorDPIndex() >= currParamIndex)
{
// The parameter is encountered the first time
encounteredElement = TRUE;
dest->setPMOrdPosAndIndex(dest->getParamMode(),
dest->getOrdinalPosition(),
currParamIndex);
continue;
}
// The parameter is already corrected
if (dest->getHVorDPIndex() < currParamIndex)
continue;
const NAString &name2 = (dest->getOperatorType() == ITM_HOSTVAR) ?
((HostVar *)dest)->getName() : ((DynamicParam *)dest)->getName();
ComColumnDirection mode2 = dest->getParamMode();
if (name2.compareTo("") == 0)
continue;
if (name1.compareTo(name2) == 0 && mode1 == mode2)
{
dest->setPMOrdPosAndIndex(dest->getParamMode(),
dest->getOrdinalPosition(),
currParamIndex);
}
}
if (encounteredElement)
currParamIndex++;
}
// Restore the bindWA's HVorDP list since it might be needed
// while binding the root node in case of HVs.
bindWA->clearHVorDPinSPDups();
for (idx = 0; idx < numHVorDPs; idx++)
bindWA->addHVorDPToSPDups(local_HVorDPs[idx]);
// Create a tuple child for any subqueries or UDF inputs
// The hasSubquery() / hasUDF() flag gets set in setInOrOutParam if any of our
// passed in parameters is a subquery.
if ((getProcInputParamsVids().entries() != 0) &&
(hasSubquery() || hasUDF()))
{
Tuple *inTuple = new (bindWA->wHeap())
Tuple(getProcInputParamsVids().rebuildExprTree(ITM_ITEM_LIST),
bindWA->wHeap());
if ( inTuple )
{
// Now set and bind the Tuple child
setChild (0, inTuple);
// Bind this Tuple child
inTuple->bindNode (bindWA);
if ( bindWA->errStatus ())
{
bindWA->setBindingCall (FALSE);
return this;
}
// Get each IN entry from the Tuple and put it in
//the super's list
// Need to clear the list to avoid duplicates
getProcInputParamsVids().clear();
// Now reinitialize the inputs based on the Tuple processing.
inTuple->getRETDesc ()->getValueIdList (getProcInputParamsVids());
} // if inTuple
else
{
// Out of memory ...
bindWA->setBindingCall (FALSE);
bindWA->setErrStatus();
return this;
}
} // if getProcInputParamVids().entries()
else
{
// If we don't have a subquery parameter, we don't need to go through
// optimization-time rules and transformations; hence mark this
// as a physical node.
isPhysical_ = TRUE;
}
//
// Not sure whether we need to set the currently scoped RETDesc
// before binding the base class. Tuple::bindNode() does not do it
// so we won't either (for now)
//
//bindWA->getCurrentScope()->setRETDesc(getRETDesc());
// add the routine to the UdrStoiList. The UdrStoi list is used
// to check valid privileges
LIST(OptUdrOpenInfo *) udrList = bindWA->getUdrStoiList ();
ULng32 numUdrs = udrList.entries();
NABoolean udrReferenced = FALSE;
// See if UDR already exists
for (ULng32 stoiIndex = 0; stoiIndex < numUdrs; stoiIndex++)
{
if ( 0 ==
udrList[stoiIndex]->getUdrName().compareTo(
getRoutineName().getQualifiedNameAsAnsiString()
)
)
{
udrReferenced = TRUE;
break;
}
}
  // UDR has not been defined, go ahead and add one
if ( FALSE == udrReferenced )
{
SqlTableOpenInfo *udrStoi = new (bindWA->wHeap ())SqlTableOpenInfo ();
udrStoi->setAnsiName ( convertNAString(
getRoutineName().getQualifiedNameAsAnsiString(),
bindWA->wHeap ())
);
OptUdrOpenInfo *udrOpenInfo = new (bindWA->wHeap ())
OptUdrOpenInfo( udrStoi
, getRoutineName().getQualifiedNameAsAnsiString()
, (NARoutine *)getNARoutine()
);
bindWA->getUdrStoiList().insert(udrOpenInfo);
}
//
// Bind the base class
//
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus())
{
bindWA->setBindingCall (FALSE);
return boundExpr;
}
// Our characteristic inputs get set for us, we don't need to do it
// ourselves, however, we need to set our characteristic outputs
getGroupAttr()->addCharacteristicOutputs(getProcOutputParamsVids());
if (getNARoutine()->isProcedure())
bindWA->setHasCallStmts(TRUE);
bindWA->setBindingCall (FALSE);
return boundExpr;
} // CallSP::bindNode()
// This is the main entry point to walking the ItemExpr tree built by the
// parser, separating the IN and OUT parameters, setting appropriate
// characteristics of the IN/OUT parameters and binding them
// Currently only CallSP uses this code. If this routine needs to be shared
void IsolatedNonTableUDR::populateAndBindItemExpr ( ItemExpr *param,
BindWA *bindWA )
{
// This method is called recursively
CollIndex numParams = getEffectiveNARoutine()->getParamCount ();
CollIndex ordinalPosition = bindWA->getCurrOrdinalPosition ();
// No parameters, or we are done with the leaf node
if ( NULL == param )
{
return;
}
ComColumnDirection mode =
getEffectiveNARoutine()->getParams()[ordinalPosition-1]->getColumnMode ();
// This is the structure of the ItemExpr tree
// For 1 param
// ItemExpr
//
// 2 params
// ItemList
// / \
// Param1 Param2
//
// > 2 params
// ItemList
// / \
// Param1 ItemList
// / \
// Param2 ItemList
// ... ...
// ... ...
// / / \
// Param (N-2) / \
// / \
// Param(N-1) Param(N)
if ( ITM_ITEM_LIST == param->getOperatorType ())
{
// Use left child
CMPASSERT ((ItemExpr *) NULL != (*param).child (0));
populateAndBindItemExpr ( (*param).child(0),
bindWA );
if ( bindWA->errStatus ())
return;
// Now for the right child
CMPASSERT ((ItemExpr *) NULL != (*param).child (1));
populateAndBindItemExpr ( (*param).child(1),
bindWA );
return;
} // if ITM_ITEM_LIST == param->getOperatorType ()
  // For all leaf nodes we must come here (see the recursive call to
  // populateAndBindItemExpr above)
// Set the bindWA's current ordinal position and parameter mode
// Let HV and DynamicParam's bindNode take care of the
// settings. To ensure this, do a bindNode here
bindWA->setCurrParamMode (mode);
param->bindNode (bindWA);
if (bindWA->errStatus ())
return;
// Add the IN or OUT params to their respective lists
// and also create and bind a new ItemExpr for INOUT and OUT
// params.
// Also bump up the ordinalPosition count since we are done with this
// parameter.
setInOrOutParam (param,/* ordinalPosition,*/ mode, bindWA);
if ( bindWA->errStatus ())
return;
bindWA->setCurrOrdinalPosition (bindWA->getCurrOrdinalPosition () + 1);
} // PopulateAndBindItemExpr
// LCOV_EXCL_START - rfi
void
IsolatedNonTableUDR::setInOrOutParam (ItemExpr *expr,
ComColumnDirection paramMode,
BindWA *bindWA)
{
// Should not get here..
CMPASSERT(FALSE);
}
// LCOV_EXCL_STOP
// This method separates the IN and OUT parameters. Each IN/INOUT param
// is cast to the formal type (from NARoutine). This Cast'ed item expr
// is added to an ItemList tree to be passed to the Tuple ()
// constructor. For each OUT/INOUT, we create a NATypeToItem
// ItemExpr, bind it and add it to procOutParams_.
//
// This method is called once for each CALL statement argument. If an
// input argument to a CALL is an expression tree such as "? + ?" or
// "abs(:x)" then this method is called once for the entire tree.
//
// Side Effects: OUT: hasSubquery_
// neededValueIds_
// procAllParamsVids_
// procInputParamsVids_
// procOutputParamsVids_
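//
// Example (illustrative): for CALL proc(? + 1, :hv), this method is
// invoked twice: once for the whole input expression tree "? + 1" and
// once for the host variable :hv.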
void CallSP::setInOrOutParam ( ItemExpr *expr,
ComColumnDirection paramMode,
BindWA *bindWA)
{
// Depending on whether this is an IN or OUT parameter, we need to
// take different actions.
// For an IN (and INOUT) param, do the following
// Cast the parameter to its formal type and add it to the list of
// IN params. This will be used later to create a Tuple child and
// be bound by the Tuple itself
CollIndex ordinalPosition = bindWA->getCurrOrdinalPosition ();
const NAColumnArray &formalParams = getNARoutine()->getParams();
NAColumn &naColumn = *(formalParams[ordinalPosition-1]);
const NAType ¶mType = *(naColumn.getType());
// Don't really want to bind this, but how else can we
// get the ItemExpr's type
ItemExpr *boundExpr = expr->bindNode (bindWA);
if ( bindWA->errStatus ())
{
return;
}
  //10-061031-0188-Begin
  // Need to infer the charset for string literals that are part of
  // CALLSP parameters
ValueId inputTypeId = boundExpr->getValueId();
if(inputTypeId.getType().getTypeQualifier() == NA_CHARACTER_TYPE)
{
const CharType* stringLiteral = (CharType*)&(inputTypeId.getType());
if(CmpCommon::wantCharSetInference())
{
const CharType* desiredType =
CharType::findPushDownCharType(((CharType&)paramType).getCharSet(), stringLiteral, 0);
if ( desiredType )
inputTypeId.coerceType((NAType&)*desiredType, NA_CHARACTER_TYPE);
}
}
NABoolean throwInTranslateNode = FALSE;
CharInfo::CharSet paramCS = CharInfo::UnknownCharSet;
CharInfo::CharSet inputCS = CharInfo::UnknownCharSet;
const NABoolean isJdbc =
(CmpCommon::getDefault(JDBC_PROCESS) == DF_ON ? TRUE : FALSE);
const NABoolean isOdbc =
(CmpCommon::getDefault(ODBC_PROCESS) == DF_ON ? TRUE : FALSE);
const NAType &inputType = inputTypeId.getType();
//10-061031-0188-End
if ( COM_INPUT_COLUMN == paramMode ||
COM_INOUT_COLUMN == paramMode )
{
// If this input argument to the CALL is a single dynamic param
// then we want to record the formal parameter name. It will later
// be written into the query plan by the code generator and
// eventually if this CALL statement is DESCRIBEd, the formal
// param name gets returned in the SQLDESC_NAME descriptor entry.
if (expr->getOperatorType() == ITM_DYN_PARAM)
{
DynamicParam *dp = (DynamicParam *) expr;
dp->setUdrFormalParamName(naColumn.getColName());
}
// Check to see if we have a Subquery as an input
if ( !hasSubquery() )
hasSubquery() = expr->containsSubquery ();
// Check to see if we have a UDF as an input
if ( !hasUDF() )
hasUDF() = (expr->containsUDF () != NULL);
    // Do type checking;
    // if it is not a compatible type, report an error
if (!( NA_UNKNOWN_TYPE == inputType.getTypeQualifier () ||
paramType.isCompatible(inputType) ||
expr->getOperatorType () == ITM_DYN_PARAM
)
)
{
if ( inputType.getTypeQualifier() == NA_CHARACTER_TYPE )
{
paramCS = ((CharType&)paramType).getCharSet();
inputCS = ((CharType&)inputType).getCharSet();
NABoolean CS_unknown = (paramCS == CharInfo::UnknownCharSet) ||
(inputCS == CharInfo::UnknownCharSet) ;
if ( paramType.NAType::isCompatible(inputType) &&
paramCS != inputCS &&
CS_unknown == FALSE &&
CmpCommon::getDefault(ALLOW_IMPLICIT_CHAR_CASTING) == DF_ON
)
throwInTranslateNode = TRUE;
}
if ( throwInTranslateNode == FALSE )
{
        // Error, data types don't match
#pragma nowarn(1506) // warning elimination
*CmpCommon::diags() << DgSqlCode(-UDR_BINDER_PARAM_TYPE_MISMATCH)
<< DgInt0 (ordinalPosition)
<< DgTableName(getRoutineName().getQualifiedNameAsString())
<< DgString0 (inputType.getTypeSQLname (TRUE))
<< DgString1 (paramType.getTypeSQLname (TRUE));
#pragma warn(1506) // warning elimination
bindWA->setErrStatus ();
return;
}
} // if NOT isCompatible
// Create a Cast node if the types are not identical
if (! (inputType == paramType))
{
// First create a Translate node if the character sets are not identical
if ( throwInTranslateNode )
{
Int32 tran_type = find_translate_type( inputCS, paramCS );
ItemExpr * newTranslateChild =
new (bindWA->wHeap()) Translate(boundExpr, tran_type );
boundExpr = newTranslateChild->bindNode(bindWA);
if (bindWA->errStatus())
return;
        // NOTE: Leave "expr" at its old value as code below needs to check
        // that original ItemExpr rather than the new Translate node.
}
Cast *retExpr = new (bindWA->wHeap())
Cast(boundExpr, ¶mType, ITM_CAST, TRUE);
boundExpr = retExpr->bindNode (bindWA);
if ( bindWA->errStatus ())
{
return;
}
}
// Fill the ValueIdList for all the params
getProcAllParamsVids().insert( boundExpr->getValueId());
// Fill the ValueIdList for Input params
getProcInputParamsVids().insert( boundExpr->getValueId());
} // if INPUT or INOUT
// For OUT (and INOUT) parameters, we create a NATypeToItem object,
// bind it and add it to the list of OUT parameters (procOutParams_)
if ( COM_OUTPUT_COLUMN == paramMode ||
COM_INOUT_COLUMN == paramMode )
{
if (!( ITM_HOSTVAR == expr->getOperatorType () ||
ITM_DYN_PARAM == expr->getOperatorType ()))
{
#pragma nowarn(1506) // warning elimination
*CmpCommon::diags() << DgSqlCode(-UDR_BINDER_OUTVAR_NOT_HV_OR_DP)
<< DgInt0(ordinalPosition)
<< DgTableName(getRoutineName().getQualifiedNameAsString());
#pragma warn(1506) // warning elimination
bindWA->setErrStatus ();
return;
} // if NOT HOSTVAR or DYNAMIC PARAM
NATypeToItem *paramTypeItem = new (bindWA->wHeap())
NATypeToItem (naColumn.mutateType());
ItemExpr *outputExprToBind = NULL;
outputExprToBind = paramTypeItem->bindNode (bindWA);
if ( bindWA->errStatus ())
{
return;
}
// Fill the ValueIdList for all the params
getProcAllParamsVids().insert( outputExprToBind->getValueId());
// Fill the ValueIdList for the output params
addProcOutputParamsVid(outputExprToBind->getValueId ());
//
// Populate our RETDesc
//
    // It has already been allocated
RETDesc *resultTable = getRETDesc();
const NAString &formalParamName = naColumn.getColName();
const NAString *colParamName = &formalParamName;
// Set the userParamName
const NAString &userParamName =
// cannot use the boundExpr here as it will be a cast()
// for the HostVar or DynamicParam. Use the original
// ItemExpr pointer instead.
(ITM_HOSTVAR == expr->getOperatorType()) ?
((HostVar *)expr)->getName() :
((DynamicParam *)expr)->getName();
// Typically the name for this output column will be the formal
// parameter name. Exceptions:
// - No formal name was specified in the CREATE PROCEDURE. Use
// the (possibly empty) dynamic parameter or host variable name
// instead.
// - This is a JDBC or ODBC compile and the client is using a
// named host variable or dynamic parameter. JDBC and ODBC want
// us to use the client's name in this case.
if (formalParamName.isNull() ||
(!userParamName.isNull() && (isJdbc || isOdbc)))
{
colParamName = &userParamName;
}
ColRefName *columnName =
new (bindWA->wHeap())
ColRefName(*colParamName, bindWA->wHeap());
resultTable->addColumn(bindWA, *columnName, outputExprToBind->getValueId());
//
// We need the following line for static cursor declaration,
// according to a comment in bindRowValues()
//
cmpCurrentContext->saveRetrievedCols_ = resultTable->getDegree();
} // if OUTPUT or INOUT
} // setInOrOutParam
CollIndex RelRoutine::countSuppliedParams (ItemExpr *tree) const
{
CollIndex numParams=0;
if ( !tree ) return 0;
if (ITM_ITEM_LIST == tree->getOperatorType ())
{
numParams += countSuppliedParams (tree->child (0));
numParams += countSuppliedParams (tree->child (1));
}
else
numParams++;
return numParams;
} // RelRoutine::countSuppliedParams
void RelRoutine::gatherParamValueIds (const ItemExpr *tree, ValueIdList ¶msList) const
{
if ( !tree ) return;
if (ITM_ITEM_LIST == tree->getOperatorType ())
{
gatherParamValueIds (tree->child (0), paramsList);
gatherParamValueIds (tree->child (1), paramsList);
}
else
paramsList.insert(tree->getValueId());
} // RelRoutine::gatherParamValueIds
void ProxyFunc::createProxyFuncTableDesc(BindWA *bindWA, CorrName &corrName)
{
// Map column definitions into a desc_struct
desc_struct *tableDesc = createVirtualTableDesc();
// Map the desc_struct into an NATable. This will also add an
// NATable entry into the bindWA's NATableDB.
NATable *naTable =
bindWA->getNATable(corrName, FALSE /*catmanUsages*/, tableDesc);
if (bindWA->errStatus())
return;
// Allocate a TableDesc and attach it to this RelExpr instance
setTableDesc(bindWA->createTableDesc(naTable, corrName));
if (bindWA->errStatus())
return;
// Allocate a RETDesc and attach it to this and the BindScope
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA, getTableDesc()));
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
}
RelExpr *ProxyFunc::bindNode(BindWA *bindWA)
{
// This method now serves as a common bind node for SPProxy and
// ExtractSource classes, where we before had SPProxyFunc::bindNode()
// and ExtractSource::bindNode().
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Bind the child nodes
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
// Declare a correlation name that is unique within this query
switch (getOperatorType())
{
case REL_EXTRACT_SOURCE:
virtualTableName_ = "EXTRACT_SOURCE_";
break;
case REL_SP_PROXY:
virtualTableName_ = "SP_RESULT_SET_";
break;
default:
CMPASSERT(0);
break;
}
virtualTableName_ += bindWA->fabricateUniqueName();
CorrName corrName(getVirtualTableName());
corrName.setSpecialType(ExtendedQualName::VIRTUAL_TABLE);
createProxyFuncTableDesc(bindWA, corrName);
if (bindWA->errStatus())
return this;
// Bind the base class
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus())
return boundExpr;
// Assign the set of columns that belong to the virtual table
// as the output values that can be produced by this node.
getGroupAttr()->addCharacteristicOutputs(getTableDesc()->getColumnList());
return boundExpr;
} // ProxyFunc::bindNode()
RelExpr *TableMappingUDF::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// Create NARoutine object (no caching for TMUDF)
NARoutine *tmudfRoutine =NULL;
CorrName& tmfuncName = getUserTableName();
tmfuncName.setSpecialType(ExtendedQualName::VIRTUAL_TABLE);
QualifiedName name = getRoutineName();
const SchemaName &defaultSchema =
bindWA->getSchemaDB ()->getDefaultSchema();
name.applyDefaults(defaultSchema);
setRoutineName(name);
// Return an error if an unsupported catalog is being used.
if ((NOT name.isSeabase()) && (NOT name.isHive()))
{
*CmpCommon::diags()
<< DgSqlCode(-1002)
<< DgCatalogName(name.getCatalogName())
<< DgString0("");
bindWA->setErrStatus();
return NULL;
}
Lng32 diagsMark = CmpCommon::diags()->mark();
NABoolean errStatus = bindWA->errStatus();
tmudfRoutine = getRoutineMetadata(name, tmfuncName, bindWA);
if (tmudfRoutine == NULL)
{
// this could be a predefined TMUDF, which is not
// recorded in the metadata at this time
OperatorTypeEnum opType =
PredefinedTableMappingFunction::nameIsAPredefinedTMF(tmfuncName);
if (opType != REL_TABLE_MAPPING_UDF)
{
// yes, this is a predefined TMUDF
PredefinedTableMappingFunction *result;
// discard the errors from the failed name lookup
CmpCommon::diags()->rewind(diagsMark);
if (!errStatus)
bindWA->resetErrStatus();
// create a new RelExpr
result = new(bindWA->wHeap())
PredefinedTableMappingFunction(
tmfuncName,
const_cast<ItemExpr *>(getProcAllParamsTree()),
opType);
// copy data members of the base classes
TableMappingUDF::copyTopNode(result);
// set children
result->setArity(getArity());
for (int i=0; i<getArity(); i++)
result->child(i) = child(i);
// Abandon the current node and return the bound new node.
      // The next time it reaches this method, it will call an
      // overloaded getRoutineMetadata() that will succeed.
return result->bindNode(bindWA);
}
// getRoutineMetadata has already set the diagnostics area
// and set the error status
CMPASSERT(bindWA->errStatus());
return NULL;
}
// Bind the child nodes.
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
// Use information from child to populate childInfo_
NAHeap *heap = CmpCommon::statementHeap();
for(Int32 i = 0; i < getArity(); i++)
{
NAString childName(heap);
NAColumnArray childColumns(heap) ;
RETDesc *childRetDesc = child(i)->getRETDesc();
// Get Name
LIST(CorrName*) nameList;
childRetDesc->getXTNM().dumpKeys(nameList);
if (nameList.entries() == 1)
{
childName = (nameList[0])->getExposedNameAsString();
}
else
{
childName = "_inputTable" + bindWA->fabricateUniqueName();
}
// ask for histograms of all child outputs, since we don't
// know what the UDF will need and what predicates exist
// on passthru columns of the UDF
bindWA->getCurrentScope()->context()->inWhereClause() = TRUE;
// Get NAColumns
CollIndex numChildCols = childRetDesc->getColumnList()->entries();
for(CollIndex j=0; j < numChildCols; j++)
{
NAColumn * childCol = new (heap) NAColumn(
childRetDesc->getColRefNameObj(j).getColName().data(),
j,
childRetDesc->getType(j).newCopy(heap),
heap);
childColumns.insert(childCol);
bindWA->markAsReferencedColumn(childRetDesc->getValueId(j));
}
bindWA->getCurrentScope()->context()->inWhereClause() = FALSE;
// get child root
CMPASSERT(child(i)->getOperator().match(REL_ROOT) ||
child(i)->getOperator().match(REL_RENAME_TABLE));
RelRoot * myChild;
if (child(i)->getOperator().match(REL_RENAME_TABLE))
myChild = (RelRoot *) (child(i)->child(0).getPtr());
else
myChild = (RelRoot *) child(i).getPtr();
// output vidList from child RetDesc,
// can also get from child Root compExpr
ValueIdList vidList;
childRetDesc->getValueIdList(vidList, USER_COLUMN);
ValueIdSet childPartition(myChild->partitionArrangement());
ValueIdList childOrder(myChild->reqdOrder());
// request multi-column histograms for the PARTITION BY columns
bindWA->getCurrentScope()->context()->inGroupByClause() = TRUE;
// replace 1-based ordinals in the child's partition by / order by with
// actual columns
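    // Example (illustrative): if the child produces columns (a, b, c),
    // a "PARTITION BY 2" entry is replaced here by column b
    // (vidList[1], since the ordinals are 1-based).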
for (ValueId cp=childPartition.init();
childPartition.next(cp);
childPartition.advance(cp))
{
NABoolean negate;
ConstValue *cv = cp.getItemExpr()->castToConstValue(negate);
if (cv &&
cv->canGetExactNumericValue())
{
Lng32 scale = 0;
Int64 ordinal = cv->getExactNumericValue(scale);
if (!negate && scale == 0 && ordinal >= 1 && ordinal <= vidList.entries())
{
// remove this ValueId from the set and add the corresponding
// column value. Note that this won't cause problems with the
// iterator through the set, since we don't need to apply
// this conversion on the new element we are inserting
childPartition -= cp;
childPartition += vidList[ordinal-1];
}
else
{
*CmpCommon::diags()
<< DgSqlCode(-11154)
<< DgInt0(ordinal)
<< DgString0("PARTITION BY")
<< DgInt1(vidList.entries());
bindWA->setErrStatus();
return NULL;
}
}
bindWA->markAsReferencedColumn(cp);
}
bindWA->getCurrentScope()->context()->inGroupByClause() = FALSE;
for (CollIndex co=0; co<childOrder.entries(); co++)
{
NABoolean negate;
ItemExpr *ie = childOrder[co].getItemExpr();
ConstValue *cv = NULL;
if (ie->getOperatorType() == ITM_INVERSE)
ie = ie->child(0);
cv = ie->castToConstValue(negate);
if (cv &&
cv->canGetExactNumericValue())
{
Lng32 scale = 0;
Int64 ordinal = cv->getExactNumericValue(scale);
// replace the const value with the actual column
if (!negate && scale == 0 && ordinal >= 1 && ordinal <= vidList.entries())
if (ie == childOrder[co].getItemExpr())
{
// ascending order
childOrder[co] = vidList[ordinal-1];
}
else
{
// desc order, need to add an InverseOrder on top
ItemExpr *inv = new(bindWA->wHeap()) InverseOrder(
vidList[ordinal-1].getItemExpr());
inv->synthTypeAndValueId();
childOrder[co] = inv->getValueId();
}
else
{
*CmpCommon::diags()
<< DgSqlCode(-11154)
<< DgInt0(ordinal)
<< DgString0("ORDER BY")
<< DgInt1(vidList.entries());
bindWA->setErrStatus();
return NULL;
}
}
}
TableMappingUDFChildInfo * cInfo = new (heap) TableMappingUDFChildInfo(
childName,
childColumns,
myChild->getPartReqType(),
childPartition,
childOrder,
vidList);
childInfo_.insert(cInfo);
}
RoutineDesc *tmudfRoutineDesc = new (bindWA->wHeap()) RoutineDesc(bindWA, tmudfRoutine);
if (tmudfRoutineDesc == NULL || bindWA->errStatus ())
{
// Error
bindWA->setBindingCall (FALSE);
bindWA->setErrStatus ();
return this;
}
setRoutineDesc(tmudfRoutineDesc);
// xcnm will be empty because the routineDesc does not contain any
// output columns yet
RETDesc *rDesc = new (bindWA->wHeap()) RETDesc(bindWA, tmudfRoutineDesc);
bindWA->getCurrentScope()->setRETDesc(rDesc);
setRETDesc(rDesc);
dllInteraction_ = new (bindWA->wHeap()) TMUDFDllInteraction();
// ValueIDList of the actual input parameters
// (tmudfRoutine has formal parameters)
if (getProcAllParamsTree() && (getProcAllParamsVids().isEmpty() == TRUE))
{
((ItemExpr *)getProcAllParamsTree())->convertToValueIdList(
getProcAllParamsVids(), bindWA, ITM_ITEM_LIST);
if (bindWA->errStatus()) return NULL;
// Clear the Tree since we now have gotten vids for all the parameters.
setProcAllParamsTree(NULL);
}
getProcInputParamsVids().insert(getProcAllParamsVids());
// invoke the optional UDF compiler interface or a default
// implementation to validate scalar inputs and produce a list of
// output columns
NABoolean status = dllInteraction_->describeParamsAndMaxOutputs(this, bindWA);
if (!status)
{
bindWA->setErrStatus();
return NULL;
}
checkAndCoerceScalarInputParamTypes(bindWA);
if (bindWA->errStatus())
return NULL;
createOutputVids(bindWA);
if (bindWA->errStatus())
return NULL;
// create a ValueIdMap that allows us to translate
// output columns that are passed through back to
// input columns (outputs of the child), this can
// be used to push down predicates, translate
// required order and partitioning, etc.
status = dllInteraction_->createOutputInputColumnMap(
this,
udfOutputToChildInputMap_);
if (!status)
{
bindWA->setErrStatus();
return NULL;
}
// if this is a maintenance-type operation that must run on
// all nodes of the cluster or must run in parallel, regardless
// of the ATTEMPT_ESP_PARALLELISM CQD, then set a flag in the
// root node
if (getOperatorType() == REL_TABLE_MAPPING_BUILTIN_LOG_READER)
bindWA->getTopRoot()->setMustUseESPs(TRUE);
// add the routine to the UdrStoiList. The UdrStoi list is used
// to check valid privileges
LIST(OptUdrOpenInfo *) udrList = bindWA->getUdrStoiList ();
ULng32 numUdrs = udrList.entries();
NABoolean udrReferenced = FALSE;
// See if UDR already exists
for (ULng32 stoiIndex = 0; stoiIndex < numUdrs; stoiIndex++)
{
if ( 0 ==
udrList[stoiIndex]->getUdrName().compareTo(
getRoutineName().getQualifiedNameAsAnsiString()
)
)
{
udrReferenced = TRUE;
break;
}
}
  // UDR has not been defined, go ahead and add one
if ( FALSE == udrReferenced )
{
SqlTableOpenInfo *udrStoi = new (bindWA->wHeap ())SqlTableOpenInfo ();
udrStoi->setAnsiName ( convertNAString(
getRoutineName().getQualifiedNameAsAnsiString(),
bindWA->wHeap ())
);
OptUdrOpenInfo *udrOpenInfo = new (bindWA->wHeap ())
OptUdrOpenInfo( udrStoi
, getRoutineName().getQualifiedNameAsAnsiString()
, (NARoutine *)getNARoutine()
);
bindWA->getUdrStoiList().insert(udrOpenInfo);
}
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus())
return NULL;
return boundExpr;
}
RelExpr * FastExtract::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
// check validity of target location
if (getTargetType() == FILE)
{
char reasonMsg[256];
NABoolean raiseError = FALSE;
if ((unsigned char)(getTargetName().data()[0]) != SLASH_C)
{
raiseError = TRUE;
sprintf(reasonMsg,"Relative path name was used");
}
else if (getTargetName().length() > 512)
{
raiseError = TRUE;
sprintf(reasonMsg,"Length exceeds 512 characters");
}
else
{
char * sqroot = getenv("MY_SQROOT");
if (sqroot && (! CmpCommon::context()->getSqlmxRegress()) &&
(strncmp(sqroot, getTargetName().data(),strlen(sqroot)) == 0))
{
raiseError = TRUE;
sprintf(reasonMsg,"Database system directory was used");
}
}
if (raiseError && strncmp(getTargetName().data(),"hdfs://",7) != 0 )
{
*CmpCommon::diags() << DgSqlCode(-4378) << DgString0(reasonMsg) ;
bindWA->setErrStatus();
return NULL;
}
}
if (getDelimiter().length() == 0)
{
delimiter_ = ActiveSchemaDB()->getDefaults().getValue(TRAF_UNLOAD_DEF_DELIMITER);
}
// if inserting into a hive table and an explicit null string was
// not specified in the unload command, and the target table has a user
// specified null format string, then use it.
if ((isHiveInsert()) &&
(hiveTableDesc_ && hiveTableDesc_->getNATable() &&
hiveTableDesc_->getNATable()->getClusteringIndex()) &&
(NOT nullStringSpec_))
{
const HHDFSTableStats* hTabStats =
hiveTableDesc_->getNATable()->getClusteringIndex()->getHHDFSTableStats();
if (hTabStats->getNullFormat())
{
nullString_ = hTabStats->getNullFormat();
nullStringSpec_ = TRUE;
}
}
// if an explicit or user specified null format was not used, then
// use the default null string.
if (NOT nullStringSpec_)
{
nullString_ = HIVE_DEFAULT_NULL_STRING;
}
if (getRecordSeparator().length() == 0)
{
recordSeparator_ = ActiveSchemaDB()->getDefaults().getValue(TRAF_UNLOAD_DEF_RECORD_SEPARATOR);
}
if (!isHiveInsert())
{
bindWA->setIsFastExtract();
}
// Bind the child nodes.
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
// Use information from child to populate childInfo_
NAHeap *heap = CmpCommon::statementHeap();
RETDesc *childRETDesc = child(0)->getRETDesc();
// output vidList from child RetDesc,
// can also get from child Root compExpr
ValueIdList vidList;
childRETDesc->getValueIdList(vidList, USER_COLUMN);
if (isHiveInsert())
{
// validate number of columns and column types of the select list
ValueIdList tgtCols;
hiveTableDesc_->getUserColumnList(tgtCols);
if (vidList.entries() != tgtCols.entries())
{
// 4023 degree of row value constructor must equal that of target table
*CmpCommon::diags() << DgSqlCode(-4023)
<< DgInt0(vidList.entries())
<< DgInt1(tgtCols.entries());
bindWA->setErrStatus();
return NULL;
}
// Check that the source and target types are compatible.
for (CollIndex j=0; j<vidList.entries(); j++)
{
Assign *tmpAssign = new(bindWA->wHeap())
Assign(tgtCols[j].getItemExpr(), vidList[j].getItemExpr());
if ( CmpCommon::getDefault(ALLOW_IMPLICIT_CHAR_CASTING) == DF_ON )
tmpAssign->tryToDoImplicitCasting(bindWA);
const NAType *targetType = tmpAssign->synthesizeType();
if (!targetType) {
bindWA->setErrStatus();
return NULL;
}
}
}
setSelectList(vidList);
if (includeHeader())
{
const ColumnDescList &columnsRET = *(childRETDesc->getColumnList());
for (CollIndex i = 0; i < columnsRET.entries(); i++)
{
if (columnsRET[i]->getHeading())
header_ += columnsRET[i]->getHeading();
else if (!(columnsRET[i]->getColRefNameObj().isEmpty()))
header_ += columnsRET[i]->getColRefNameObj().getColName();
else
header_ += "EXPR";
if (i < (columnsRET.entries() -1))
{
header_ += " ";
header_ += delimiter_;
header_ += " ";
}
}
}
else
{
header_ = "NO_HEADER" ;
}
// no rows are returned from this operator.
// Allocate an empty RETDesc and attach it to this and the BindScope.
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA));
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus()) return NULL;
return boundExpr;
}
RelExpr * ControlRunningQuery::bindNode(BindWA *bindWA)
{
if (nodeIsBound()) {
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
//
// Check to see if user is authorized to control this query.
//
if (!isUserAuthorized(bindWA))
return NULL;
//
// Bind the child nodes.
//
bindChildren(bindWA);
if (bindWA->errStatus())
return this;
// no rows are returned from this operator.
// Allocate an empty RETDesc and attach it to this and the BindScope.
//
setRETDesc(new (bindWA->wHeap()) RETDesc(bindWA));
//
// Bind the base class.
//
RelExpr *boundExpr = bindSelf(bindWA);
if (bindWA->errStatus())
return boundExpr;
ValueIdSet ov;
getPotentialOutputValues(ov);
getGroupAttr()->addCharacteristicOutputs(ov);
return boundExpr;
} // ControlRunningQuery::bindNode()
bool ControlRunningQuery::isUserAuthorized(BindWA *bindWA)
{
bool userHasPriv = false;
Int32 sessionID = ComUser::getSessionUser();
// Check to see if the current user owns the query id.
// This only has to be done for the Cancel query request.
// This option to check privilege is not available unless
// the query Id was supplied.
if ((action_ == Cancel) &&
(qs_ == ControlQid))
{
// The user ID associated with the query is stored in the QID.
// To be safe, copy the QID to a character string.
Int32 qidLen = queryId_.length();
char *pQid = new (bindWA->wHeap()) char[qidLen+1];
str_cpy_all(pQid, queryId_.data(), qidLen);
pQid[qidLen] = '\0';
// Set up the returned parameters
// Max username can be (128 * 2) + 2 (delimiters) + 1 (null indicator)
char username[2 * MAX_USERNAME_LEN + 2 + 1];
Int64 usernameLen = sizeof(username) - 1;
// Call function to extract the username from the QID
Int32 retcode = ComSqlId::getSqlQueryIdAttr(ComSqlId::SQLQUERYID_USERNAME,
pQid,
qidLen,
usernameLen,
&username[0]);
if (retcode == 0)
{
      // The username stored in the QID is actually the userID preceded by
      // a "U". Check for a U and convert the succeeding characters
      // to integer. This integer value is compared against the current userID.
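      // e.g. a stored value of "U1001" (hypothetical) yields user ID 1001.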
username[usernameLen] = '\0';
if (username[0] == 'U')
{
Int64 userID = str_atoi(&username[1],usernameLen - 1);
if (sessionID == userID || sessionID == ComUser::getRootUserID())
userHasPriv = true;
}
// If userName does not begin with a 'U', ignore and continue
}
// If retcode != 0, continue, an invalid QID could be specified which
// is checked later in the code
}
// The current user does not own the query, see if the current user has
// the correct QUERY privilege. Code above only supports cancel, but other
// checks could be added. Component checks for all query operations.
if (!userHasPriv)
{
SQLOperation operation;
switch (ControlRunningQuery::action_)
{
case ControlRunningQuery::Suspend:
operation = SQLOperation::QUERY_SUSPEND;
break;
case ControlRunningQuery::Activate:
operation = SQLOperation::QUERY_ACTIVATE;
break;
case ControlRunningQuery::Cancel:
operation = SQLOperation::QUERY_CANCEL;
break;
default:
operation = SQLOperation::UNKNOWN;
}
NAString privMDLoc = CmpSeabaseDDL::getSystemCatalogStatic();
privMDLoc += ".\"";
privMDLoc += SEABASE_PRIVMGR_SCHEMA;
privMDLoc += "\"";
PrivMgrComponentPrivileges componentPriv(
privMDLoc.data(),CmpCommon::diags());
userHasPriv = componentPriv.hasSQLPriv(sessionID,operation,true);
if (!userHasPriv)
{
// ANSI requests a special SqlState for cancel requests
if (ControlRunningQuery::action_ == ControlRunningQuery::Cancel)
*CmpCommon::diags() << DgSqlCode(-8029);
else
*CmpCommon::diags() << DgSqlCode(-1017);
bindWA->setErrStatus();
}
if (bindWA->errStatus())
return false;
}
return true;
}// ControlRunningQuery::isUserAuthorized()
RelExpr * OSIMControl::bindNode(BindWA *bindWA)
{
if (nodeIsBound())
{
bindWA->getCurrentScope()->setRETDesc(getRETDesc());
return this;
}
//Create OptimizerSimulator if this is called first time.
if(!CURRCONTEXT_OPTSIMULATOR)
CURRCONTEXT_OPTSIMULATOR = new(CTXTHEAP) OptimizerSimulator(CTXTHEAP);
    //in response to the force option of osim load,
    //e.g. osim load from '/xxx/xxx/osim-dir', force
    //if true, when loading osim tables/views/indexes,
    //existing objects with the same qualified name
    //will be dropped first
CURRCONTEXT_OPTSIMULATOR->setForceLoad(isForceLoad());
//Set OSIM mode
if(!CURRCONTEXT_OPTSIMULATOR->setOsimModeAndLogDir(targetMode_, osimLocalDir_.data()))
{
bindWA->setErrStatus();
return this;
}
return ControlAbstractClass::bindNode(bindWA);
}
| 1 | 13,151 | Here are two things I wonder: First, would it make sense to replace the TRUE here with "leftExpr->getValueId().getType().supportsSQLnull() && rightExpr->getValueId().getType().supportsSQLnull()". I'm not sure we have optimizations elsewhere that set the "special nulls" semantics back to FALSE if one of the operands it not nullable. Second, I wonder whether we have bugs elsewhere in the code that may not check for the "special nulls" semantics. For example, hash and merge joins probably can't use such predicates as equi-join predicates - one of the reasons for the optimization above. When I look at method ItemExpr::isAnEquiJoinPredicate() in core/sql/optimizer/OptItemExpr.cpp, I don't see a check for that. You have an example with a NULL value that is working, so hopefully this is not an issue, but I'm not sure how and why it works. | apache-trafodion | cpp |
@@ -267,7 +267,8 @@ T *LookupTableByName(const SymbolTable<T> &table, const std::string &name,
TD(StringConstant, 257, "string constant") \
TD(IntegerConstant, 258, "integer constant") \
TD(FloatConstant, 259, "float constant") \
- TD(Identifier, 260, "identifier")
+ TD(NumericConstant, 260, "nan, inf or function name (signed)") \
+ TD(Identifier, 261, "identifier")
#ifdef __GNUC__
__extension__ // Stop GCC complaining about trailing comma with -Wpedantic.
#endif | 1 | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cmath>
#include <list>
#include <string>
#include <utility>
#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"
namespace flatbuffers {
// Reflects the version at the compiling time of binary(lib/dll/so).
const char *FLATBUFFERS_VERSION() {
// clang-format off
return
FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MAJOR) "."
FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MINOR) "."
FLATBUFFERS_STRING(FLATBUFFERS_VERSION_REVISION);
// clang-format on
}
const double kPi = 3.14159265358979323846;
// clang-format off
const char *const kTypeNames[] = {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, ...) \
IDLTYPE,
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
nullptr
};
const char kTypeSizes[] = {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...) \
sizeof(CTYPE),
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
};
// clang-format on
// The enums in the reflection schema should match the ones we use internally.
// Compare the last element to check if these go out of sync.
static_assert(BASE_TYPE_UNION == static_cast<BaseType>(reflection::Union),
"enums don't match");
// Any parsing calls have to be wrapped in this macro, which automates
// handling of recursive error checking a bit. It will check the received
// CheckedError object, and return straight away on error.
#define ECHECK(call) \
{ \
auto ce = (call); \
if (ce.Check()) return ce; \
}
// These two functions are called hundreds of times below, so define a short
// form:
#define NEXT() ECHECK(Next())
#define EXPECT(tok) ECHECK(Expect(tok))
static bool ValidateUTF8(const std::string &str) {
const char *s = &str[0];
const char *const sEnd = s + str.length();
while (s < sEnd) {
if (FromUTF8(&s) < 0) { return false; }
}
return true;
}
static bool IsLowerSnakeCase(const std::string &str) {
for (size_t i = 0; i < str.length(); i++) {
char c = str[i];
if (!check_ascii_range(c, 'a', 'z') && !is_digit(c) && c != '_') {
return false;
}
}
return true;
}
// Convert an underscore_based_identifier into camelCase.
// Also uppercases the first character if first is true.
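// e.g. MakeCamel("hello_world", true) -> "HelloWorld",
//      MakeCamel("hello_world", false) -> "helloWorld".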
std::string MakeCamel(const std::string &in, bool first) {
std::string s;
for (size_t i = 0; i < in.length(); i++) {
if (!i && first)
s += CharToUpper(in[0]);
else if (in[i] == '_' && i + 1 < in.length())
s += CharToUpper(in[++i]);
else
s += in[i];
}
return s;
}
// Convert an underscore_based_identifier into screaming snake case.
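// e.g. MakeScreamingCamel("hello_world") -> "HELLO_WORLD".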
std::string MakeScreamingCamel(const std::string &in) {
std::string s;
for (size_t i = 0; i < in.length(); i++) {
if (in[i] != '_')
s += CharToUpper(in[i]);
else
s += in[i];
}
return s;
}
void DeserializeDoc(std::vector<std::string> &doc,
const Vector<Offset<String>> *documentation) {
if (documentation == nullptr) return;
for (uoffset_t index = 0; index < documentation->size(); index++)
doc.push_back(documentation->Get(index)->str());
}
void Parser::Message(const std::string &msg) {
if (!error_.empty()) error_ += "\n"; // log all warnings and errors
error_ += file_being_parsed_.length() ? AbsolutePath(file_being_parsed_) : "";
// clang-format off
#ifdef _WIN32 // MSVC alike
error_ +=
"(" + NumToString(line_) + ", " + NumToString(CursorPosition()) + ")";
#else // gcc alike
if (file_being_parsed_.length()) error_ += ":";
error_ += NumToString(line_) + ": " + NumToString(CursorPosition());
#endif
// clang-format on
error_ += ": " + msg;
}
void Parser::Warning(const std::string &msg) {
if (!opts.no_warnings) Message("warning: " + msg);
}
CheckedError Parser::Error(const std::string &msg) {
Message("error: " + msg);
return CheckedError(true);
}
inline CheckedError NoError() { return CheckedError(false); }
CheckedError Parser::RecurseError() {
return Error("maximum parsing depth " + NumToString(parse_depth_counter_) +
" reached");
}
class Parser::ParseDepthGuard {
public:
explicit ParseDepthGuard(Parser *parser_not_null)
: parser_(*parser_not_null), caller_depth_(parser_.parse_depth_counter_) {
FLATBUFFERS_ASSERT(caller_depth_ <= (FLATBUFFERS_MAX_PARSING_DEPTH) &&
"Check() must be called to prevent stack overflow");
parser_.parse_depth_counter_ += 1;
}
~ParseDepthGuard() { parser_.parse_depth_counter_ -= 1; }
CheckedError Check() {
return caller_depth_ >= (FLATBUFFERS_MAX_PARSING_DEPTH)
? parser_.RecurseError()
: CheckedError(false);
}
FLATBUFFERS_DELETE_FUNC(ParseDepthGuard(const ParseDepthGuard &));
FLATBUFFERS_DELETE_FUNC(ParseDepthGuard &operator=(const ParseDepthGuard &));
private:
Parser &parser_;
const int caller_depth_;
};
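// Produce "[min; max]" for the numeric type T, e.g.
// TypeToIntervalString<int16_t>() yields "[-32768; 32767]".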
template<typename T> std::string TypeToIntervalString() {
return "[" + NumToString((flatbuffers::numeric_limits<T>::lowest)()) + "; " +
NumToString((flatbuffers::numeric_limits<T>::max)()) + "]";
}
// atot: template version of atoi/atof: convert a string to an instance of T.
template<typename T>
bool atot_scalar(const char *s, T *val, bool_constant<false>) {
return StringToNumber(s, val);
}
template<typename T>
bool atot_scalar(const char *s, T *val, bool_constant<true>) {
// Normalize NaN parsed from fbs or json to unsigned NaN.
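  // (*val != *val) is true only for NaN; std::fabs clears the sign bit,
  // so "nan" and "-nan" both end up stored as the same positive NaN.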
if (false == StringToNumber(s, val)) return false;
*val = (*val != *val) ? std::fabs(*val) : *val;
return true;
}
template<typename T> CheckedError atot(const char *s, Parser &parser, T *val) {
auto done = atot_scalar(s, val, bool_constant<is_floating_point<T>::value>());
if (done) return NoError();
if (0 == *val)
return parser.Error("invalid number: \"" + std::string(s) + "\"");
else
return parser.Error("invalid number: \"" + std::string(s) + "\"" +
", constant does not fit " + TypeToIntervalString<T>());
}
template<>
inline CheckedError atot<Offset<void>>(const char *s, Parser &parser,
Offset<void> *val) {
(void)parser;
*val = Offset<void>(atoi(s));
return NoError();
}
std::string Namespace::GetFullyQualifiedName(const std::string &name,
size_t max_components) const {
// Early exit if we don't have a defined namespace.
if (components.empty() || !max_components) { return name; }
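  // Join up to max_components namespace components with '.' and append
  // the name, e.g. components {"A", "B"} and name "T" give "A.B.T".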
std::string stream_str;
for (size_t i = 0; i < std::min(components.size(), max_components); i++) {
stream_str += components[i];
stream_str += '.';
}
if (!stream_str.empty()) stream_str.pop_back();
if (name.length()) {
stream_str += '.';
stream_str += name;
}
return stream_str;
}
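// Look up "name" by trying progressively shorter namespace prefixes of
// current_namespace (ignoring the last skip_top components), e.g. in
// namespace A.B it tries "A.B.name", then "A.name", then plain "name".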
template<typename T>
T *LookupTableByName(const SymbolTable<T> &table, const std::string &name,
const Namespace ¤t_namespace, size_t skip_top) {
const auto &components = current_namespace.components;
if (table.dict.empty()) return nullptr;
if (components.size() < skip_top) return nullptr;
const auto N = components.size() - skip_top;
std::string full_name;
for (size_t i = 0; i < N; i++) {
full_name += components[i];
full_name += '.';
}
for (size_t i = N; i > 0; i--) {
full_name += name;
auto obj = table.Lookup(full_name);
if (obj) return obj;
auto len = full_name.size() - components[i - 1].size() - 1 - name.size();
full_name.resize(len);
}
FLATBUFFERS_ASSERT(full_name.empty());
return table.Lookup(name); // lookup in global namespace
}
// Declare tokens we'll use. Single character tokens are represented by their
// ascii character code (e.g. '{'), others above 256.
// clang-format off
#define FLATBUFFERS_GEN_TOKENS(TD) \
TD(Eof, 256, "end of file") \
TD(StringConstant, 257, "string constant") \
TD(IntegerConstant, 258, "integer constant") \
TD(FloatConstant, 259, "float constant") \
TD(Identifier, 260, "identifier")
#ifdef __GNUC__
__extension__ // Stop GCC complaining about trailing comma with -Wpedantic.
#endif
enum {
#define FLATBUFFERS_TOKEN(NAME, VALUE, STRING) kToken ## NAME = VALUE,
FLATBUFFERS_GEN_TOKENS(FLATBUFFERS_TOKEN)
#undef FLATBUFFERS_TOKEN
};
static std::string TokenToString(int t) {
static const char * const tokens[] = {
#define FLATBUFFERS_TOKEN(NAME, VALUE, STRING) STRING,
FLATBUFFERS_GEN_TOKENS(FLATBUFFERS_TOKEN)
#undef FLATBUFFERS_TOKEN
#define FLATBUFFERS_TD(ENUM, IDLTYPE, ...) \
IDLTYPE,
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
};
if (t < 256) { // A single ascii char token.
std::string s;
s.append(1, static_cast<char>(t));
return s;
} else { // Other tokens.
return tokens[t - 256];
}
}
// clang-format on
std::string Parser::TokenToStringId(int t) const {
return t == kTokenIdentifier ? attribute_ : TokenToString(t);
}
// Parses exactly nibbles worth of hex digits into a number, or error.
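// e.g. for the escape sequence "\x41", nibbles == 2 and *val becomes 0x41.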
CheckedError Parser::ParseHexNum(int nibbles, uint64_t *val) {
FLATBUFFERS_ASSERT(nibbles > 0);
for (int i = 0; i < nibbles; i++)
if (!is_xdigit(cursor_[i]))
return Error("escape code must be followed by " + NumToString(nibbles) +
" hex digits");
std::string target(cursor_, cursor_ + nibbles);
*val = StringToUInt(target.c_str(), 16);
cursor_ += nibbles;
return NoError();
}
CheckedError Parser::SkipByteOrderMark() {
if (static_cast<unsigned char>(*cursor_) != 0xef) return NoError();
cursor_++;
if (static_cast<unsigned char>(*cursor_) != 0xbb)
return Error("invalid utf-8 byte order mark");
cursor_++;
if (static_cast<unsigned char>(*cursor_) != 0xbf)
return Error("invalid utf-8 byte order mark");
cursor_++;
return NoError();
}
static inline bool IsIdentifierStart(char c) {
return is_alpha(c) || (c == '_');
}
CheckedError Parser::Next() {
doc_comment_.clear();
bool seen_newline = cursor_ == source_;
attribute_.clear();
attr_is_trivial_ascii_string_ = true;
for (;;) {
char c = *cursor_++;
token_ = c;
switch (c) {
case '\0':
cursor_--;
token_ = kTokenEof;
return NoError();
case ' ':
case '\r':
case '\t': break;
case '\n':
MarkNewLine();
seen_newline = true;
break;
case '{':
case '}':
case '(':
case ')':
case '[':
case ']':
case ',':
case ':':
case ';':
case '=': return NoError();
case '\"':
case '\'': {
int unicode_high_surrogate = -1;
while (*cursor_ != c) {
if (*cursor_ < ' ' && static_cast<signed char>(*cursor_) >= 0)
return Error("illegal character in string constant");
if (*cursor_ == '\\') {
attr_is_trivial_ascii_string_ = false; // has escape sequence
cursor_++;
if (unicode_high_surrogate != -1 && *cursor_ != 'u') {
return Error(
"illegal Unicode sequence (unpaired high surrogate)");
}
switch (*cursor_) {
case 'n':
attribute_ += '\n';
cursor_++;
break;
case 't':
attribute_ += '\t';
cursor_++;
break;
case 'r':
attribute_ += '\r';
cursor_++;
break;
case 'b':
attribute_ += '\b';
cursor_++;
break;
case 'f':
attribute_ += '\f';
cursor_++;
break;
case '\"':
attribute_ += '\"';
cursor_++;
break;
case '\'':
attribute_ += '\'';
cursor_++;
break;
case '\\':
attribute_ += '\\';
cursor_++;
break;
case '/':
attribute_ += '/';
cursor_++;
break;
case 'x': { // Not in the JSON standard
cursor_++;
uint64_t val;
ECHECK(ParseHexNum(2, &val));
attribute_ += static_cast<char>(val);
break;
}
case 'u': {
cursor_++;
uint64_t val;
ECHECK(ParseHexNum(4, &val));
if (val >= 0xD800 && val <= 0xDBFF) {
if (unicode_high_surrogate != -1) {
return Error(
"illegal Unicode sequence (multiple high surrogates)");
} else {
unicode_high_surrogate = static_cast<int>(val);
}
} else if (val >= 0xDC00 && val <= 0xDFFF) {
if (unicode_high_surrogate == -1) {
return Error(
"illegal Unicode sequence (unpaired low surrogate)");
} else {
int code_point = 0x10000 +
((unicode_high_surrogate & 0x03FF) << 10) +
(val & 0x03FF);
ToUTF8(code_point, &attribute_);
unicode_high_surrogate = -1;
}
} else {
if (unicode_high_surrogate != -1) {
return Error(
"illegal Unicode sequence (unpaired high surrogate)");
}
ToUTF8(static_cast<int>(val), &attribute_);
}
break;
}
default: return Error("unknown escape code in string constant");
}
} else { // printable chars + UTF-8 bytes
if (unicode_high_surrogate != -1) {
return Error(
"illegal Unicode sequence (unpaired high surrogate)");
}
// reset if non-printable
attr_is_trivial_ascii_string_ &=
check_ascii_range(*cursor_, ' ', '~');
attribute_ += *cursor_++;
}
}
if (unicode_high_surrogate != -1) {
return Error("illegal Unicode sequence (unpaired high surrogate)");
}
cursor_++;
if (!attr_is_trivial_ascii_string_ && !opts.allow_non_utf8 &&
!ValidateUTF8(attribute_)) {
return Error("illegal UTF-8 sequence");
}
token_ = kTokenStringConstant;
return NoError();
}
case '/':
if (*cursor_ == '/') {
const char *start = ++cursor_;
while (*cursor_ && *cursor_ != '\n' && *cursor_ != '\r') cursor_++;
if (*start == '/') { // documentation comment
if (!seen_newline)
return Error(
"a documentation comment should be on a line on its own");
doc_comment_.push_back(std::string(start + 1, cursor_));
}
break;
} else if (*cursor_ == '*') {
cursor_++;
// TODO: make nested.
while (*cursor_ != '*' || cursor_[1] != '/') {
if (*cursor_ == '\n') MarkNewLine();
if (!*cursor_) return Error("end of file in comment");
cursor_++;
}
cursor_ += 2;
break;
}
FLATBUFFERS_FALLTHROUGH(); // else fall thru
default:
const auto has_sign = (c == '+') || (c == '-');
        // A '-' or '+' followed by an identifier can be a predefined
        // constant like NAN, INF, PI, etc., or a function name like
        // cos/sin/deg.
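        // e.g. "-inf" or "+nan" is collected below with the sign included
        // and tokenized as a string constant rather than an identifier.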
if (IsIdentifierStart(c) || (has_sign && IsIdentifierStart(*cursor_))) {
// Collect all chars of an identifier:
const char *start = cursor_ - 1;
while (IsIdentifierStart(*cursor_) || is_digit(*cursor_)) cursor_++;
attribute_.append(start, cursor_);
token_ = has_sign ? kTokenStringConstant : kTokenIdentifier;
return NoError();
}
auto dot_lvl =
(c == '.') ? 0 : 1; // dot_lvl==0 <=> exactly one '.' seen
if (!dot_lvl && !is_digit(*cursor_)) return NoError(); // enum?
// Parser accepts hexadecimal-floating-literal (see C++ 5.13.4).
if (is_digit(c) || has_sign || !dot_lvl) {
const auto start = cursor_ - 1;
auto start_digits = !is_digit(c) ? cursor_ : cursor_ - 1;
if (!is_digit(c) && is_digit(*cursor_)) {
start_digits = cursor_; // see digit in cursor_ position
c = *cursor_++;
}
          // hex-float can't begin with '.'
auto use_hex = dot_lvl && (c == '0') && is_alpha_char(*cursor_, 'X');
if (use_hex) start_digits = ++cursor_; // '0x' is the prefix, skip it
          // Read an integer number or the mantissa of a floating-point number.
do {
if (use_hex) {
while (is_xdigit(*cursor_)) cursor_++;
} else {
while (is_digit(*cursor_)) cursor_++;
}
} while ((*cursor_ == '.') && (++cursor_) && (--dot_lvl >= 0));
        // Exponent of a floating-point number.
if ((dot_lvl >= 0) && (cursor_ > start_digits)) {
// The exponent suffix of hexadecimal float number is mandatory.
if (use_hex && !dot_lvl) start_digits = cursor_;
if ((use_hex && is_alpha_char(*cursor_, 'P')) ||
is_alpha_char(*cursor_, 'E')) {
            dot_lvl = 0; // Emulate dot to signal a floating-point number.
cursor_++;
if (*cursor_ == '+' || *cursor_ == '-') cursor_++;
start_digits = cursor_; // the exponent-part has to have digits
// Exponent is decimal integer number
while (is_digit(*cursor_)) cursor_++;
if (*cursor_ == '.') {
              cursor_++; // If we see a dot, treat it as part of an invalid number.
dot_lvl = -1; // Fall thru to Error().
}
}
}
// Finalize.
if ((dot_lvl >= 0) && (cursor_ > start_digits)) {
attribute_.append(start, cursor_);
token_ = dot_lvl ? kTokenIntegerConstant : kTokenFloatConstant;
return NoError();
} else {
return Error("invalid number: " + std::string(start, cursor_));
}
}
std::string ch;
ch = c;
if (false == check_ascii_range(c, ' ', '~'))
ch = "code: " + NumToString(c);
return Error("illegal character: " + ch);
}
}
}
// Check if a given token is next.
bool Parser::Is(int t) const { return t == token_; }
bool Parser::IsIdent(const char *id) const {
return token_ == kTokenIdentifier && attribute_ == id;
}
// Expect a given token to be next, consume it, or error if not present.
CheckedError Parser::Expect(int t) {
if (t != token_) {
return Error("expecting: " + TokenToString(t) +
" instead got: " + TokenToStringId(token_));
}
NEXT();
return NoError();
}
CheckedError Parser::ParseNamespacing(std::string *id, std::string *last) {
while (Is('.')) {
NEXT();
*id += ".";
*id += attribute_;
if (last) *last = attribute_;
EXPECT(kTokenIdentifier);
}
return NoError();
}
EnumDef *Parser::LookupEnum(const std::string &id) {
// Search thru parent namespaces.
return LookupTableByName(enums_, id, *current_namespace_, 0);
}
StructDef *Parser::LookupStruct(const std::string &id) const {
auto sd = structs_.Lookup(id);
if (sd) sd->refcount++;
return sd;
}
StructDef *Parser::LookupStructThruParentNamespaces(
const std::string &id) const {
auto sd = LookupTableByName(structs_, id, *current_namespace_, 1);
if (sd) sd->refcount++;
return sd;
}
CheckedError Parser::ParseTypeIdent(Type &type) {
std::string id = attribute_;
EXPECT(kTokenIdentifier);
ECHECK(ParseNamespacing(&id, nullptr));
auto enum_def = LookupEnum(id);
if (enum_def) {
type = enum_def->underlying_type;
if (enum_def->is_union) type.base_type = BASE_TYPE_UNION;
} else {
type.base_type = BASE_TYPE_STRUCT;
type.struct_def = LookupCreateStruct(id);
}
return NoError();
}
// Parse any IDL type.
CheckedError Parser::ParseType(Type &type) {
if (token_ == kTokenIdentifier) {
if (IsIdent("bool")) {
type.base_type = BASE_TYPE_BOOL;
NEXT();
} else if (IsIdent("byte") || IsIdent("int8")) {
type.base_type = BASE_TYPE_CHAR;
NEXT();
} else if (IsIdent("ubyte") || IsIdent("uint8")) {
type.base_type = BASE_TYPE_UCHAR;
NEXT();
} else if (IsIdent("short") || IsIdent("int16")) {
type.base_type = BASE_TYPE_SHORT;
NEXT();
} else if (IsIdent("ushort") || IsIdent("uint16")) {
type.base_type = BASE_TYPE_USHORT;
NEXT();
} else if (IsIdent("int") || IsIdent("int32")) {
type.base_type = BASE_TYPE_INT;
NEXT();
} else if (IsIdent("uint") || IsIdent("uint32")) {
type.base_type = BASE_TYPE_UINT;
NEXT();
} else if (IsIdent("long") || IsIdent("int64")) {
type.base_type = BASE_TYPE_LONG;
NEXT();
} else if (IsIdent("ulong") || IsIdent("uint64")) {
type.base_type = BASE_TYPE_ULONG;
NEXT();
} else if (IsIdent("float") || IsIdent("float32")) {
type.base_type = BASE_TYPE_FLOAT;
NEXT();
} else if (IsIdent("double") || IsIdent("float64")) {
type.base_type = BASE_TYPE_DOUBLE;
NEXT();
} else if (IsIdent("string")) {
type.base_type = BASE_TYPE_STRING;
NEXT();
} else {
ECHECK(ParseTypeIdent(type));
}
} else if (token_ == '[') {
ParseDepthGuard depth_guard(this);
ECHECK(depth_guard.Check());
NEXT();
Type subtype;
ECHECK(ParseType(subtype));
if (IsSeries(subtype)) {
// We could support this, but it will complicate things, and it's
// easier to work around with a struct around the inner vector.
return Error("nested vector types not supported (wrap in table first)");
}
if (token_ == ':') {
NEXT();
if (token_ != kTokenIntegerConstant) {
return Error("length of fixed-length array must be an integer value");
}
uint16_t fixed_length = 0;
bool check = StringToNumber(attribute_.c_str(), &fixed_length);
if (!check || fixed_length < 1) {
return Error(
"length of fixed-length array must be positive and fit to "
"uint16_t type");
}
type = Type(BASE_TYPE_ARRAY, subtype.struct_def, subtype.enum_def,
fixed_length);
NEXT();
} else {
type = Type(BASE_TYPE_VECTOR, subtype.struct_def, subtype.enum_def);
}
type.element = subtype.base_type;
EXPECT(']');
} else {
return Error("illegal type syntax");
}
return NoError();
}
CheckedError Parser::AddField(StructDef &struct_def, const std::string &name,
const Type &type, FieldDef **dest) {
auto &field = *new FieldDef();
field.value.offset =
FieldIndexToOffset(static_cast<voffset_t>(struct_def.fields.vec.size()));
field.name = name;
field.file = struct_def.file;
field.value.type = type;
if (struct_def.fixed) { // statically compute the field offset
auto size = InlineSize(type);
auto alignment = InlineAlignment(type);
// structs_ need to have a predictable format, so we need to align to
// the largest scalar
struct_def.minalign = std::max(struct_def.minalign, alignment);
struct_def.PadLastField(alignment);
field.value.offset = static_cast<voffset_t>(struct_def.bytesize);
struct_def.bytesize += size;
}
if (struct_def.fields.Add(name, &field))
return Error("field already exists: " + name);
*dest = &field;
return NoError();
}
CheckedError Parser::ParseField(StructDef &struct_def) {
std::string name = attribute_;
if (LookupCreateStruct(name, false, false))
return Error("field name can not be the same as table/struct name");
if (!IsLowerSnakeCase(name)) {
Warning("field names should be lowercase snake_case, got: " + name);
}
std::vector<std::string> dc = doc_comment_;
EXPECT(kTokenIdentifier);
EXPECT(':');
Type type;
ECHECK(ParseType(type));
if (struct_def.fixed) {
auto valid = IsScalar(type.base_type) || IsStruct(type);
if (!valid && IsArray(type)) {
const auto &elem_type = type.VectorType();
valid |= IsScalar(elem_type.base_type) || IsStruct(elem_type);
}
if (!valid)
return Error("structs may contain only scalar or struct fields");
}
if (!struct_def.fixed && IsArray(type))
return Error("fixed-length array in table must be wrapped in struct");
if (IsArray(type) && !SupportsAdvancedArrayFeatures()) {
return Error(
"Arrays are not yet supported in all "
"the specified programming languages.");
}
FieldDef *typefield = nullptr;
if (type.base_type == BASE_TYPE_UNION) {
// For union fields, add a second auto-generated field to hold the type,
// with a special suffix.
ECHECK(AddField(struct_def, name + UnionTypeFieldSuffix(),
type.enum_def->underlying_type, &typefield));
} else if (IsVector(type) && type.element == BASE_TYPE_UNION) {
// Only cpp, js and ts supports the union vector feature so far.
if (!SupportsAdvancedUnionFeatures()) {
return Error(
"Vectors of unions are not yet supported in at least one of "
"the specified programming languages.");
}
// For vector of union fields, add a second auto-generated vector field to
// hold the types, with a special suffix.
Type union_vector(BASE_TYPE_VECTOR, nullptr, type.enum_def);
union_vector.element = BASE_TYPE_UTYPE;
ECHECK(AddField(struct_def, name + UnionTypeFieldSuffix(), union_vector,
&typefield));
}
FieldDef *field;
ECHECK(AddField(struct_def, name, type, &field));
if (token_ == '=') {
NEXT();
ECHECK(ParseSingleValue(&field->name, field->value, true));
if (IsStruct(type) || (struct_def.fixed && field->value.constant != "0"))
return Error(
"default values are not supported for struct fields, table fields, "
"or in structs.");
if ((IsString(type) || IsVector(type)) && field->value.constant != "0" &&
field->value.constant != "null" && !SupportsDefaultVectorsAndStrings())
return Error(
"Default values for strings and vectors are not supported in one of "
"the specified programming languages");
if (IsVector(type) && field->value.constant != "0" &&
field->value.constant != "[]") {
return Error("The only supported default for vectors is `[]`.");
}
}
  // Append .0 if the value does not have it (skip hex and scientific floats).
  // This suffix is needed for generated C++ code.
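  // e.g. "1" becomes "1.0", while "2.5" and "1e3" already contain one of
  // ".eEpP" and are left unchanged.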
if (IsFloat(type.base_type)) {
auto &text = field->value.constant;
FLATBUFFERS_ASSERT(false == text.empty());
auto s = text.c_str();
while (*s == ' ') s++;
if (*s == '-' || *s == '+') s++;
    // 1) A float constant (nan, inf, pi, etc) is a kind of identifier.
    // 2) A float number doesn't need ".0" at the end if it has an exponent.
if ((false == IsIdentifierStart(*s)) &&
(std::string::npos == field->value.constant.find_first_of(".eEpP"))) {
field->value.constant += ".0";
}
}
field->doc_comment = dc;
ECHECK(ParseMetaData(&field->attributes));
field->deprecated = field->attributes.Lookup("deprecated") != nullptr;
auto hash_name = field->attributes.Lookup("hash");
if (hash_name) {
switch ((IsVector(type)) ? type.element : type.base_type) {
case BASE_TYPE_SHORT:
case BASE_TYPE_USHORT: {
if (FindHashFunction16(hash_name->constant.c_str()) == nullptr)
return Error("Unknown hashing algorithm for 16 bit types: " +
hash_name->constant);
break;
}
case BASE_TYPE_INT:
case BASE_TYPE_UINT: {
if (FindHashFunction32(hash_name->constant.c_str()) == nullptr)
return Error("Unknown hashing algorithm for 32 bit types: " +
hash_name->constant);
break;
}
case BASE_TYPE_LONG:
case BASE_TYPE_ULONG: {
if (FindHashFunction64(hash_name->constant.c_str()) == nullptr)
return Error("Unknown hashing algorithm for 64 bit types: " +
hash_name->constant);
break;
}
default:
return Error(
"only short, ushort, int, uint, long and ulong data types support "
"hashing.");
}
}
// For historical convenience reasons, string keys are assumed required.
// Scalars are kDefault unless otherwise specified.
// Nonscalars are kOptional unless required.
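// e.g. `a:int = null;` is optional, `s:string (required);` is required, and
// a plain `t:SomeTable;` is optional.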
field->key = field->attributes.Lookup("key") != nullptr;
const bool required = field->attributes.Lookup("required") != nullptr ||
(IsString(type) && field->key);
const bool default_str_or_vec =
((IsString(type) || IsVector(type)) && field->value.constant != "0");
const bool optional = IsScalar(type.base_type)
? (field->value.constant == "null")
: !(required || default_str_or_vec);
if (required && optional) {
return Error("Fields cannot be both optional and required.");
}
field->presence = FieldDef::MakeFieldPresence(optional, required);
if (required && (struct_def.fixed || IsScalar(type.base_type))) {
return Error("only non-scalar fields in tables may be 'required'");
}
if (field->key) {
if (struct_def.has_key) return Error("only one field may be set as 'key'");
struct_def.has_key = true;
if (!IsScalar(type.base_type) && !IsString(type)) {
return Error("'key' field must be string or scalar type");
}
}
if (field->IsScalarOptional()) {
if (type.enum_def && type.enum_def->Lookup("null")) {
FLATBUFFERS_ASSERT(IsInteger(type.base_type));
return Error(
"the default 'null' is reserved for declaring optional scalar "
"fields, it conflicts with declaration of enum '" +
type.enum_def->name + "'.");
}
if (field->attributes.Lookup("key")) {
return Error(
"only a non-optional scalar field can be used as a 'key' field");
}
if (!SupportsOptionalScalars()) {
return Error(
"Optional scalars are not yet supported in at least one the of "
"the specified programming languages.");
}
}
if (type.enum_def) {
// The type.base_type can only be scalar, union, array or vector.
// Table, struct or string can't have enum_def.
// The default value of a union or vector is NONE/null, translated to "0".
FLATBUFFERS_ASSERT(IsInteger(type.base_type) ||
(type.base_type == BASE_TYPE_UNION) || IsVector(type) ||
IsArray(type));
if (IsVector(type)) {
// Vector can't use initialization list.
FLATBUFFERS_ASSERT(field->value.constant == "0");
} else {
// All unions should have the NONE ("0") enum value.
auto in_enum = field->IsOptional() ||
type.enum_def->attributes.Lookup("bit_flags") ||
type.enum_def->FindByValue(field->value.constant);
if (false == in_enum)
return Error("default value of " + field->value.constant +
" for field " + name + " is not part of enum " +
type.enum_def->name);
}
}
if (field->deprecated && struct_def.fixed)
return Error("can't deprecate fields in a struct");
auto cpp_type = field->attributes.Lookup("cpp_type");
if (cpp_type) {
if (!hash_name)
return Error("cpp_type can only be used with a hashed field");
// Force cpp_ptr_type to 'naked' if unset.
auto cpp_ptr_type = field->attributes.Lookup("cpp_ptr_type");
if (!cpp_ptr_type) {
auto val = new Value();
val->type = cpp_type->type;
val->constant = "naked";
field->attributes.Add("cpp_ptr_type", val);
}
}
field->shared = field->attributes.Lookup("shared") != nullptr;
if (field->shared && field->value.type.base_type != BASE_TYPE_STRING)
return Error("shared can only be defined on strings");
auto field_native_custom_alloc =
field->attributes.Lookup("native_custom_alloc");
if (field_native_custom_alloc)
return Error(
"native_custom_alloc can only be used with a table or struct "
"definition");
field->native_inline = field->attributes.Lookup("native_inline") != nullptr;
if (field->native_inline && !IsStruct(field->value.type))
return Error("native_inline can only be defined on structs");
auto nested = field->attributes.Lookup("nested_flatbuffer");
if (nested) {
if (nested->type.base_type != BASE_TYPE_STRING)
return Error(
"nested_flatbuffer attribute must be a string (the root type)");
if (type.base_type != BASE_TYPE_VECTOR || type.element != BASE_TYPE_UCHAR)
return Error(
"nested_flatbuffer attribute may only apply to a vector of ubyte");
// This will cause an error if the root type of the nested flatbuffer
// wasn't defined elsewhere.
field->nested_flatbuffer = LookupCreateStruct(nested->constant);
}
if (field->attributes.Lookup("flexbuffer")) {
field->flexbuffer = true;
uses_flexbuffers_ = true;
if (type.base_type != BASE_TYPE_VECTOR || type.element != BASE_TYPE_UCHAR)
return Error("flexbuffer attribute may only apply to a vector of ubyte");
}
if (typefield) {
if (!IsScalar(typefield->value.type.base_type)) {
// this is a union vector field
typefield->presence = field->presence;
}
// If this field is a union, and it has a manually assigned id,
// the automatically added type field should have an id as well (of N - 1).
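// e.g. (illustrative) `u:AnyUnion (id: 4);` gives the hidden `u_type`
// field id 3.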
auto attr = field->attributes.Lookup("id");
if (attr) {
const auto &id_str = attr->constant;
voffset_t id = 0;
const auto done = !atot(id_str.c_str(), *this, &id).Check();
if (done && id > 0) {
auto val = new Value();
val->type = attr->type;
val->constant = NumToString(id - 1);
typefield->attributes.Add("id", val);
} else {
return Error(
"a union type effectively adds two fields with non-negative ids, "
"its id must be that of the second field (the first field is "
"the type field and not explicitly declared in the schema);\n"
"field: " +
field->name + ", id: " + id_str);
}
}
// If this field is a union that is deprecated,
// the automatically added type field should be deprecated as well.
if (field->deprecated) { typefield->deprecated = true; }
}
EXPECT(';');
return NoError();
}
CheckedError Parser::ParseString(Value &val, bool use_string_pooling) {
auto s = attribute_;
EXPECT(kTokenStringConstant);
if (use_string_pooling) {
val.constant = NumToString(builder_.CreateSharedString(s).o);
} else {
val.constant = NumToString(builder_.CreateString(s).o);
}
return NoError();
}
CheckedError Parser::ParseComma() {
if (!opts.protobuf_ascii_alike) EXPECT(',');
return NoError();
}
CheckedError Parser::ParseAnyValue(Value &val, FieldDef *field,
size_t parent_fieldn,
const StructDef *parent_struct_def,
uoffset_t count, bool inside_vector) {
switch (val.type.base_type) {
case BASE_TYPE_UNION: {
FLATBUFFERS_ASSERT(field);
std::string constant;
Vector<uint8_t> *vector_of_union_types = nullptr;
// Find corresponding type field we may have already parsed.
for (auto elem = field_stack_.rbegin() + count;
elem != field_stack_.rbegin() + parent_fieldn + count; ++elem) {
auto &type = elem->second->value.type;
if (type.enum_def == val.type.enum_def) {
if (inside_vector) {
if (IsVector(type) && type.element == BASE_TYPE_UTYPE) {
// Vector of union type field.
uoffset_t offset;
ECHECK(atot(elem->first.constant.c_str(), *this, &offset));
vector_of_union_types = reinterpret_cast<Vector<uint8_t> *>(
builder_.GetCurrentBufferPointer() + builder_.GetSize() -
offset);
break;
}
} else {
if (type.base_type == BASE_TYPE_UTYPE) {
// Union type field.
constant = elem->first.constant;
break;
}
}
}
}
if (constant.empty() && !inside_vector) {
// We haven't seen the type field yet. Sadly a lot of JSON writers
// output these in alphabetical order, meaning it comes after this
// value. So we scan past the value to find it, then come back here.
// We currently don't do this for vectors of unions because the
// scanning/serialization logic would get very complicated.
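// e.g. (illustrative JSON) {"shape": { ... }, "shape_type": "Circle"}:
// "shape" is parsed first, so we scan ahead here to find "shape_type".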
auto type_name = field->name + UnionTypeFieldSuffix();
FLATBUFFERS_ASSERT(parent_struct_def);
auto type_field = parent_struct_def->fields.Lookup(type_name);
FLATBUFFERS_ASSERT(type_field); // Guaranteed by ParseField().
// Remember where we are in the source file, so we can come back here.
auto backup = *static_cast<ParserState *>(this);
ECHECK(SkipAnyJsonValue()); // The table.
ECHECK(ParseComma());
auto next_name = attribute_;
if (Is(kTokenStringConstant)) {
NEXT();
} else {
EXPECT(kTokenIdentifier);
}
if (next_name == type_name) {
EXPECT(':');
ParseDepthGuard depth_guard(this);
ECHECK(depth_guard.Check());
Value type_val = type_field->value;
ECHECK(ParseAnyValue(type_val, type_field, 0, nullptr, 0));
constant = type_val.constant;
// Got the information we needed, now rewind:
*static_cast<ParserState *>(this) = backup;
}
}
if (constant.empty() && !vector_of_union_types) {
return Error("missing type field for this union value: " + field->name);
}
uint8_t enum_idx;
if (vector_of_union_types) {
enum_idx = vector_of_union_types->Get(count);
} else {
ECHECK(atot(constant.c_str(), *this, &enum_idx));
}
auto enum_val = val.type.enum_def->ReverseLookup(enum_idx, true);
if (!enum_val) return Error("illegal type id for: " + field->name);
if (enum_val->union_type.base_type == BASE_TYPE_STRUCT) {
ECHECK(ParseTable(*enum_val->union_type.struct_def, &val.constant,
nullptr));
if (enum_val->union_type.struct_def->fixed) {
// All BASE_TYPE_UNION values are offsets, so turn this into one.
SerializeStruct(*enum_val->union_type.struct_def, val);
builder_.ClearOffsets();
val.constant = NumToString(builder_.GetSize());
}
} else if (IsString(enum_val->union_type)) {
ECHECK(ParseString(val, field->shared));
} else {
FLATBUFFERS_ASSERT(false);
}
break;
}
case BASE_TYPE_STRUCT:
ECHECK(ParseTable(*val.type.struct_def, &val.constant, nullptr));
break;
case BASE_TYPE_STRING: {
ECHECK(ParseString(val, field->shared));
break;
}
case BASE_TYPE_VECTOR: {
uoffset_t off;
ECHECK(ParseVector(val.type.VectorType(), &off, field, parent_fieldn));
val.constant = NumToString(off);
break;
}
case BASE_TYPE_ARRAY: {
ECHECK(ParseArray(val));
break;
}
case BASE_TYPE_INT:
case BASE_TYPE_UINT:
case BASE_TYPE_LONG:
case BASE_TYPE_ULONG: {
if (field && field->attributes.Lookup("hash") &&
(token_ == kTokenIdentifier || token_ == kTokenStringConstant)) {
ECHECK(ParseHash(val, field));
} else {
ECHECK(ParseSingleValue(field ? &field->name : nullptr, val, false));
}
break;
}
default:
ECHECK(ParseSingleValue(field ? &field->name : nullptr, val, false));
break;
}
return NoError();
}
void Parser::SerializeStruct(const StructDef &struct_def, const Value &val) {
SerializeStruct(builder_, struct_def, val);
}
void Parser::SerializeStruct(FlatBufferBuilder &builder,
const StructDef &struct_def, const Value &val) {
FLATBUFFERS_ASSERT(val.constant.length() == struct_def.bytesize);
builder.Align(struct_def.minalign);
builder.PushBytes(reinterpret_cast<const uint8_t *>(val.constant.c_str()),
struct_def.bytesize);
builder.AddStructOffset(val.offset, builder.GetSize());
}
template<typename F>
CheckedError Parser::ParseTableDelimiters(size_t &fieldn,
const StructDef *struct_def, F body) {
// We allow tables both as JSON object{ .. } with field names
// or vector[..] with all fields in order
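// e.g. (illustrative) a Vec3 may be written as { "x": 1, "y": 2, "z": 3 }
// or as [1, 2, 3], with fields given in declaration order.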
char terminator = '}';
bool is_nested_vector = struct_def && Is('[');
if (is_nested_vector) {
NEXT();
terminator = ']';
} else {
EXPECT('{');
}
for (;;) {
if ((!opts.strict_json || !fieldn) && Is(terminator)) break;
std::string name;
if (is_nested_vector) {
if (fieldn >= struct_def->fields.vec.size()) {
return Error("too many unnamed fields in nested array");
}
name = struct_def->fields.vec[fieldn]->name;
} else {
name = attribute_;
if (Is(kTokenStringConstant)) {
NEXT();
} else {
EXPECT(opts.strict_json ? kTokenStringConstant : kTokenIdentifier);
}
if (!opts.protobuf_ascii_alike || !(Is('{') || Is('['))) EXPECT(':');
}
ECHECK(body(name, fieldn, struct_def));
if (Is(terminator)) break;
ECHECK(ParseComma());
}
NEXT();
if (is_nested_vector && fieldn != struct_def->fields.vec.size()) {
return Error("wrong number of unnamed fields in table vector");
}
return NoError();
}
CheckedError Parser::ParseTable(const StructDef &struct_def, std::string *value,
uoffset_t *ovalue) {
ParseDepthGuard depth_guard(this);
ECHECK(depth_guard.Check());
size_t fieldn_outer = 0;
auto err = ParseTableDelimiters(
fieldn_outer, &struct_def,
[&](const std::string &name, size_t &fieldn,
const StructDef *struct_def_inner) -> CheckedError {
if (name == "$schema") {
ECHECK(Expect(kTokenStringConstant));
return NoError();
}
auto field = struct_def_inner->fields.Lookup(name);
if (!field) {
if (!opts.skip_unexpected_fields_in_json) {
return Error("unknown field: " + name);
} else {
ECHECK(SkipAnyJsonValue());
}
} else {
if (IsIdent("null") && !IsScalar(field->value.type.base_type)) {
ECHECK(Next()); // Ignore this field.
} else {
Value val = field->value;
if (field->flexbuffer) {
flexbuffers::Builder builder(1024,
flexbuffers::BUILDER_FLAG_SHARE_ALL);
ECHECK(ParseFlexBufferValue(&builder));
builder.Finish();
// Force alignment for nested flexbuffer
builder_.ForceVectorAlignment(builder.GetSize(), sizeof(uint8_t),
sizeof(largest_scalar_t));
auto off = builder_.CreateVector(builder.GetBuffer());
val.constant = NumToString(off.o);
} else if (field->nested_flatbuffer) {
ECHECK(
ParseNestedFlatbuffer(val, field, fieldn, struct_def_inner));
} else {
ECHECK(ParseAnyValue(val, field, fieldn, struct_def_inner, 0));
}
// Hardcoded insertion-sort with error-check.
// If fields are specified in order, then this loop exits
// immediately.
auto elem = field_stack_.rbegin();
for (; elem != field_stack_.rbegin() + fieldn; ++elem) {
auto existing_field = elem->second;
if (existing_field == field)
return Error("field set more than once: " + field->name);
if (existing_field->value.offset < field->value.offset) break;
}
// Note: elem points to before the insertion point, thus .base()
// points to the correct spot.
field_stack_.insert(elem.base(), std::make_pair(val, field));
fieldn++;
}
}
return NoError();
});
ECHECK(err);
// Check if all required fields are parsed.
for (auto field_it = struct_def.fields.vec.begin();
field_it != struct_def.fields.vec.end(); ++field_it) {
auto required_field = *field_it;
if (!required_field->IsRequired()) { continue; }
bool found = false;
for (auto pf_it = field_stack_.end() - fieldn_outer;
pf_it != field_stack_.end(); ++pf_it) {
auto parsed_field = pf_it->second;
if (parsed_field == required_field) {
found = true;
break;
}
}
if (!found) {
return Error("required field is missing: " + required_field->name +
" in " + struct_def.name);
}
}
if (struct_def.fixed && fieldn_outer != struct_def.fields.vec.size())
return Error("struct: wrong number of initializers: " + struct_def.name);
auto start = struct_def.fixed ? builder_.StartStruct(struct_def.minalign)
: builder_.StartTable();
for (size_t size = struct_def.sortbysize ? sizeof(largest_scalar_t) : 1; size;
size /= 2) {
// Go through elements in reverse, since we're building the data backwards.
for (auto it = field_stack_.rbegin();
it != field_stack_.rbegin() + fieldn_outer; ++it) {
auto &field_value = it->first;
auto field = it->second;
if (!struct_def.sortbysize ||
size == SizeOf(field_value.type.base_type)) {
switch (field_value.type.base_type) {
// clang-format off
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...) \
case BASE_TYPE_ ## ENUM: \
builder_.Pad(field->padding); \
if (struct_def.fixed) { \
CTYPE val; \
ECHECK(atot(field_value.constant.c_str(), *this, &val)); \
builder_.PushElement(val); \
} else { \
CTYPE val, valdef; \
ECHECK(atot(field_value.constant.c_str(), *this, &val)); \
ECHECK(atot(field->value.constant.c_str(), *this, &valdef)); \
builder_.AddElement(field_value.offset, val, valdef); \
} \
break;
FLATBUFFERS_GEN_TYPES_SCALAR(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...) \
case BASE_TYPE_ ## ENUM: \
builder_.Pad(field->padding); \
if (IsStruct(field->value.type)) { \
SerializeStruct(*field->value.type.struct_def, field_value); \
} else { \
CTYPE val; \
ECHECK(atot(field_value.constant.c_str(), *this, &val)); \
builder_.AddOffset(field_value.offset, val); \
} \
break;
FLATBUFFERS_GEN_TYPES_POINTER(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
case BASE_TYPE_ARRAY:
builder_.Pad(field->padding);
builder_.PushBytes(
reinterpret_cast<const uint8_t*>(field_value.constant.c_str()),
InlineSize(field_value.type));
break;
// clang-format on
}
}
}
}
for (size_t i = 0; i < fieldn_outer; i++) field_stack_.pop_back();
if (struct_def.fixed) {
builder_.ClearOffsets();
builder_.EndStruct();
FLATBUFFERS_ASSERT(value);
// Temporarily store this struct in the value string, since it is to
// be serialized in-place elsewhere.
value->assign(
reinterpret_cast<const char *>(builder_.GetCurrentBufferPointer()),
struct_def.bytesize);
builder_.PopBytes(struct_def.bytesize);
FLATBUFFERS_ASSERT(!ovalue);
} else {
auto val = builder_.EndTable(start);
if (ovalue) *ovalue = val;
if (value) *value = NumToString(val);
}
return NoError();
}
template<typename F>
CheckedError Parser::ParseVectorDelimiters(uoffset_t &count, F body) {
EXPECT('[');
for (;;) {
if ((!opts.strict_json || !count) && Is(']')) break;
ECHECK(body(count));
count++;
if (Is(']')) break;
ECHECK(ParseComma());
}
NEXT();
return NoError();
}
static bool CompareSerializedScalars(const uint8_t *a, const uint8_t *b,
const FieldDef &key) {
switch (key.value.type.base_type) {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...) \
case BASE_TYPE_##ENUM: { \
CTYPE def = static_cast<CTYPE>(0); \
if (!a || !b) { StringToNumber(key.value.constant.c_str(), &def); } \
const auto av = a ? ReadScalar<CTYPE>(a) : def; \
const auto bv = b ? ReadScalar<CTYPE>(b) : def; \
return av < bv; \
}
FLATBUFFERS_GEN_TYPES_SCALAR(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
default: {
FLATBUFFERS_ASSERT(false && "scalar type expected");
return false;
}
}
}
static bool CompareTablesByScalarKey(const Offset<Table> *_a,
const Offset<Table> *_b,
const FieldDef &key) {
const voffset_t offset = key.value.offset;
// Indirect offset pointer to table pointer.
auto a = reinterpret_cast<const uint8_t *>(_a) + ReadScalar<uoffset_t>(_a);
auto b = reinterpret_cast<const uint8_t *>(_b) + ReadScalar<uoffset_t>(_b);
// Fetch field address from table.
a = reinterpret_cast<const Table *>(a)->GetAddressOf(offset);
b = reinterpret_cast<const Table *>(b)->GetAddressOf(offset);
return CompareSerializedScalars(a, b, key);
}
static bool CompareTablesByStringKey(const Offset<Table> *_a,
const Offset<Table> *_b,
const FieldDef &key) {
const voffset_t offset = key.value.offset;
// Indirect offset pointer to table pointer.
auto a = reinterpret_cast<const uint8_t *>(_a) + ReadScalar<uoffset_t>(_a);
auto b = reinterpret_cast<const uint8_t *>(_b) + ReadScalar<uoffset_t>(_b);
// Fetch field address from table.
a = reinterpret_cast<const Table *>(a)->GetAddressOf(offset);
b = reinterpret_cast<const Table *>(b)->GetAddressOf(offset);
if (a && b) {
// Indirect offset pointer to string pointer.
a += ReadScalar<uoffset_t>(a);
b += ReadScalar<uoffset_t>(b);
return *reinterpret_cast<const String *>(a) <
*reinterpret_cast<const String *>(b);
} else {
return a ? true : false;
}
}
static void SwapSerializedTables(Offset<Table> *a, Offset<Table> *b) {
// These are serialized offsets, so they are relative to where they are
// stored in memory; compute the distance between these pointers:
ptrdiff_t diff = (b - a) * sizeof(Offset<Table>);
FLATBUFFERS_ASSERT(diff >= 0); // Guaranteed by SimpleQsort.
auto udiff = static_cast<uoffset_t>(diff);
a->o = EndianScalar(ReadScalar<uoffset_t>(a) - udiff);
b->o = EndianScalar(ReadScalar<uoffset_t>(b) + udiff);
std::swap(*a, *b);
}
// See below for why we need our own sort :(
template<typename T, typename F, typename S>
void SimpleQsort(T *begin, T *end, size_t width, F comparator, S swapper) {
if (end - begin <= static_cast<ptrdiff_t>(width)) return;
auto l = begin + width;
auto r = end;
while (l < r) {
if (comparator(begin, l)) {
r -= width;
swapper(l, r);
} else {
l += width;
}
}
l -= width;
swapper(begin, l);
SimpleQsort(begin, l, width, comparator, swapper);
SimpleQsort(r, end, width, comparator, swapper);
}
CheckedError Parser::ParseAlignAttribute(const std::string &align_constant,
size_t min_align, size_t *align) {
// Use uint8_t to avoid problems with size_t==`unsigned long` on LP64.
uint8_t align_value;
if (StringToNumber(align_constant.c_str(), &align_value) &&
VerifyAlignmentRequirements(static_cast<size_t>(align_value),
min_align)) {
*align = align_value;
return NoError();
}
return Error("unexpected force_align value '" + align_constant +
"', alignment must be a power of two integer ranging from the "
"type\'s natural alignment " +
NumToString(min_align) + " to " +
NumToString(FLATBUFFERS_MAX_ALIGNMENT));
}
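// e.g. (illustrative) for the check above: `(force_align: 8)` is accepted
// when 8 is at least the type's natural alignment, while a non-power-of-two
// value such as 6 is rejected.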
CheckedError Parser::ParseVector(const Type &type, uoffset_t *ovalue,
FieldDef *field, size_t fieldn) {
uoffset_t count = 0;
auto err = ParseVectorDelimiters(count, [&](uoffset_t &) -> CheckedError {
Value val;
val.type = type;
ECHECK(ParseAnyValue(val, field, fieldn, nullptr, count, true));
field_stack_.push_back(std::make_pair(val, nullptr));
return NoError();
});
ECHECK(err);
const size_t len = count * InlineSize(type) / InlineAlignment(type);
const size_t elemsize = InlineAlignment(type);
const auto force_align = field->attributes.Lookup("force_align");
if (force_align) {
size_t align;
ECHECK(ParseAlignAttribute(force_align->constant, 1, &align));
if (align > 1) { builder_.ForceVectorAlignment(len, elemsize, align); }
}
builder_.StartVector(len, elemsize);
for (uoffset_t i = 0; i < count; i++) {
// Start at the back, since we're building the data backwards.
auto &val = field_stack_.back().first;
switch (val.type.base_type) {
// clang-format off
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE,...) \
case BASE_TYPE_ ## ENUM: \
if (IsStruct(val.type)) SerializeStruct(*val.type.struct_def, val); \
else { \
CTYPE elem; \
ECHECK(atot(val.constant.c_str(), *this, &elem)); \
builder_.PushElement(elem); \
} \
break;
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
// clang-format on
}
field_stack_.pop_back();
}
builder_.ClearOffsets();
*ovalue = builder_.EndVector(count);
if (type.base_type == BASE_TYPE_STRUCT && type.struct_def->has_key) {
// We should sort this vector. Find the key first.
const FieldDef *key = nullptr;
for (auto it = type.struct_def->fields.vec.begin();
it != type.struct_def->fields.vec.end(); ++it) {
if ((*it)->key) {
key = (*it);
break;
}
}
FLATBUFFERS_ASSERT(key);
// Now sort it.
// We can't use std::sort because for structs the size is not known at
// compile time, and for tables our iterators dereference offsets, so can't
// be used to swap elements.
// And we can't use C qsort either, since that would force us to use
// globals, making parsing thread-unsafe.
// So for now, we use SimpleQsort above.
// TODO: replace with something better, preferably not recursive.
if (type.struct_def->fixed) {
const voffset_t offset = key->value.offset;
const size_t struct_size = type.struct_def->bytesize;
auto v =
reinterpret_cast<VectorOfAny *>(builder_.GetCurrentBufferPointer());
SimpleQsort<uint8_t>(
v->Data(), v->Data() + v->size() * type.struct_def->bytesize,
type.struct_def->bytesize,
[offset, key](const uint8_t *a, const uint8_t *b) -> bool {
return CompareSerializedScalars(a + offset, b + offset, *key);
},
[struct_size](uint8_t *a, uint8_t *b) {
// FIXME: faster?
for (size_t i = 0; i < struct_size; i++) { std::swap(a[i], b[i]); }
});
} else {
auto v = reinterpret_cast<Vector<Offset<Table>> *>(
builder_.GetCurrentBufferPointer());
// Here we also can't use std::sort. We do have an iterator type for it,
// but it is non-standard as it will dereference the offsets, and thus
// can't be used to swap elements.
if (key->value.type.base_type == BASE_TYPE_STRING) {
SimpleQsort<Offset<Table>>(
v->data(), v->data() + v->size(), 1,
[key](const Offset<Table> *_a, const Offset<Table> *_b) -> bool {
return CompareTablesByStringKey(_a, _b, *key);
},
SwapSerializedTables);
} else {
SimpleQsort<Offset<Table>>(
v->data(), v->data() + v->size(), 1,
[key](const Offset<Table> *_a, const Offset<Table> *_b) -> bool {
return CompareTablesByScalarKey(_a, _b, *key);
},
SwapSerializedTables);
}
}
}
return NoError();
}
CheckedError Parser::ParseArray(Value &array) {
std::vector<Value> stack;
FlatBufferBuilder builder;
const auto &type = array.type.VectorType();
auto length = array.type.fixed_length;
uoffset_t count = 0;
auto err = ParseVectorDelimiters(count, [&](uoffset_t &) -> CheckedError {
vector_emplace_back(&stack, Value());
auto &val = stack.back();
val.type = type;
if (IsStruct(type)) {
ECHECK(ParseTable(*val.type.struct_def, &val.constant, nullptr));
} else {
ECHECK(ParseSingleValue(nullptr, val, false));
}
return NoError();
});
ECHECK(err);
if (length != count) return Error("Fixed-length array size is incorrect.");
for (auto it = stack.rbegin(); it != stack.rend(); ++it) {
auto &val = *it;
// clang-format off
switch (val.type.base_type) {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...) \
case BASE_TYPE_ ## ENUM: \
if (IsStruct(val.type)) { \
SerializeStruct(builder, *val.type.struct_def, val); \
} else { \
CTYPE elem; \
ECHECK(atot(val.constant.c_str(), *this, &elem)); \
builder.PushElement(elem); \
} \
break;
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
default: FLATBUFFERS_ASSERT(0);
}
// clang-format on
}
array.constant.assign(
reinterpret_cast<const char *>(builder.GetCurrentBufferPointer()),
InlineSize(array.type));
return NoError();
}
CheckedError Parser::ParseNestedFlatbuffer(Value &val, FieldDef *field,
size_t fieldn,
const StructDef *parent_struct_def) {
if (token_ == '[') { // backwards compat for 'legacy' ubyte buffers
ECHECK(ParseAnyValue(val, field, fieldn, parent_struct_def, 0));
} else {
auto cursor_at_value_begin = cursor_;
ECHECK(SkipAnyJsonValue());
std::string substring(cursor_at_value_begin - 1, cursor_ - 1);
// Create and initialize new parser
Parser nested_parser;
FLATBUFFERS_ASSERT(field->nested_flatbuffer);
nested_parser.root_struct_def_ = field->nested_flatbuffer;
nested_parser.enums_ = enums_;
nested_parser.opts = opts;
nested_parser.uses_flexbuffers_ = uses_flexbuffers_;
nested_parser.parse_depth_counter_ = parse_depth_counter_;
// Parse JSON substring into new flatbuffer builder using nested_parser
bool ok = nested_parser.Parse(substring.c_str(), nullptr, nullptr);
// Clean nested_parser to avoid deleting the elements in
// the SymbolTables on destruction
nested_parser.enums_.dict.clear();
nested_parser.enums_.vec.clear();
if (!ok) { ECHECK(Error(nested_parser.error_)); }
// Force alignment for nested flatbuffer
builder_.ForceVectorAlignment(
nested_parser.builder_.GetSize(), sizeof(uint8_t),
nested_parser.builder_.GetBufferMinAlignment());
auto off = builder_.CreateVector(nested_parser.builder_.GetBufferPointer(),
nested_parser.builder_.GetSize());
val.constant = NumToString(off.o);
}
return NoError();
}
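// Example for ParseNestedFlatbuffer above (illustrative): with a schema field
// `data:[ubyte] (nested_flatbuffer: "Monster");`, the JSON value for `data`
// may be either a legacy byte array [1, 2, ...] or an inline Monster object,
// which is then serialized with its own nested parser.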
CheckedError Parser::ParseMetaData(SymbolTable<Value> *attributes) {
if (Is('(')) {
NEXT();
for (;;) {
auto name = attribute_;
if (false == (Is(kTokenIdentifier) || Is(kTokenStringConstant)))
return Error("attribute name must be either identifier or string: " +
name);
if (known_attributes_.find(name) == known_attributes_.end())
return Error("user define attributes must be declared before use: " +
name);
NEXT();
auto e = new Value();
if (attributes->Add(name, e)) Warning("attribute already found: " + name);
if (Is(':')) {
NEXT();
ECHECK(ParseSingleValue(&name, *e, true));
}
if (Is(')')) {
NEXT();
break;
}
EXPECT(',');
}
}
return NoError();
}
CheckedError Parser::ParseEnumFromString(const Type &type,
std::string *result) {
const auto base_type =
type.enum_def ? type.enum_def->underlying_type.base_type : type.base_type;
if (!IsInteger(base_type)) return Error("not a valid value for this field");
uint64_t u64 = 0;
for (size_t pos = 0; pos != std::string::npos;) {
const auto delim = attribute_.find_first_of(' ', pos);
const auto last = (std::string::npos == delim);
auto word = attribute_.substr(pos, !last ? delim - pos : std::string::npos);
pos = !last ? delim + 1 : std::string::npos;
const EnumVal *ev = nullptr;
if (type.enum_def) {
ev = type.enum_def->Lookup(word);
} else {
auto dot = word.find_first_of('.');
if (std::string::npos == dot)
return Error("enum values need to be qualified by an enum type");
auto enum_def_str = word.substr(0, dot);
const auto enum_def = LookupEnum(enum_def_str);
if (!enum_def) return Error("unknown enum: " + enum_def_str);
auto enum_val_str = word.substr(dot + 1);
ev = enum_def->Lookup(enum_val_str);
}
if (!ev) return Error("unknown enum value: " + word);
u64 |= ev->GetAsUInt64();
}
*result = IsUnsigned(base_type) ? NumToString(u64)
: NumToString(static_cast<int64_t>(u64));
return NoError();
}
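// Example for ParseEnumFromString above (illustrative): "Red Blue" yields the
// bitwise OR of both values when the enum type is known; otherwise each value
// must be qualified, as in "Color.Red".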
CheckedError Parser::ParseHash(Value &e, FieldDef *field) {
FLATBUFFERS_ASSERT(field);
Value *hash_name = field->attributes.Lookup("hash");
switch (e.type.base_type) {
case BASE_TYPE_SHORT: {
auto hash = FindHashFunction16(hash_name->constant.c_str());
int16_t hashed_value = static_cast<int16_t>(hash(attribute_.c_str()));
e.constant = NumToString(hashed_value);
break;
}
case BASE_TYPE_USHORT: {
auto hash = FindHashFunction16(hash_name->constant.c_str());
uint16_t hashed_value = hash(attribute_.c_str());
e.constant = NumToString(hashed_value);
break;
}
case BASE_TYPE_INT: {
auto hash = FindHashFunction32(hash_name->constant.c_str());
int32_t hashed_value = static_cast<int32_t>(hash(attribute_.c_str()));
e.constant = NumToString(hashed_value);
break;
}
case BASE_TYPE_UINT: {
auto hash = FindHashFunction32(hash_name->constant.c_str());
uint32_t hashed_value = hash(attribute_.c_str());
e.constant = NumToString(hashed_value);
break;
}
case BASE_TYPE_LONG: {
auto hash = FindHashFunction64(hash_name->constant.c_str());
int64_t hashed_value = static_cast<int64_t>(hash(attribute_.c_str()));
e.constant = NumToString(hashed_value);
break;
}
case BASE_TYPE_ULONG: {
auto hash = FindHashFunction64(hash_name->constant.c_str());
uint64_t hashed_value = hash(attribute_.c_str());
e.constant = NumToString(hashed_value);
break;
}
default: FLATBUFFERS_ASSERT(0);
}
NEXT();
return NoError();
}
CheckedError Parser::TokenError() {
return Error("cannot parse value starting with: " + TokenToStringId(token_));
}
// Re-pack helper (ParseSingleValue) to normalize defaults of scalars.
template<typename T> inline void SingleValueRepack(Value &e, T val) {
// Remove leading zeros.
if (IsInteger(e.type.base_type)) { e.constant = NumToString(val); }
}
#if defined(FLATBUFFERS_HAS_NEW_STRTOD) && (FLATBUFFERS_HAS_NEW_STRTOD > 0)
// Normalize a default NaN to unsigned quiet-NaN(0) if the value was parsed
// from a hex-float literal.
static inline void SingleValueRepack(Value &e, float val) {
if (val != val) e.constant = "nan";
}
static inline void SingleValueRepack(Value &e, double val) {
if (val != val) e.constant = "nan";
}
#endif
CheckedError Parser::ParseFunction(const std::string *name, Value &e) {
ParseDepthGuard depth_guard(this);
ECHECK(depth_guard.Check());
// Copy name, attribute will be changed on NEXT().
const auto functionname = attribute_;
if (!IsFloat(e.type.base_type)) {
return Error(functionname + ": type of argument mismatch, expecting: " +
kTypeNames[BASE_TYPE_DOUBLE] +
", found: " + kTypeNames[e.type.base_type] +
", name: " + (name ? *name : "") + ", value: " + e.constant);
}
NEXT();
EXPECT('(');
ECHECK(ParseSingleValue(name, e, false));
EXPECT(')');
// calculate with double precision
double x, y = 0.0;
ECHECK(atot(e.constant.c_str(), *this, &x));
// clang-format off
auto func_match = false;
#define FLATBUFFERS_FN_DOUBLE(name, op) \
if (!func_match && functionname == name) { y = op; func_match = true; }
FLATBUFFERS_FN_DOUBLE("deg", x / kPi * 180);
FLATBUFFERS_FN_DOUBLE("rad", x * kPi / 180);
FLATBUFFERS_FN_DOUBLE("sin", sin(x));
FLATBUFFERS_FN_DOUBLE("cos", cos(x));
FLATBUFFERS_FN_DOUBLE("tan", tan(x));
FLATBUFFERS_FN_DOUBLE("asin", asin(x));
FLATBUFFERS_FN_DOUBLE("acos", acos(x));
FLATBUFFERS_FN_DOUBLE("atan", atan(x));
// TODO(wvo): add more useful conversion functions here.
#undef FLATBUFFERS_FN_DOUBLE
// clang-format on
if (true != func_match) {
return Error(std::string("Unknown conversion function: ") + functionname +
", field name: " + (name ? *name : "") +
", value: " + e.constant);
}
e.constant = NumToString(y);
return NoError();
}
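// Example for ParseFunction above: a schema default such as
// `x:double = rad(180);` evaluates to pi, and `y:double = cos(0);` to 1.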
CheckedError Parser::TryTypedValue(const std::string *name, int dtoken,
bool check, Value &e, BaseType req,
bool *destmatch) {
bool match = dtoken == token_;
if (match) {
FLATBUFFERS_ASSERT(*destmatch == false);
*destmatch = true;
e.constant = attribute_;
// Check token match
if (!check) {
if (e.type.base_type == BASE_TYPE_NONE) {
e.type.base_type = req;
} else {
return Error(
std::string("type mismatch: expecting: ") +
kTypeNames[e.type.base_type] + ", found: " + kTypeNames[req] +
", name: " + (name ? *name : "") + ", value: " + e.constant);
}
}
// The exponent suffix of a hexadecimal floating-point number is mandatory.
// A hex-integer constant is forbidden as an initializer of a float number.
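// e.g. "0x1p-2" is accepted (0.25), while a plain hex integer such as
// "0x10" is rejected as a float initializer.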
if ((kTokenFloatConstant != dtoken) && IsFloat(e.type.base_type)) {
const auto &s = e.constant;
const auto k = s.find_first_of("0123456789.");
if ((std::string::npos != k) && (s.length() > (k + 1)) &&
(s[k] == '0' && is_alpha_char(s[k + 1], 'X')) &&
(std::string::npos == s.find_first_of("pP", k + 2))) {
return Error(
"invalid number, the exponent suffix of hexadecimal "
"floating-point literals is mandatory: \"" +
s + "\"");
}
}
NEXT();
}
return NoError();
}
CheckedError Parser::ParseSingleValue(const std::string *name, Value &e,
bool check_now) {
const auto in_type = e.type.base_type;
const auto is_tok_ident = (token_ == kTokenIdentifier);
const auto is_tok_string = (token_ == kTokenStringConstant);
// First see if this could be a conversion function:
if (is_tok_ident && *cursor_ == '(') { return ParseFunction(name, e); }
// clang-format off
auto match = false;
#define IF_ECHECK_(force, dtoken, check, req) \
if (!match && ((check) || IsConstTrue(force))) \
ECHECK(TryTypedValue(name, dtoken, check, e, req, &match))
#define TRY_ECHECK(dtoken, check, req) IF_ECHECK_(false, dtoken, check, req)
#define FORCE_ECHECK(dtoken, check, req) IF_ECHECK_(true, dtoken, check, req)
// clang-format on
if (is_tok_ident || is_tok_string) {
const auto kTokenStringOrIdent = token_;
// The string type is the most probable type, so check it first.
TRY_ECHECK(kTokenStringConstant, in_type == BASE_TYPE_STRING,
BASE_TYPE_STRING);
// Avoid escaped and non-ASCII characters in the string.
if (!match && is_tok_string && IsScalar(in_type) &&
!attr_is_trivial_ascii_string_) {
return Error(
std::string("type mismatch or invalid value, an initializer of "
"non-string field must be trivial ASCII string: type: ") +
kTypeNames[in_type] + ", name: " + (name ? *name : "") +
", value: " + attribute_);
}
// A boolean as true/false. Boolean as Integer check below.
if (!match && IsBool(in_type)) {
auto is_true = attribute_ == "true";
if (is_true || attribute_ == "false") {
attribute_ = is_true ? "1" : "0";
// accepts both kTokenStringConstant and kTokenIdentifier
TRY_ECHECK(kTokenStringOrIdent, IsBool(in_type), BASE_TYPE_BOOL);
}
}
// Check for optional scalars.
if (!match && IsScalar(in_type) && attribute_ == "null") {
e.constant = "null";
NEXT();
match = true;
}
// Check if this could be a string/identifier enum value.
// An enum can only have a true integer base type.
if (!match && IsInteger(in_type) && !IsBool(in_type) &&
IsIdentifierStart(*attribute_.c_str())) {
ECHECK(ParseEnumFromString(e.type, &e.constant));
NEXT();
match = true;
}
// Parse a float/integer number from the string.
// A "scalar-in-string" value needs extra checks.
if (!match && is_tok_string && IsScalar(in_type)) {
// Strip trailing whitespaces from attribute_.
auto last_non_ws = attribute_.find_last_not_of(' ');
if (std::string::npos != last_non_ws) attribute_.resize(last_non_ws + 1);
if (IsFloat(e.type.base_type)) {
// The functions strtod() and strtof() accept both 'nan' and
// 'nan(number)' literals, but 'nan(number)' is rejected by the parser
// as an unsupported function if is_tok_ident is true.
if (attribute_.find_last_of(')') != std::string::npos) {
return Error("invalid number: " + attribute_);
}
}
}
// Float numbers or nan, inf, pi, etc.
TRY_ECHECK(kTokenStringOrIdent, IsFloat(in_type), BASE_TYPE_FLOAT);
// An integer constant in string.
TRY_ECHECK(kTokenStringOrIdent, IsInteger(in_type), BASE_TYPE_INT);
// Unknown tokens will be interpreted as string type.
// An attribute value may be a scalar or string constant.
FORCE_ECHECK(kTokenStringConstant, in_type == BASE_TYPE_STRING,
BASE_TYPE_STRING);
} else {
// Try a float number.
TRY_ECHECK(kTokenFloatConstant, IsFloat(in_type), BASE_TYPE_FLOAT);
// An integer token can initialize any scalar (integer or float).
FORCE_ECHECK(kTokenIntegerConstant, IsScalar(in_type), BASE_TYPE_INT);
}
// Match empty vectors for default-empty-vectors.
if (!match && IsVector(e.type) && token_ == '[') {
NEXT();
if (token_ != ']') { return Error("Expected `]` in vector default"); }
NEXT();
match = true;
e.constant = "[]";
}
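// e.g. a schema field `v:[int] = [];` stores the constant "[]".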
#undef FORCE_ECHECK
#undef TRY_ECHECK
#undef IF_ECHECK_
if (!match) {
std::string msg;
msg += "Cannot assign token starting with '" + TokenToStringId(token_) +
"' to value of <" + std::string(kTypeNames[in_type]) + "> type.";
return Error(msg);
}
const auto match_type = e.type.base_type; // may differ from in_type
// The check_now flag must be true when parsing an fbs schema.
// This flag forces checking of default scalar values and field metadata.
// For the JSON parser the flag should be false: if it were set, each JSON
// value would be checked twice (see ParseTable).
// Special case 'null' since atot can't handle that.
if (check_now && IsScalar(match_type) && e.constant != "null") {
// clang-format off
switch (match_type) {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...) \
case BASE_TYPE_ ## ENUM: {\
CTYPE val; \
ECHECK(atot(e.constant.c_str(), *this, &val)); \
SingleValueRepack(e, val); \
break; }
FLATBUFFERS_GEN_TYPES_SCALAR(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
default: break;
}
// clang-format on
}
return NoError();
}
StructDef *Parser::LookupCreateStruct(const std::string &name,
bool create_if_new, bool definition) {
std::string qualified_name = current_namespace_->GetFullyQualifiedName(name);
// See if it exists pre-declared by an unqualified use.
auto struct_def = LookupStruct(name);
if (struct_def && struct_def->predecl) {
if (definition) {
// Make sure it has the current namespace, and is registered under its
// qualified name.
struct_def->defined_namespace = current_namespace_;
structs_.Move(name, qualified_name);
}
return struct_def;
}
// See if it exists pre-declared by a qualified use.
struct_def = LookupStruct(qualified_name);
if (struct_def && struct_def->predecl) {
if (definition) {
// Make sure it has the current namespace.
struct_def->defined_namespace = current_namespace_;
}
return struct_def;
}
if (!definition && !struct_def) {
struct_def = LookupStructThruParentNamespaces(name);
}
if (!struct_def && create_if_new) {
struct_def = new StructDef();
if (definition) {
structs_.Add(qualified_name, struct_def);
struct_def->name = name;
struct_def->defined_namespace = current_namespace_;
} else {
// Not a definition.
// Rather than failing, we create a "pre declared" StructDef, due to
// circular references, and check for errors at the end of parsing.
// It is defined in the current namespace, as the best guess of what the
// final namespace will be.
structs_.Add(name, struct_def);
struct_def->name = name;
struct_def->defined_namespace = current_namespace_;
struct_def->original_location.reset(
new std::string(file_being_parsed_ + ":" + NumToString(line_)));
}
}
return struct_def;
}
const EnumVal *EnumDef::MinValue() const {
return vals.vec.empty() ? nullptr : vals.vec.front();
}
const EnumVal *EnumDef::MaxValue() const {
return vals.vec.empty() ? nullptr : vals.vec.back();
}
template<typename T> static uint64_t EnumDistanceImpl(T e1, T e2) {
if (e1 < e2) { std::swap(e1, e2); } // use std for scalars
// Signed overflow may occur, use unsigned calculation.
// The unsigned overflow is well-defined by C++ standard (modulo 2^n).
return static_cast<uint64_t>(e1) - static_cast<uint64_t>(e2);
}
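// e.g. EnumDistanceImpl(INT64_MIN, INT64_MAX) wraps to UINT64_MAX via the
// unsigned subtraction above.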
uint64_t EnumDef::Distance(const EnumVal *v1, const EnumVal *v2) const {
return IsUInt64() ? EnumDistanceImpl(v1->GetAsUInt64(), v2->GetAsUInt64())
: EnumDistanceImpl(v1->GetAsInt64(), v2->GetAsInt64());
}
std::string EnumDef::AllFlags() const {
FLATBUFFERS_ASSERT(attributes.Lookup("bit_flags"));
uint64_t u64 = 0;
for (auto it = Vals().begin(); it != Vals().end(); ++it) {
u64 |= (*it)->GetAsUInt64();
}
return IsUInt64() ? NumToString(u64) : NumToString(static_cast<int64_t>(u64));
}
EnumVal *EnumDef::ReverseLookup(int64_t enum_idx,
bool skip_union_default) const {
auto skip_first = static_cast<int>(is_union && skip_union_default);
for (auto it = Vals().begin() + skip_first; it != Vals().end(); ++it) {
if ((*it)->GetAsInt64() == enum_idx) { return *it; }
}
return nullptr;
}
EnumVal *EnumDef::FindByValue(const std::string &constant) const {
int64_t i64;
auto done = false;
if (IsUInt64()) {
uint64_t u64; // avoid reinterpret_cast of pointers
done = StringToNumber(constant.c_str(), &u64);
i64 = static_cast<int64_t>(u64);
} else {
done = StringToNumber(constant.c_str(), &i64);
}
FLATBUFFERS_ASSERT(done);
if (!done) return nullptr;
return ReverseLookup(i64, false);
}
void EnumDef::SortByValue() {
auto &v = vals.vec;
if (IsUInt64())
std::sort(v.begin(), v.end(), [](const EnumVal *e1, const EnumVal *e2) {
return e1->GetAsUInt64() < e2->GetAsUInt64();
});
else
std::sort(v.begin(), v.end(), [](const EnumVal *e1, const EnumVal *e2) {
return e1->GetAsInt64() < e2->GetAsInt64();
});
}
void EnumDef::RemoveDuplicates() {
// This method depends on the SymbolTable implementation!
// 1) vals.vec - owner (raw pointer)
// 2) vals.dict - access map
auto first = vals.vec.begin();
auto last = vals.vec.end();
if (first == last) return;
auto result = first;
while (++first != last) {
if ((*result)->value != (*first)->value) {
*(++result) = *first;
} else {
auto ev = *first;
for (auto it = vals.dict.begin(); it != vals.dict.end(); ++it) {
if (it->second == ev) it->second = *result; // reassign
}
delete ev; // delete enum value
*first = nullptr;
}
}
vals.vec.erase(++result, last);
}
template<typename T> void EnumDef::ChangeEnumValue(EnumVal *ev, T new_value) {
ev->value = static_cast<int64_t>(new_value);
}
namespace EnumHelper {
template<BaseType E> struct EnumValType { typedef int64_t type; };
template<> struct EnumValType<BASE_TYPE_ULONG> { typedef uint64_t type; };
} // namespace EnumHelper
struct EnumValBuilder {
EnumVal *CreateEnumerator(const std::string &ev_name) {
FLATBUFFERS_ASSERT(!temp);
auto first = enum_def.vals.vec.empty();
user_value = first;
temp = new EnumVal(ev_name, first ? 0 : enum_def.vals.vec.back()->value);
return temp;
}
EnumVal *CreateEnumerator(const std::string &ev_name, int64_t val) {
FLATBUFFERS_ASSERT(!temp);
user_value = true;
temp = new EnumVal(ev_name, val);
return temp;
}
FLATBUFFERS_CHECKED_ERROR AcceptEnumerator(const std::string &name) {
FLATBUFFERS_ASSERT(temp);
ECHECK(ValidateValue(&temp->value, false == user_value));
FLATBUFFERS_ASSERT((temp->union_type.enum_def == nullptr) ||
(temp->union_type.enum_def == &enum_def));
auto not_unique = enum_def.vals.Add(name, temp);
temp = nullptr;
if (not_unique) return parser.Error("enum value already exists: " + name);
return NoError();
}
FLATBUFFERS_CHECKED_ERROR AcceptEnumerator() {
return AcceptEnumerator(temp->name);
}
FLATBUFFERS_CHECKED_ERROR AssignEnumeratorValue(const std::string &value) {
user_value = true;
auto fit = false;
if (enum_def.IsUInt64()) {
uint64_t u64;
fit = StringToNumber(value.c_str(), &u64);
temp->value = static_cast<int64_t>(u64); // well-defined since C++20.
} else {
int64_t i64;
fit = StringToNumber(value.c_str(), &i64);
temp->value = i64;
}
if (!fit) return parser.Error("enum value does not fit, \"" + value + "\"");
return NoError();
}
template<BaseType E, typename CTYPE>
inline FLATBUFFERS_CHECKED_ERROR ValidateImpl(int64_t *ev, int m) {
typedef typename EnumHelper::EnumValType<E>::type T; // int64_t or uint64_t
static_assert(sizeof(T) == sizeof(int64_t), "invalid EnumValType");
const auto v = static_cast<T>(*ev);
auto up = static_cast<T>((flatbuffers::numeric_limits<CTYPE>::max)());
auto dn = static_cast<T>((flatbuffers::numeric_limits<CTYPE>::lowest)());
if (v < dn || v > (up - m)) {
return parser.Error("enum value does not fit, \"" + NumToString(v) +
(m ? " + 1\"" : "\"") + " out of " +
TypeToIntervalString<CTYPE>());
}
*ev = static_cast<int64_t>(v + m); // well-defined since C++20.
return NoError();
}
FLATBUFFERS_CHECKED_ERROR ValidateValue(int64_t *ev, bool next) {
// clang-format off
switch (enum_def.underlying_type.base_type) {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...) \
case BASE_TYPE_##ENUM: { \
if (!IsInteger(BASE_TYPE_##ENUM)) break; \
return ValidateImpl<BASE_TYPE_##ENUM, CTYPE>(ev, next ? 1 : 0); \
}
FLATBUFFERS_GEN_TYPES_SCALAR(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
default: break;
}
// clang-format on
return parser.Error("fatal: invalid enum underlying type");
}
EnumValBuilder(Parser &_parser, EnumDef &_enum_def)
: parser(_parser),
enum_def(_enum_def),
temp(nullptr),
user_value(false) {}
~EnumValBuilder() { delete temp; }
Parser &parser;
EnumDef &enum_def;
EnumVal *temp;
bool user_value;
};
CheckedError Parser::ParseEnum(const bool is_union, EnumDef **dest) {
std::vector<std::string> enum_comment = doc_comment_;
NEXT();
std::string enum_name = attribute_;
EXPECT(kTokenIdentifier);
EnumDef *enum_def;
ECHECK(StartEnum(enum_name, is_union, &enum_def));
enum_def->doc_comment = enum_comment;
if (!is_union && !opts.proto_mode) {
// Give a specialized error message, since this type spec used to
// be optional in the first FlatBuffers release.
if (!Is(':')) {
return Error(
"must specify the underlying integer type for this"
" enum (e.g. \': short\', which was the default).");
} else {
NEXT();
}
// Specify the integer type underlying this enum.
ECHECK(ParseType(enum_def->underlying_type));
if (!IsInteger(enum_def->underlying_type.base_type) ||
IsBool(enum_def->underlying_type.base_type))
return Error("underlying enum type must be integral");
// Make this type refer back to the enum it was derived from.
enum_def->underlying_type.enum_def = enum_def;
}
ECHECK(ParseMetaData(&enum_def->attributes));
const auto underlying_type = enum_def->underlying_type.base_type;
if (enum_def->attributes.Lookup("bit_flags") &&
!IsUnsigned(underlying_type)) {
// TODO: Convert this to an Error in the future?
Warning("underlying type of bit_flags enum must be unsigned");
}
EnumValBuilder evb(*this, *enum_def);
EXPECT('{');
// A lot of code generators expect that an enum is non-empty.
if ((is_union || Is('}')) && !opts.proto_mode) {
evb.CreateEnumerator("NONE");
ECHECK(evb.AcceptEnumerator());
}
std::set<std::pair<BaseType, StructDef *>> union_types;
while (!Is('}')) {
if (opts.proto_mode && attribute_ == "option") {
ECHECK(ParseProtoOption());
} else {
auto &ev = *evb.CreateEnumerator(attribute_);
auto full_name = ev.name;
ev.doc_comment = doc_comment_;
EXPECT(kTokenIdentifier);
if (is_union) {
ECHECK(ParseNamespacing(&full_name, &ev.name));
if (opts.union_value_namespacing) {
// Since we can't namespace the actual enum identifiers, turn
// namespace parts into part of the identifier.
ev.name = full_name;
std::replace(ev.name.begin(), ev.name.end(), '.', '_');
}
if (Is(':')) {
NEXT();
ECHECK(ParseType(ev.union_type));
if (ev.union_type.base_type != BASE_TYPE_STRUCT &&
ev.union_type.base_type != BASE_TYPE_STRING)
return Error("union value type may only be table/struct/string");
} else {
ev.union_type = Type(BASE_TYPE_STRUCT, LookupCreateStruct(full_name));
}
if (!enum_def->uses_multiple_type_instances) {
auto ins = union_types.insert(std::make_pair(
ev.union_type.base_type, ev.union_type.struct_def));
enum_def->uses_multiple_type_instances = (false == ins.second);
}
}
if (Is('=')) {
NEXT();
ECHECK(evb.AssignEnumeratorValue(attribute_));
EXPECT(kTokenIntegerConstant);
}
ECHECK(evb.AcceptEnumerator());
if (opts.proto_mode && Is('[')) {
NEXT();
// ignore attributes on enums.
while (token_ != ']') NEXT();
NEXT();
}
}
if (!Is(opts.proto_mode ? ';' : ',')) break;
NEXT();
}
EXPECT('}');
// At this point, the enum can be empty if the input is an invalid proto file.
if (!enum_def->size())
return Error("incomplete enum declaration, values not found");
if (enum_def->attributes.Lookup("bit_flags")) {
const auto base_width = static_cast<uint64_t>(8 * SizeOf(underlying_type));
for (auto it = enum_def->Vals().begin(); it != enum_def->Vals().end();
++it) {
auto ev = *it;
const auto u = ev->GetAsUInt64();
// Disallow using the sign bit of a signed underlying type.
if (!IsUnsigned(underlying_type) && u == (base_width - 1))
return Error("underlying type of bit_flags enum must be unsigned");
if (u >= base_width)
return Error("bit flag out of range of underlying integral type");
enum_def->ChangeEnumValue(ev, 1ULL << u);
}
}
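// e.g. (illustrative) for the bit_flags transform above:
// `enum F:ubyte (bit_flags) { A, B, C }` turns the declared values
// 0, 1, 2 into the flags 1, 2, 4.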
enum_def->SortByValue(); // Must be sorted to use MinValue/MaxValue.
// Ensure enum value uniqueness.
auto prev_it = enum_def->Vals().begin();
for (auto it = prev_it + 1; it != enum_def->Vals().end(); ++it) {
auto prev_ev = *prev_it;
auto ev = *it;
if (prev_ev->GetAsUInt64() == ev->GetAsUInt64())
return Error("all enum values must be unique: " + prev_ev->name +
" and " + ev->name + " are both " +
NumToString(ev->GetAsInt64()));
}
if (dest) *dest = enum_def;
types_.Add(current_namespace_->GetFullyQualifiedName(enum_def->name),
new Type(BASE_TYPE_UNION, nullptr, enum_def));
return NoError();
}
CheckedError Parser::StartStruct(const std::string &name, StructDef **dest) {
auto &struct_def = *LookupCreateStruct(name, true, true);
if (!struct_def.predecl) return Error("datatype already exists: " + name);
struct_def.predecl = false;
struct_def.name = name;
struct_def.file = file_being_parsed_;
// Move this struct to the back of the vector just in case it was predeclared,
// to preserve declaration order.
*std::remove(structs_.vec.begin(), structs_.vec.end(), &struct_def) =
&struct_def;
*dest = &struct_def;
return NoError();
}
CheckedError Parser::CheckClash(std::vector<FieldDef *> &fields,
StructDef *struct_def, const char *suffix,
BaseType basetype) {
auto len = strlen(suffix);
for (auto it = fields.begin(); it != fields.end(); ++it) {
auto &fname = (*it)->name;
if (fname.length() > len &&
fname.compare(fname.length() - len, len, suffix) == 0 &&
(*it)->value.type.base_type != BASE_TYPE_UTYPE) {
auto field =
struct_def->fields.Lookup(fname.substr(0, fname.length() - len));
if (field && field->value.type.base_type == basetype)
return Error("Field " + fname +
" would clash with generated functions for field " +
field->name);
}
}
return NoError();
}
bool Parser::SupportsOptionalScalars(const flatbuffers::IDLOptions &opts) {
static FLATBUFFERS_CONSTEXPR unsigned long supported_langs =
IDLOptions::kRust | IDLOptions::kSwift | IDLOptions::kLobster |
IDLOptions::kKotlin | IDLOptions::kCpp | IDLOptions::kJava |
IDLOptions::kCSharp | IDLOptions::kTs | IDLOptions::kBinary;
unsigned long langs = opts.lang_to_generate;
return (langs > 0 && langs < IDLOptions::kMAX) && !(langs & ~supported_langs);
}
bool Parser::SupportsOptionalScalars() const {
// If no language is specified, assume optional scalars are supported.
return opts.lang_to_generate == 0 || SupportsOptionalScalars(opts);
}
bool Parser::SupportsDefaultVectorsAndStrings() const {
static FLATBUFFERS_CONSTEXPR unsigned long supported_langs =
IDLOptions::kRust | IDLOptions::kSwift;
return !(opts.lang_to_generate & ~supported_langs);
}
bool Parser::SupportsAdvancedUnionFeatures() const {
return opts.lang_to_generate != 0 &&
(opts.lang_to_generate &
~(IDLOptions::kCpp | IDLOptions::kTs | IDLOptions::kPhp |
IDLOptions::kJava | IDLOptions::kCSharp | IDLOptions::kKotlin |
IDLOptions::kBinary | IDLOptions::kSwift)) == 0;
}
bool Parser::SupportsAdvancedArrayFeatures() const {
return (opts.lang_to_generate &
~(IDLOptions::kCpp | IDLOptions::kPython | IDLOptions::kJava |
IDLOptions::kCSharp | IDLOptions::kJsonSchema | IDLOptions::kJson |
IDLOptions::kBinary)) == 0;
}
Namespace *Parser::UniqueNamespace(Namespace *ns) {
for (auto it = namespaces_.begin(); it != namespaces_.end(); ++it) {
if (ns->components == (*it)->components) {
delete ns;
return *it;
}
}
namespaces_.push_back(ns);
return ns;
}
std::string Parser::UnqualifiedName(const std::string &full_qualified_name) {
Namespace *ns = new Namespace();
std::size_t current, previous = 0;
current = full_qualified_name.find('.');
while (current != std::string::npos) {
ns->components.push_back(
full_qualified_name.substr(previous, current - previous));
previous = current + 1;
current = full_qualified_name.find('.', previous);
}
current_namespace_ = UniqueNamespace(ns);
return full_qualified_name.substr(previous, current - previous);
}
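// e.g. UnqualifiedName("MyGame.Sample.Monster") sets the current namespace
// to MyGame.Sample and returns "Monster".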
static bool compareFieldDefs(const FieldDef *a, const FieldDef *b) {
auto a_id = atoi(a->attributes.Lookup("id")->constant.c_str());
auto b_id = atoi(b->attributes.Lookup("id")->constant.c_str());
return a_id < b_id;
}
CheckedError Parser::ParseDecl() {
std::vector<std::string> dc = doc_comment_;
bool fixed = IsIdent("struct");
if (!fixed && !IsIdent("table")) return Error("declaration expected");
NEXT();
std::string name = attribute_;
EXPECT(kTokenIdentifier);
StructDef *struct_def;
ECHECK(StartStruct(name, &struct_def));
struct_def->doc_comment = dc;
struct_def->fixed = fixed;
ECHECK(ParseMetaData(&struct_def->attributes));
struct_def->sortbysize =
struct_def->attributes.Lookup("original_order") == nullptr && !fixed;
EXPECT('{');
while (token_ != '}') ECHECK(ParseField(*struct_def));
if (fixed) {
const auto force_align = struct_def->attributes.Lookup("force_align");
if (force_align) {
size_t align;
ECHECK(ParseAlignAttribute(force_align->constant, struct_def->minalign,
&align));
struct_def->minalign = align;
}
if (!struct_def->bytesize) return Error("size 0 structs not allowed");
}
struct_def->PadLastField(struct_def->minalign);
// Check if this is a table that has manual id assignments
auto &fields = struct_def->fields.vec;
if (!fixed && fields.size()) {
size_t num_id_fields = 0;
for (auto it = fields.begin(); it != fields.end(); ++it) {
if ((*it)->attributes.Lookup("id")) num_id_fields++;
}
// If any fields have ids..
if (num_id_fields || opts.require_explicit_ids) {
// Then all fields must have them.
if (num_id_fields != fields.size()) {
if (opts.require_explicit_ids) {
return Error(
"all fields must have an 'id' attribute when "
"--require-explicit-ids is used");
} else {
return Error(
"either all fields or no fields must have an 'id' attribute");
}
}
// Simply sort by id, then the fields are the same as if no ids had
// been specified.
std::sort(fields.begin(), fields.end(), compareFieldDefs);
// Verify we have a contiguous set, and reassign vtable offsets.
FLATBUFFERS_ASSERT(fields.size() <=
flatbuffers::numeric_limits<voffset_t>::max());
for (voffset_t i = 0; i < static_cast<voffset_t>(fields.size()); i++) {
auto &field = *fields[i];
const auto &id_str = field.attributes.Lookup("id")->constant;
// Metadata values have a dynamic type: they can be `float`, `int`, or
// `string`.
// The FieldIndexToOffset(i) expects the voffset_t so `id` is limited by
// this type.
voffset_t id = 0;
const auto done = !atot(id_str.c_str(), *this, &id).Check();
if (!done)
return Error("field id\'s must be non-negative number, field: " +
field.name + ", id: " + id_str);
if (i != id)
return Error("field id\'s must be consecutive from 0, id " +
NumToString(i) + " missing or set twice, field: " +
field.name + ", id: " + id_str);
field.value.offset = FieldIndexToOffset(i);
}
}
}
ECHECK(
CheckClash(fields, struct_def, UnionTypeFieldSuffix(), BASE_TYPE_UNION));
ECHECK(CheckClash(fields, struct_def, "Type", BASE_TYPE_UNION));
ECHECK(CheckClash(fields, struct_def, "_length", BASE_TYPE_VECTOR));
ECHECK(CheckClash(fields, struct_def, "Length", BASE_TYPE_VECTOR));
ECHECK(CheckClash(fields, struct_def, "_byte_vector", BASE_TYPE_STRING));
ECHECK(CheckClash(fields, struct_def, "ByteVector", BASE_TYPE_STRING));
EXPECT('}');
types_.Add(current_namespace_->GetFullyQualifiedName(struct_def->name),
new Type(BASE_TYPE_STRUCT, struct_def, nullptr));
return NoError();
}
CheckedError Parser::ParseService() {
std::vector<std::string> service_comment = doc_comment_;
NEXT();
auto service_name = attribute_;
EXPECT(kTokenIdentifier);
auto &service_def = *new ServiceDef();
service_def.name = service_name;
service_def.file = file_being_parsed_;
service_def.doc_comment = service_comment;
service_def.defined_namespace = current_namespace_;
if (services_.Add(current_namespace_->GetFullyQualifiedName(service_name),
&service_def))
return Error("service already exists: " + service_name);
ECHECK(ParseMetaData(&service_def.attributes));
EXPECT('{');
do {
std::vector<std::string> doc_comment = doc_comment_;
auto rpc_name = attribute_;
EXPECT(kTokenIdentifier);
EXPECT('(');
Type reqtype, resptype;
ECHECK(ParseTypeIdent(reqtype));
EXPECT(')');
EXPECT(':');
ECHECK(ParseTypeIdent(resptype));
if (reqtype.base_type != BASE_TYPE_STRUCT || reqtype.struct_def->fixed ||
resptype.base_type != BASE_TYPE_STRUCT || resptype.struct_def->fixed)
return Error("rpc request and response types must be tables");
auto &rpc = *new RPCCall();
rpc.name = rpc_name;
rpc.request = reqtype.struct_def;
rpc.response = resptype.struct_def;
rpc.doc_comment = doc_comment;
if (service_def.calls.Add(rpc_name, &rpc))
return Error("rpc already exists: " + rpc_name);
ECHECK(ParseMetaData(&rpc.attributes));
EXPECT(';');
} while (token_ != '}');
NEXT();
return NoError();
}
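// Illustrative schema accepted by ParseService (an assumed example, not from
// this repository); note that both request and response must be tables:
//
//   table HelloRequest { name: string; }
//   table HelloReply   { message: string; }
//   rpc_service Greeter {
//     SayHello(HelloRequest): HelloReply;
//   }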
bool Parser::SetRootType(const char *name) {
root_struct_def_ = LookupStruct(name);
if (!root_struct_def_)
root_struct_def_ =
LookupStruct(current_namespace_->GetFullyQualifiedName(name));
return root_struct_def_ != nullptr;
}
void Parser::MarkGenerated() {
  // This function marks all existing definitions as having already
  // been generated, which signals that no code should be generated
  // for included files.
for (auto it = enums_.vec.begin(); it != enums_.vec.end(); ++it) {
(*it)->generated = true;
}
for (auto it = structs_.vec.begin(); it != structs_.vec.end(); ++it) {
if (!(*it)->predecl) { (*it)->generated = true; }
}
for (auto it = services_.vec.begin(); it != services_.vec.end(); ++it) {
(*it)->generated = true;
}
}
CheckedError Parser::ParseNamespace() {
NEXT();
auto ns = new Namespace();
namespaces_.push_back(ns); // Store it here to not leak upon error.
if (token_ != ';') {
for (;;) {
ns->components.push_back(attribute_);
EXPECT(kTokenIdentifier);
if (Is('.')) NEXT() else break;
}
}
namespaces_.pop_back();
current_namespace_ = UniqueNamespace(ns);
EXPECT(';');
return NoError();
}
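// Example declaration handled above (illustrative):
//   namespace MyGame.Sample;   // -> components {"MyGame", "Sample"}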
// Best-effort parsing of .proto declarations, with the aim of turning them
// into the closest corresponding FlatBuffers equivalent.
// We parse everything as identifiers instead of keywords, since we don't
// want protobuf keywords to become invalid identifiers in FlatBuffers.
CheckedError Parser::ParseProtoDecl() {
bool isextend = IsIdent("extend");
if (IsIdent("package")) {
// These are identical in syntax to FlatBuffer's namespace decl.
ECHECK(ParseNamespace());
} else if (IsIdent("message") || isextend) {
std::vector<std::string> struct_comment = doc_comment_;
NEXT();
StructDef *struct_def = nullptr;
Namespace *parent_namespace = nullptr;
if (isextend) {
      if (Is('.')) NEXT();  // Qualified names may start with a '.'.
auto id = attribute_;
EXPECT(kTokenIdentifier);
ECHECK(ParseNamespacing(&id, nullptr));
struct_def = LookupCreateStruct(id, false);
if (!struct_def)
return Error("cannot extend unknown message type: " + id);
} else {
std::string name = attribute_;
EXPECT(kTokenIdentifier);
ECHECK(StartStruct(name, &struct_def));
// Since message definitions can be nested, we create a new namespace.
auto ns = new Namespace();
// Copy of current namespace.
*ns = *current_namespace_;
// But with current message name.
ns->components.push_back(name);
ns->from_table++;
parent_namespace = current_namespace_;
current_namespace_ = UniqueNamespace(ns);
}
struct_def->doc_comment = struct_comment;
ECHECK(ParseProtoFields(struct_def, isextend, false));
if (!isextend) { current_namespace_ = parent_namespace; }
if (Is(';')) NEXT();
} else if (IsIdent("enum")) {
// These are almost the same, just with different terminator:
EnumDef *enum_def;
ECHECK(ParseEnum(false, &enum_def));
if (Is(';')) NEXT();
// Temp: remove any duplicates, as .fbs files can't handle them.
enum_def->RemoveDuplicates();
} else if (IsIdent("syntax")) { // Skip these.
NEXT();
EXPECT('=');
EXPECT(kTokenStringConstant);
EXPECT(';');
} else if (IsIdent("option")) { // Skip these.
ECHECK(ParseProtoOption());
EXPECT(';');
} else if (IsIdent("service")) { // Skip these.
NEXT();
EXPECT(kTokenIdentifier);
ECHECK(ParseProtoCurliesOrIdent());
} else {
return Error("don\'t know how to parse .proto declaration starting with " +
TokenToStringId(token_));
}
return NoError();
}
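// Illustrative .proto input and the FlatBuffers shape it maps to (a sketch;
// the exact output depends on the options handled above):
//
//   // .proto                          // resulting .fbs
//   message Monster {                  table Monster {
//     optional string name = 1;          name: string;
//     repeated int32 inventory = 2;      inventory: [int];
//   }                                  }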
CheckedError Parser::StartEnum(const std::string &enum_name, bool is_union,
EnumDef **dest) {
auto &enum_def = *new EnumDef();
enum_def.name = enum_name;
enum_def.file = file_being_parsed_;
enum_def.doc_comment = doc_comment_;
enum_def.is_union = is_union;
enum_def.defined_namespace = current_namespace_;
if (enums_.Add(current_namespace_->GetFullyQualifiedName(enum_name),
&enum_def))
return Error("enum already exists: " + enum_name);
enum_def.underlying_type.base_type =
is_union ? BASE_TYPE_UTYPE : BASE_TYPE_INT;
enum_def.underlying_type.enum_def = &enum_def;
if (dest) *dest = &enum_def;
return NoError();
}
CheckedError Parser::ParseProtoFields(StructDef *struct_def, bool isextend,
bool inside_oneof) {
EXPECT('{');
while (token_ != '}') {
if (IsIdent("message") || IsIdent("extend") || IsIdent("enum")) {
// Nested declarations.
ECHECK(ParseProtoDecl());
} else if (IsIdent("extensions")) { // Skip these.
NEXT();
EXPECT(kTokenIntegerConstant);
if (Is(kTokenIdentifier)) {
NEXT(); // to
NEXT(); // num
}
EXPECT(';');
} else if (IsIdent("option")) { // Skip these.
ECHECK(ParseProtoOption());
EXPECT(';');
} else if (IsIdent("reserved")) { // Skip these.
NEXT();
while (!Is(';')) { NEXT(); } // A variety of formats, just skip.
NEXT();
} else {
std::vector<std::string> field_comment = doc_comment_;
// Parse the qualifier.
bool required = false;
bool repeated = false;
bool oneof = false;
if (!inside_oneof) {
if (IsIdent("optional")) {
// This is the default.
NEXT();
} else if (IsIdent("required")) {
required = true;
NEXT();
} else if (IsIdent("repeated")) {
repeated = true;
NEXT();
} else if (IsIdent("oneof")) {
oneof = true;
NEXT();
} else {
        // Can't error here: proto3 allows field decls without any of the above.
}
}
StructDef *anonymous_struct = nullptr;
EnumDef *oneof_union = nullptr;
Type type;
if (IsIdent("group") || oneof) {
if (!oneof) NEXT();
if (oneof && opts.proto_oneof_union) {
auto name = MakeCamel(attribute_, true) + "Union";
ECHECK(StartEnum(name, true, &oneof_union));
type = Type(BASE_TYPE_UNION, nullptr, oneof_union);
} else {
auto name = "Anonymous" + NumToString(anonymous_counter_++);
ECHECK(StartStruct(name, &anonymous_struct));
type = Type(BASE_TYPE_STRUCT, anonymous_struct);
}
} else {
ECHECK(ParseTypeFromProtoType(&type));
}
// Repeated elements get mapped to a vector.
if (repeated) {
type.element = type.base_type;
type.base_type = BASE_TYPE_VECTOR;
if (type.element == BASE_TYPE_VECTOR) {
          // We have a vector of vectors, which FlatBuffers doesn't support.
// For now make it a vector of string (since the source is likely
// "repeated bytes").
// TODO(wvo): A better solution would be to wrap this in a table.
type.element = BASE_TYPE_STRING;
}
}
std::string name = attribute_;
EXPECT(kTokenIdentifier);
if (!oneof) {
// Parse the field id. Since we're just translating schemas, not
// any kind of binary compatibility, we can safely ignore these, and
// assign our own.
EXPECT('=');
EXPECT(kTokenIntegerConstant);
}
FieldDef *field = nullptr;
if (isextend) {
// We allow a field to be re-defined when extending.
// TODO: are there situations where that is problematic?
field = struct_def->fields.Lookup(name);
}
if (!field) ECHECK(AddField(*struct_def, name, type, &field));
field->doc_comment = field_comment;
if (!IsScalar(type.base_type) && required) {
field->presence = FieldDef::kRequired;
}
// See if there's a default specified.
if (Is('[')) {
NEXT();
for (;;) {
auto key = attribute_;
ECHECK(ParseProtoKey());
EXPECT('=');
auto val = attribute_;
ECHECK(ParseProtoCurliesOrIdent());
if (key == "default") {
// Temp: skip non-numeric and non-boolean defaults (enums).
auto numeric = strpbrk(val.c_str(), "0123456789-+.");
if (IsScalar(type.base_type) && numeric == val.c_str()) {
field->value.constant = val;
} else if (val == "true") {
field->value.constant = val;
} // "false" is default, no need to handle explicitly.
} else if (key == "deprecated") {
field->deprecated = val == "true";
}
if (!Is(',')) break;
NEXT();
}
EXPECT(']');
}
if (anonymous_struct) {
ECHECK(ParseProtoFields(anonymous_struct, false, oneof));
if (Is(';')) NEXT();
} else if (oneof_union) {
// Parse into a temporary StructDef, then transfer fields into an
// EnumDef describing the oneof as a union.
StructDef oneof_struct;
ECHECK(ParseProtoFields(&oneof_struct, false, oneof));
if (Is(';')) NEXT();
for (auto field_it = oneof_struct.fields.vec.begin();
field_it != oneof_struct.fields.vec.end(); ++field_it) {
const auto &oneof_field = **field_it;
const auto &oneof_type = oneof_field.value.type;
if (oneof_type.base_type != BASE_TYPE_STRUCT ||
!oneof_type.struct_def || oneof_type.struct_def->fixed)
return Error("oneof '" + name +
"' cannot be mapped to a union because member '" +
oneof_field.name + "' is not a table type.");
EnumValBuilder evb(*this, *oneof_union);
auto ev = evb.CreateEnumerator(oneof_type.struct_def->name);
ev->union_type = oneof_type;
ev->doc_comment = oneof_field.doc_comment;
ECHECK(evb.AcceptEnumerator(oneof_field.name));
}
} else {
EXPECT(';');
}
}
}
NEXT();
return NoError();
}
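// Sketch of the oneof-to-union mapping implemented above (illustrative;
// requires the proto_oneof_union option to be set):
//
//   // .proto                        // resulting .fbs
//   message Msg {                    union MsgUnion { a: A, b: B }
//     oneof msg {                    table Msg {
//       A a = 1;                       msg: MsgUnion;
//       B b = 2;                     }
//     }
//   }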
CheckedError Parser::ParseProtoKey() {
if (token_ == '(') {
NEXT();
// Skip "(a.b)" style custom attributes.
while (token_ == '.' || token_ == kTokenIdentifier) NEXT();
EXPECT(')');
while (Is('.')) {
NEXT();
EXPECT(kTokenIdentifier);
}
} else {
EXPECT(kTokenIdentifier);
}
return NoError();
}
CheckedError Parser::ParseProtoCurliesOrIdent() {
if (Is('{')) {
NEXT();
for (int nesting = 1; nesting;) {
if (token_ == '{')
nesting++;
else if (token_ == '}')
nesting--;
NEXT();
}
} else {
NEXT(); // Any single token.
}
return NoError();
}
CheckedError Parser::ParseProtoOption() {
NEXT();
ECHECK(ParseProtoKey());
EXPECT('=');
ECHECK(ParseProtoCurliesOrIdent());
return NoError();
}
// Parse a protobuf type, and map it to the corresponding FlatBuffer one.
CheckedError Parser::ParseTypeFromProtoType(Type *type) {
struct type_lookup {
const char *proto_type;
BaseType fb_type, element;
};
static type_lookup lookup[] = {
{ "float", BASE_TYPE_FLOAT, BASE_TYPE_NONE },
{ "double", BASE_TYPE_DOUBLE, BASE_TYPE_NONE },
{ "int32", BASE_TYPE_INT, BASE_TYPE_NONE },
{ "int64", BASE_TYPE_LONG, BASE_TYPE_NONE },
{ "uint32", BASE_TYPE_UINT, BASE_TYPE_NONE },
{ "uint64", BASE_TYPE_ULONG, BASE_TYPE_NONE },
{ "sint32", BASE_TYPE_INT, BASE_TYPE_NONE },
{ "sint64", BASE_TYPE_LONG, BASE_TYPE_NONE },
{ "fixed32", BASE_TYPE_UINT, BASE_TYPE_NONE },
{ "fixed64", BASE_TYPE_ULONG, BASE_TYPE_NONE },
{ "sfixed32", BASE_TYPE_INT, BASE_TYPE_NONE },
{ "sfixed64", BASE_TYPE_LONG, BASE_TYPE_NONE },
{ "bool", BASE_TYPE_BOOL, BASE_TYPE_NONE },
{ "string", BASE_TYPE_STRING, BASE_TYPE_NONE },
{ "bytes", BASE_TYPE_VECTOR, BASE_TYPE_UCHAR },
{ nullptr, BASE_TYPE_NONE, BASE_TYPE_NONE }
};
for (auto tl = lookup; tl->proto_type; tl++) {
if (attribute_ == tl->proto_type) {
type->base_type = tl->fb_type;
type->element = tl->element;
NEXT();
return NoError();
}
}
  if (Is('.')) NEXT();  // Qualified names may start with a '.'.
ECHECK(ParseTypeIdent(*type));
return NoError();
}
CheckedError Parser::SkipAnyJsonValue() {
ParseDepthGuard depth_guard(this);
ECHECK(depth_guard.Check());
switch (token_) {
case '{': {
size_t fieldn_outer = 0;
return ParseTableDelimiters(fieldn_outer, nullptr,
[&](const std::string &, size_t &fieldn,
const StructDef *) -> CheckedError {
ECHECK(SkipAnyJsonValue());
fieldn++;
return NoError();
});
}
case '[': {
uoffset_t count = 0;
return ParseVectorDelimiters(count, [&](uoffset_t &) -> CheckedError {
return SkipAnyJsonValue();
});
}
case kTokenStringConstant:
case kTokenIntegerConstant:
case kTokenFloatConstant: NEXT(); break;
default:
if (IsIdent("true") || IsIdent("false") || IsIdent("null")) {
NEXT();
} else
return TokenError();
}
return NoError();
}
CheckedError Parser::ParseFlexBufferValue(flexbuffers::Builder *builder) {
ParseDepthGuard depth_guard(this);
ECHECK(depth_guard.Check());
switch (token_) {
case '{': {
auto start = builder->StartMap();
size_t fieldn_outer = 0;
auto err =
ParseTableDelimiters(fieldn_outer, nullptr,
[&](const std::string &name, size_t &fieldn,
const StructDef *) -> CheckedError {
builder->Key(name);
ECHECK(ParseFlexBufferValue(builder));
fieldn++;
return NoError();
});
ECHECK(err);
builder->EndMap(start);
if (builder->HasDuplicateKeys())
return Error("FlexBuffers map has duplicate keys");
break;
}
case '[': {
auto start = builder->StartVector();
uoffset_t count = 0;
ECHECK(ParseVectorDelimiters(count, [&](uoffset_t &) -> CheckedError {
return ParseFlexBufferValue(builder);
}));
builder->EndVector(start, false, false);
break;
}
case kTokenStringConstant:
builder->String(attribute_);
EXPECT(kTokenStringConstant);
break;
case kTokenIntegerConstant:
builder->Int(StringToInt(attribute_.c_str()));
EXPECT(kTokenIntegerConstant);
break;
case kTokenFloatConstant: {
double d;
StringToNumber(attribute_.c_str(), &d);
builder->Double(d);
EXPECT(kTokenFloatConstant);
break;
}
default:
if (IsIdent("true")) {
builder->Bool(true);
NEXT();
} else if (IsIdent("false")) {
builder->Bool(false);
NEXT();
} else if (IsIdent("null")) {
builder->Null();
NEXT();
} else
return TokenError();
}
return NoError();
}
bool Parser::ParseFlexBuffer(const char *source, const char *source_filename,
flexbuffers::Builder *builder) {
const auto initial_depth = parse_depth_counter_;
(void)initial_depth;
auto ok = !StartParseFile(source, source_filename).Check() &&
!ParseFlexBufferValue(builder).Check();
if (ok) builder->Finish();
FLATBUFFERS_ASSERT(initial_depth == parse_depth_counter_);
return ok;
}
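// Usage sketch for the FlexBuffers entry point above (illustrative; error
// handling omitted):
//
//   flexbuffers::Builder fbb;
//   flatbuffers::Parser parser;
//   if (parser.ParseFlexBuffer("{ a: 1, b: [ 2, 3 ] }", nullptr, &fbb)) {
//     auto root = flexbuffers::GetRoot(fbb.GetBuffer());
//     // root.AsMap()["a"].AsInt64() == 1
//   }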
bool Parser::Parse(const char *source, const char **include_paths,
const char *source_filename) {
const auto initial_depth = parse_depth_counter_;
(void)initial_depth;
bool r;
if (opts.use_flexbuffers) {
r = ParseFlexBuffer(source, source_filename, &flex_builder_);
} else {
r = !ParseRoot(source, include_paths, source_filename).Check();
}
FLATBUFFERS_ASSERT(initial_depth == parse_depth_counter_);
return r;
}
bool Parser::ParseJson(const char *json, const char *json_filename) {
const auto initial_depth = parse_depth_counter_;
(void)initial_depth;
builder_.Clear();
const auto done =
!StartParseFile(json, json_filename).Check() && !DoParseJson().Check();
FLATBUFFERS_ASSERT(initial_depth == parse_depth_counter_);
return done;
}
CheckedError Parser::StartParseFile(const char *source,
const char *source_filename) {
file_being_parsed_ = source_filename ? source_filename : "";
source_ = source;
ResetState(source_);
error_.clear();
ECHECK(SkipByteOrderMark());
NEXT();
if (Is(kTokenEof)) return Error("input file is empty");
return NoError();
}
CheckedError Parser::ParseRoot(const char *source, const char **include_paths,
const char *source_filename) {
ECHECK(DoParse(source, include_paths, source_filename, nullptr));
// Check that all types were defined.
for (auto it = structs_.vec.begin(); it != structs_.vec.end();) {
auto &struct_def = **it;
if (struct_def.predecl) {
if (opts.proto_mode) {
// Protos allow enums to be used before declaration, so check if that
// is the case here.
EnumDef *enum_def = nullptr;
for (size_t components =
struct_def.defined_namespace->components.size() + 1;
components && !enum_def; components--) {
auto qualified_name =
struct_def.defined_namespace->GetFullyQualifiedName(
struct_def.name, components - 1);
enum_def = LookupEnum(qualified_name);
}
if (enum_def) {
// This is pretty slow, but a simple solution for now.
auto initial_count = struct_def.refcount;
for (auto struct_it = structs_.vec.begin();
struct_it != structs_.vec.end(); ++struct_it) {
auto &sd = **struct_it;
for (auto field_it = sd.fields.vec.begin();
field_it != sd.fields.vec.end(); ++field_it) {
auto &field = **field_it;
if (field.value.type.struct_def == &struct_def) {
field.value.type.struct_def = nullptr;
field.value.type.enum_def = enum_def;
auto &bt = IsVector(field.value.type)
? field.value.type.element
: field.value.type.base_type;
FLATBUFFERS_ASSERT(bt == BASE_TYPE_STRUCT);
bt = enum_def->underlying_type.base_type;
struct_def.refcount--;
enum_def->refcount++;
}
}
}
if (struct_def.refcount)
return Error("internal: " + NumToString(struct_def.refcount) + "/" +
NumToString(initial_count) +
" use(s) of pre-declaration enum not accounted for: " +
enum_def->name);
structs_.dict.erase(structs_.dict.find(struct_def.name));
it = structs_.vec.erase(it);
delete &struct_def;
continue; // Skip error.
}
}
auto err = "type referenced but not defined (check namespace): " +
struct_def.name;
if (struct_def.original_location)
err += ", originally at: " + *struct_def.original_location;
return Error(err);
}
++it;
}
// This check has to happen here and not earlier, because only now do we
// know for sure what the type of these are.
for (auto it = enums_.vec.begin(); it != enums_.vec.end(); ++it) {
auto &enum_def = **it;
if (enum_def.is_union) {
for (auto val_it = enum_def.Vals().begin();
val_it != enum_def.Vals().end(); ++val_it) {
auto &val = **val_it;
if (!SupportsAdvancedUnionFeatures() &&
(IsStruct(val.union_type) || IsString(val.union_type)))
return Error(
"only tables can be union elements in the generated language: " +
val.name);
}
}
}
  // Parse a JSON object only if the schema has been parsed.
if (token_ == '{') { ECHECK(DoParseJson()); }
EXPECT(kTokenEof);
return NoError();
}
// Generate a unique hash for a file based on its name and contents (if any).
static uint64_t HashFile(const char *source_filename, const char *source) {
uint64_t hash = 0;
if (source_filename)
hash = HashFnv1a<uint64_t>(StripPath(source_filename).c_str());
if (source && *source) hash ^= HashFnv1a<uint64_t>(source);
return hash;
}
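// e.g. HashFile("dir/schema.fbs", "table T {}") xors the FNV-1a hash of
// "schema.fbs" (path stripped) with the FNV-1a hash of the contents.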
CheckedError Parser::DoParse(const char *source, const char **include_paths,
const char *source_filename,
const char *include_filename) {
uint64_t source_hash = 0;
if (source_filename) {
// If the file is in-memory, don't include its contents in the hash as we
// won't be able to load them later.
if (FileExists(source_filename))
source_hash = HashFile(source_filename, source);
else
source_hash = HashFile(source_filename, nullptr);
if (included_files_.find(source_hash) == included_files_.end()) {
included_files_[source_hash] = include_filename ? include_filename : "";
files_included_per_file_[source_filename] = std::set<std::string>();
} else {
return NoError();
}
}
if (!include_paths) {
static const char *current_directory[] = { "", nullptr };
include_paths = current_directory;
}
field_stack_.clear();
builder_.Clear();
// Start with a blank namespace just in case this file doesn't have one.
current_namespace_ = empty_namespace_;
ECHECK(StartParseFile(source, source_filename));
// Includes must come before type declarations:
for (;;) {
// Parse pre-include proto statements if any:
if (opts.proto_mode && (attribute_ == "option" || attribute_ == "syntax" ||
attribute_ == "package")) {
ECHECK(ParseProtoDecl());
} else if (IsIdent("native_include")) {
NEXT();
vector_emplace_back(&native_included_files_, attribute_);
EXPECT(kTokenStringConstant);
EXPECT(';');
} else if (IsIdent("include") || (opts.proto_mode && IsIdent("import"))) {
NEXT();
if (opts.proto_mode && attribute_ == "public") NEXT();
auto name = flatbuffers::PosixPath(attribute_.c_str());
EXPECT(kTokenStringConstant);
// Look for the file relative to the directory of the current file.
std::string filepath;
if (source_filename) {
auto source_file_directory =
flatbuffers::StripFileName(source_filename);
filepath = flatbuffers::ConCatPathFileName(source_file_directory, name);
}
if (filepath.empty() || !FileExists(filepath.c_str())) {
// Look for the file in include_paths.
for (auto paths = include_paths; paths && *paths; paths++) {
filepath = flatbuffers::ConCatPathFileName(*paths, name);
if (FileExists(filepath.c_str())) break;
}
}
if (filepath.empty())
return Error("unable to locate include file: " + name);
if (source_filename)
files_included_per_file_[source_filename].insert(filepath);
std::string contents;
bool file_loaded = LoadFile(filepath.c_str(), true, &contents);
if (included_files_.find(HashFile(filepath.c_str(), contents.c_str())) ==
included_files_.end()) {
// We found an include file that we have not parsed yet.
// Parse it.
if (!file_loaded) return Error("unable to load include file: " + name);
ECHECK(DoParse(contents.c_str(), include_paths, filepath.c_str(),
name.c_str()));
// We generally do not want to output code for any included files:
if (!opts.generate_all) MarkGenerated();
// Reset these just in case the included file had them, and the
// parent doesn't.
root_struct_def_ = nullptr;
file_identifier_.clear();
file_extension_.clear();
// This is the easiest way to continue this file after an include:
// instead of saving and restoring all the state, we simply start the
// file anew. This will cause it to encounter the same include
// statement again, but this time it will skip it, because it was
// entered into included_files_.
        // This is recursive, but it only goes as deep as the number of
        // include statements.
included_files_.erase(source_hash);
return DoParse(source, include_paths, source_filename,
include_filename);
}
EXPECT(';');
} else {
break;
}
}
// Now parse all other kinds of declarations:
while (token_ != kTokenEof) {
if (opts.proto_mode) {
ECHECK(ParseProtoDecl());
} else if (IsIdent("namespace")) {
ECHECK(ParseNamespace());
} else if (token_ == '{') {
return NoError();
} else if (IsIdent("enum")) {
ECHECK(ParseEnum(false, nullptr));
} else if (IsIdent("union")) {
ECHECK(ParseEnum(true, nullptr));
} else if (IsIdent("root_type")) {
NEXT();
auto root_type = attribute_;
EXPECT(kTokenIdentifier);
ECHECK(ParseNamespacing(&root_type, nullptr));
if (opts.root_type.empty()) {
if (!SetRootType(root_type.c_str()))
return Error("unknown root type: " + root_type);
if (root_struct_def_->fixed) return Error("root type must be a table");
}
EXPECT(';');
} else if (IsIdent("file_identifier")) {
NEXT();
file_identifier_ = attribute_;
EXPECT(kTokenStringConstant);
if (file_identifier_.length() != FlatBufferBuilder::kFileIdentifierLength)
return Error("file_identifier must be exactly " +
NumToString(FlatBufferBuilder::kFileIdentifierLength) +
" characters");
EXPECT(';');
} else if (IsIdent("file_extension")) {
NEXT();
file_extension_ = attribute_;
EXPECT(kTokenStringConstant);
EXPECT(';');
} else if (IsIdent("include")) {
return Error("includes must come before declarations");
} else if (IsIdent("attribute")) {
NEXT();
auto name = attribute_;
if (Is(kTokenIdentifier)) {
NEXT();
} else {
EXPECT(kTokenStringConstant);
}
EXPECT(';');
known_attributes_[name] = false;
} else if (IsIdent("rpc_service")) {
ECHECK(ParseService());
} else {
ECHECK(ParseDecl());
}
}
return NoError();
}
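// Typical top-level usage of DoParse via Parse() (illustrative; error
// handling kept minimal):
//
//   flatbuffers::Parser parser;
//   const char *include_paths[] = { "schemas/", nullptr };
//   if (!parser.Parse(schema_source, include_paths, "monster.fbs"))
//     printf("%s\n", parser.error_.c_str());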
CheckedError Parser::DoParseJson() {
if (token_ != '{') {
EXPECT('{');
} else {
if (!root_struct_def_) return Error("no root type set to parse json with");
if (builder_.GetSize()) {
return Error("cannot have more than one json object in a file");
}
uoffset_t toff;
ECHECK(ParseTable(*root_struct_def_, nullptr, &toff));
if (opts.size_prefixed) {
builder_.FinishSizePrefixed(
Offset<Table>(toff),
file_identifier_.length() ? file_identifier_.c_str() : nullptr);
} else {
builder_.Finish(Offset<Table>(toff), file_identifier_.length()
? file_identifier_.c_str()
: nullptr);
}
}
// Check that JSON file doesn't contain more objects or IDL directives.
// Comments after JSON are allowed.
EXPECT(kTokenEof);
return NoError();
}
std::set<std::string> Parser::GetIncludedFilesRecursive(
const std::string &file_name) const {
std::set<std::string> included_files;
std::list<std::string> to_process;
if (file_name.empty()) return included_files;
to_process.push_back(file_name);
while (!to_process.empty()) {
std::string current = to_process.front();
to_process.pop_front();
included_files.insert(current);
    // Work around the lack of a const accessor in C++98 maps.
auto &new_files =
(*const_cast<std::map<std::string, std::set<std::string>> *>(
&files_included_per_file_))[current];
for (auto it = new_files.begin(); it != new_files.end(); ++it) {
if (included_files.find(*it) == included_files.end())
to_process.push_back(*it);
}
}
return included_files;
}
// Schema serialization functionality:
template<typename T> bool compareName(const T *a, const T *b) {
return a->defined_namespace->GetFullyQualifiedName(a->name) <
b->defined_namespace->GetFullyQualifiedName(b->name);
}
template<typename T> void AssignIndices(const std::vector<T *> &defvec) {
// Pre-sort these vectors, such that we can set the correct indices for them.
auto vec = defvec;
std::sort(vec.begin(), vec.end(), compareName<T>);
for (int i = 0; i < static_cast<int>(vec.size()); i++) vec[i]->index = i;
}
void Parser::Serialize() {
builder_.Clear();
AssignIndices(structs_.vec);
AssignIndices(enums_.vec);
std::vector<Offset<reflection::Object>> object_offsets;
for (auto it = structs_.vec.begin(); it != structs_.vec.end(); ++it) {
auto offset = (*it)->Serialize(&builder_, *this);
object_offsets.push_back(offset);
(*it)->serialized_location = offset.o;
}
std::vector<Offset<reflection::Enum>> enum_offsets;
for (auto it = enums_.vec.begin(); it != enums_.vec.end(); ++it) {
auto offset = (*it)->Serialize(&builder_, *this);
enum_offsets.push_back(offset);
(*it)->serialized_location = offset.o;
}
std::vector<Offset<reflection::Service>> service_offsets;
for (auto it = services_.vec.begin(); it != services_.vec.end(); ++it) {
auto offset = (*it)->Serialize(&builder_, *this);
service_offsets.push_back(offset);
(*it)->serialized_location = offset.o;
}
auto objs__ = builder_.CreateVectorOfSortedTables(&object_offsets);
auto enum__ = builder_.CreateVectorOfSortedTables(&enum_offsets);
auto fiid__ = builder_.CreateString(file_identifier_);
auto fext__ = builder_.CreateString(file_extension_);
auto serv__ = builder_.CreateVectorOfSortedTables(&service_offsets);
auto schema_offset = reflection::CreateSchema(
builder_, objs__, enum__, fiid__, fext__,
(root_struct_def_ ? root_struct_def_->serialized_location : 0), serv__);
if (opts.size_prefixed) {
builder_.FinishSizePrefixed(schema_offset, reflection::SchemaIdentifier());
} else {
builder_.Finish(schema_offset, reflection::SchemaIdentifier());
}
}
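// After Serialize(), builder_ holds a binary schema (.bfbs) readable through
// the generated reflection API (illustrative):
//
//   parser.Serialize();
//   auto schema = reflection::GetSchema(parser.builder_.GetBufferPointer());
//   // schema->objects(), schema->enums(), schema->root_table(), ...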
static Namespace *GetNamespace(
const std::string &qualified_name, std::vector<Namespace *> &namespaces,
std::map<std::string, Namespace *> &namespaces_index) {
size_t dot = qualified_name.find_last_of('.');
std::string namespace_name = (dot != std::string::npos)
? std::string(qualified_name.c_str(), dot)
: "";
Namespace *&ns = namespaces_index[namespace_name];
if (!ns) {
ns = new Namespace();
namespaces.push_back(ns);
size_t pos = 0;
for (;;) {
dot = qualified_name.find('.', pos);
if (dot == std::string::npos) { break; }
ns->components.push_back(qualified_name.substr(pos, dot - pos));
pos = dot + 1;
}
}
return ns;
}
Offset<reflection::Object> StructDef::Serialize(FlatBufferBuilder *builder,
const Parser &parser) const {
std::vector<Offset<reflection::Field>> field_offsets;
for (auto it = fields.vec.begin(); it != fields.vec.end(); ++it) {
field_offsets.push_back((*it)->Serialize(
builder, static_cast<uint16_t>(it - fields.vec.begin()), parser));
}
auto qualified_name = defined_namespace->GetFullyQualifiedName(name);
auto name__ = builder->CreateString(qualified_name);
auto flds__ = builder->CreateVectorOfSortedTables(&field_offsets);
auto attr__ = SerializeAttributes(builder, parser);
auto docs__ = parser.opts.binary_schema_comments
? builder->CreateVectorOfStrings(doc_comment)
: 0;
return reflection::CreateObject(*builder, name__, flds__, fixed,
static_cast<int>(minalign),
static_cast<int>(bytesize), attr__, docs__);
}
bool StructDef::Deserialize(Parser &parser, const reflection::Object *object) {
if (!DeserializeAttributes(parser, object->attributes())) return false;
DeserializeDoc(doc_comment, object->documentation());
name = parser.UnqualifiedName(object->name()->str());
predecl = false;
sortbysize = attributes.Lookup("original_order") == nullptr && !fixed;
const auto &of = *(object->fields());
auto indexes = std::vector<uoffset_t>(of.size());
for (uoffset_t i = 0; i < of.size(); i++) indexes[of.Get(i)->id()] = i;
size_t tmp_struct_size = 0;
for (size_t i = 0; i < indexes.size(); i++) {
auto field = of.Get(indexes[i]);
auto field_def = new FieldDef();
if (!field_def->Deserialize(parser, field) ||
fields.Add(field_def->name, field_def)) {
delete field_def;
return false;
}
if (fixed) {
// Recompute padding since that's currently not serialized.
auto size = InlineSize(field_def->value.type);
auto next_field =
i + 1 < indexes.size() ? of.Get(indexes[i + 1]) : nullptr;
tmp_struct_size += size;
field_def->padding =
next_field ? (next_field->offset() - field_def->value.offset) - size
: PaddingBytes(tmp_struct_size, minalign);
tmp_struct_size += field_def->padding;
}
}
FLATBUFFERS_ASSERT(static_cast<int>(tmp_struct_size) == object->bytesize());
return true;
}
Offset<reflection::Field> FieldDef::Serialize(FlatBufferBuilder *builder,
uint16_t id,
const Parser &parser) const {
auto name__ = builder->CreateString(name);
auto type__ = value.type.Serialize(builder);
auto attr__ = SerializeAttributes(builder, parser);
auto docs__ = parser.opts.binary_schema_comments
? builder->CreateVectorOfStrings(doc_comment)
: 0;
double d;
StringToNumber(value.constant.c_str(), &d);
return reflection::CreateField(
*builder, name__, type__, id, value.offset,
// Is uint64>max(int64) tested?
IsInteger(value.type.base_type) ? StringToInt(value.constant.c_str()) : 0,
// result may be platform-dependent if underlying is float (not double)
IsFloat(value.type.base_type) ? d : 0.0, deprecated, IsRequired(), key,
attr__, docs__, IsOptional());
// TODO: value.constant is almost always "0", we could save quite a bit of
// space by sharing it. Same for common values of value.type.
}
bool FieldDef::Deserialize(Parser &parser, const reflection::Field *field) {
name = field->name()->str();
defined_namespace = parser.current_namespace_;
if (!value.type.Deserialize(parser, field->type())) return false;
value.offset = field->offset();
if (IsInteger(value.type.base_type)) {
value.constant = NumToString(field->default_integer());
} else if (IsFloat(value.type.base_type)) {
value.constant = FloatToString(field->default_real(), 16);
}
presence = FieldDef::MakeFieldPresence(field->optional(), field->required());
key = field->key();
if (!DeserializeAttributes(parser, field->attributes())) return false;
// TODO: this should probably be handled by a separate attribute
if (attributes.Lookup("flexbuffer")) {
flexbuffer = true;
parser.uses_flexbuffers_ = true;
if (value.type.base_type != BASE_TYPE_VECTOR ||
value.type.element != BASE_TYPE_UCHAR)
return false;
}
if (auto nested = attributes.Lookup("nested_flatbuffer")) {
auto nested_qualified_name =
parser.current_namespace_->GetFullyQualifiedName(nested->constant);
nested_flatbuffer = parser.LookupStruct(nested_qualified_name);
if (!nested_flatbuffer) return false;
}
shared = attributes.Lookup("shared") != nullptr;
DeserializeDoc(doc_comment, field->documentation());
return true;
}
Offset<reflection::RPCCall> RPCCall::Serialize(FlatBufferBuilder *builder,
const Parser &parser) const {
auto name__ = builder->CreateString(name);
auto attr__ = SerializeAttributes(builder, parser);
auto docs__ = parser.opts.binary_schema_comments
? builder->CreateVectorOfStrings(doc_comment)
: 0;
return reflection::CreateRPCCall(
*builder, name__, request->serialized_location,
response->serialized_location, attr__, docs__);
}
bool RPCCall::Deserialize(Parser &parser, const reflection::RPCCall *call) {
name = call->name()->str();
if (!DeserializeAttributes(parser, call->attributes())) return false;
DeserializeDoc(doc_comment, call->documentation());
request = parser.structs_.Lookup(call->request()->name()->str());
response = parser.structs_.Lookup(call->response()->name()->str());
if (!request || !response) { return false; }
return true;
}
Offset<reflection::Service> ServiceDef::Serialize(FlatBufferBuilder *builder,
const Parser &parser) const {
std::vector<Offset<reflection::RPCCall>> servicecall_offsets;
for (auto it = calls.vec.begin(); it != calls.vec.end(); ++it) {
servicecall_offsets.push_back((*it)->Serialize(builder, parser));
}
auto qualified_name = defined_namespace->GetFullyQualifiedName(name);
auto name__ = builder->CreateString(qualified_name);
auto call__ = builder->CreateVector(servicecall_offsets);
auto attr__ = SerializeAttributes(builder, parser);
auto docs__ = parser.opts.binary_schema_comments
? builder->CreateVectorOfStrings(doc_comment)
: 0;
return reflection::CreateService(*builder, name__, call__, attr__, docs__);
}
bool ServiceDef::Deserialize(Parser &parser,
const reflection::Service *service) {
name = parser.UnqualifiedName(service->name()->str());
if (service->calls()) {
for (uoffset_t i = 0; i < service->calls()->size(); ++i) {
auto call = new RPCCall();
if (!call->Deserialize(parser, service->calls()->Get(i)) ||
calls.Add(call->name, call)) {
delete call;
return false;
}
}
}
if (!DeserializeAttributes(parser, service->attributes())) return false;
DeserializeDoc(doc_comment, service->documentation());
return true;
}
Offset<reflection::Enum> EnumDef::Serialize(FlatBufferBuilder *builder,
const Parser &parser) const {
std::vector<Offset<reflection::EnumVal>> enumval_offsets;
for (auto it = vals.vec.begin(); it != vals.vec.end(); ++it) {
enumval_offsets.push_back((*it)->Serialize(builder, parser));
}
auto qualified_name = defined_namespace->GetFullyQualifiedName(name);
auto name__ = builder->CreateString(qualified_name);
auto vals__ = builder->CreateVector(enumval_offsets);
auto type__ = underlying_type.Serialize(builder);
auto attr__ = SerializeAttributes(builder, parser);
auto docs__ = parser.opts.binary_schema_comments
? builder->CreateVectorOfStrings(doc_comment)
: 0;
return reflection::CreateEnum(*builder, name__, vals__, is_union, type__,
attr__, docs__);
}
bool EnumDef::Deserialize(Parser &parser, const reflection::Enum *_enum) {
name = parser.UnqualifiedName(_enum->name()->str());
for (uoffset_t i = 0; i < _enum->values()->size(); ++i) {
auto val = new EnumVal();
if (!val->Deserialize(parser, _enum->values()->Get(i)) ||
vals.Add(val->name, val)) {
delete val;
return false;
}
}
is_union = _enum->is_union();
if (!underlying_type.Deserialize(parser, _enum->underlying_type())) {
return false;
}
if (!DeserializeAttributes(parser, _enum->attributes())) return false;
DeserializeDoc(doc_comment, _enum->documentation());
return true;
}
Offset<reflection::EnumVal> EnumVal::Serialize(FlatBufferBuilder *builder,
const Parser &parser) const {
auto name__ = builder->CreateString(name);
auto type__ = union_type.Serialize(builder);
auto docs__ = parser.opts.binary_schema_comments
? builder->CreateVectorOfStrings(doc_comment)
: 0;
return reflection::CreateEnumVal(
*builder, name__, value,
union_type.struct_def ? union_type.struct_def->serialized_location : 0,
type__, docs__);
}
bool EnumVal::Deserialize(const Parser &parser,
const reflection::EnumVal *val) {
name = val->name()->str();
value = val->value();
if (!union_type.Deserialize(parser, val->union_type())) return false;
DeserializeDoc(doc_comment, val->documentation());
return true;
}
Offset<reflection::Type> Type::Serialize(FlatBufferBuilder *builder) const {
return reflection::CreateType(
*builder, static_cast<reflection::BaseType>(base_type),
static_cast<reflection::BaseType>(element),
struct_def ? struct_def->index : (enum_def ? enum_def->index : -1),
fixed_length);
}
bool Type::Deserialize(const Parser &parser, const reflection::Type *type) {
if (type == nullptr) return true;
base_type = static_cast<BaseType>(type->base_type());
element = static_cast<BaseType>(type->element());
fixed_length = type->fixed_length();
if (type->index() >= 0) {
bool is_series = type->base_type() == reflection::Vector ||
type->base_type() == reflection::Array;
if (type->base_type() == reflection::Obj ||
(is_series && type->element() == reflection::Obj)) {
if (static_cast<size_t>(type->index()) < parser.structs_.vec.size()) {
struct_def = parser.structs_.vec[type->index()];
struct_def->refcount++;
} else {
return false;
}
} else {
if (static_cast<size_t>(type->index()) < parser.enums_.vec.size()) {
enum_def = parser.enums_.vec[type->index()];
} else {
return false;
}
}
}
return true;
}
flatbuffers::Offset<
flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>>
Definition::SerializeAttributes(FlatBufferBuilder *builder,
const Parser &parser) const {
std::vector<flatbuffers::Offset<reflection::KeyValue>> attrs;
for (auto kv = attributes.dict.begin(); kv != attributes.dict.end(); ++kv) {
auto it = parser.known_attributes_.find(kv->first);
FLATBUFFERS_ASSERT(it != parser.known_attributes_.end());
if (parser.opts.binary_schema_builtins || !it->second) {
auto key = builder->CreateString(kv->first);
auto val = builder->CreateString(kv->second->constant);
attrs.push_back(reflection::CreateKeyValue(*builder, key, val));
}
}
if (attrs.size()) {
return builder->CreateVectorOfSortedTables(&attrs);
} else {
return 0;
}
}
bool Definition::DeserializeAttributes(
Parser &parser, const Vector<Offset<reflection::KeyValue>> *attrs) {
if (attrs == nullptr) return true;
for (uoffset_t i = 0; i < attrs->size(); ++i) {
auto kv = attrs->Get(i);
auto value = new Value();
if (kv->value()) { value->constant = kv->value()->str(); }
if (attributes.Add(kv->key()->str(), value)) {
delete value;
return false;
}
parser.known_attributes_[kv->key()->str()];
}
return true;
}
/************************************************************************/
/* DESERIALIZATION */
/************************************************************************/
bool Parser::Deserialize(const uint8_t *buf, const size_t size) {
flatbuffers::Verifier verifier(reinterpret_cast<const uint8_t *>(buf), size);
bool size_prefixed = false;
if (!reflection::SchemaBufferHasIdentifier(buf)) {
if (!flatbuffers::BufferHasIdentifier(buf, reflection::SchemaIdentifier(),
true))
return false;
else
size_prefixed = true;
}
auto verify_fn = size_prefixed ? &reflection::VerifySizePrefixedSchemaBuffer
: &reflection::VerifySchemaBuffer;
if (!verify_fn(verifier)) { return false; }
auto schema = size_prefixed ? reflection::GetSizePrefixedSchema(buf)
: reflection::GetSchema(buf);
return Deserialize(schema);
}
bool Parser::Deserialize(const reflection::Schema *schema) {
file_identifier_ = schema->file_ident() ? schema->file_ident()->str() : "";
file_extension_ = schema->file_ext() ? schema->file_ext()->str() : "";
std::map<std::string, Namespace *> namespaces_index;
// Create defs without deserializing so references from fields to structs and
// enums can be resolved.
for (auto it = schema->objects()->begin(); it != schema->objects()->end();
++it) {
auto struct_def = new StructDef();
struct_def->bytesize = it->bytesize();
struct_def->fixed = it->is_struct();
struct_def->minalign = it->minalign();
if (structs_.Add(it->name()->str(), struct_def)) {
delete struct_def;
return false;
}
auto type = new Type(BASE_TYPE_STRUCT, struct_def, nullptr);
if (types_.Add(it->name()->str(), type)) {
delete type;
return false;
}
}
for (auto it = schema->enums()->begin(); it != schema->enums()->end(); ++it) {
auto enum_def = new EnumDef();
if (enums_.Add(it->name()->str(), enum_def)) {
delete enum_def;
return false;
}
auto type = new Type(BASE_TYPE_UNION, nullptr, enum_def);
if (types_.Add(it->name()->str(), type)) {
delete type;
return false;
}
}
// Now fields can refer to structs and enums by index.
for (auto it = schema->objects()->begin(); it != schema->objects()->end();
++it) {
std::string qualified_name = it->name()->str();
auto struct_def = structs_.Lookup(qualified_name);
struct_def->defined_namespace =
GetNamespace(qualified_name, namespaces_, namespaces_index);
if (!struct_def->Deserialize(*this, *it)) { return false; }
if (schema->root_table() == *it) { root_struct_def_ = struct_def; }
}
for (auto it = schema->enums()->begin(); it != schema->enums()->end(); ++it) {
std::string qualified_name = it->name()->str();
auto enum_def = enums_.Lookup(qualified_name);
enum_def->defined_namespace =
GetNamespace(qualified_name, namespaces_, namespaces_index);
if (!enum_def->Deserialize(*this, *it)) { return false; }
}
if (schema->services()) {
for (auto it = schema->services()->begin(); it != schema->services()->end();
++it) {
std::string qualified_name = it->name()->str();
auto service_def = new ServiceDef();
service_def->defined_namespace =
GetNamespace(qualified_name, namespaces_, namespaces_index);
if (!service_def->Deserialize(*this, *it) ||
services_.Add(qualified_name, service_def)) {
delete service_def;
return false;
}
}
}
return true;
}
std::string Parser::ConformTo(const Parser &base) {
for (auto sit = structs_.vec.begin(); sit != structs_.vec.end(); ++sit) {
auto &struct_def = **sit;
auto qualified_name =
struct_def.defined_namespace->GetFullyQualifiedName(struct_def.name);
auto struct_def_base = base.LookupStruct(qualified_name);
if (!struct_def_base) continue;
for (auto fit = struct_def.fields.vec.begin();
fit != struct_def.fields.vec.end(); ++fit) {
auto &field = **fit;
auto field_base = struct_def_base->fields.Lookup(field.name);
if (field_base) {
if (field.value.offset != field_base->value.offset)
return "offsets differ for field: " + field.name;
if (field.value.constant != field_base->value.constant)
return "defaults differ for field: " + field.name;
if (!EqualByName(field.value.type, field_base->value.type))
return "types differ for field: " + field.name;
} else {
// Doesn't have to exist, deleting fields is fine.
// But we should check if there is a field that has the same offset
// but is incompatible (in the case of field renaming).
for (auto fbit = struct_def_base->fields.vec.begin();
fbit != struct_def_base->fields.vec.end(); ++fbit) {
field_base = *fbit;
if (field.value.offset == field_base->value.offset) {
if (!EqualByName(field.value.type, field_base->value.type))
return "field renamed to different type: " + field.name;
break;
}
}
}
}
}
for (auto eit = enums_.vec.begin(); eit != enums_.vec.end(); ++eit) {
auto &enum_def = **eit;
auto qualified_name =
enum_def.defined_namespace->GetFullyQualifiedName(enum_def.name);
auto enum_def_base = base.enums_.Lookup(qualified_name);
if (!enum_def_base) continue;
for (auto evit = enum_def.Vals().begin(); evit != enum_def.Vals().end();
++evit) {
auto &enum_val = **evit;
auto enum_val_base = enum_def_base->Lookup(enum_val.name);
if (enum_val_base) {
if (enum_val != *enum_val_base)
return "values differ for enum: " + enum_val.name;
}
}
}
return "";
}
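// An empty return value from ConformTo() means this schema is a compatible
// evolution of `base`; a non-empty string describes the first incompatibility
// found (this is what backs flatc's --conform check).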
} // namespace flatbuffers
| 1 | 20,503 | This will cause someone writing a schema field like `inf:string` to get a pretty confusing error? If they intended to use `inf` as short for `information` or whatever :) Might it be better to keep it as `Identifier` and explicitly recognize the few identifiers we care about only when parsing values (not while parsing field names)? | google-flatbuffers | java |
@@ -46,7 +46,7 @@ export function generateRandomHexString(length: number = 8) {
export function signPayload(payload: JWTPayload, secret: string, options: JWTSignOptions) {
return jwt.sign(payload, secret, {
- notBefore: '1000', // Make sure the time will not rollback :)
+ notBefore: '1', // Make sure the time will not rollback :)
...options,
});
} | 1 | // @flow
import {createDecipher, createCipher, createHash, pseudoRandomBytes} from 'crypto';
import jwt from 'jsonwebtoken';
import type {JWTPayload, JWTSignOptions} from '../../types';
export const defaultAlgorithm = 'aes192';
export function aesEncrypt(buf: Buffer, secret: string): Buffer {
const c = createCipher(defaultAlgorithm, secret);
const b1 = c.update(buf);
const b2 = c.final();
return Buffer.concat([b1, b2]);
}
export function aesDecrypt(buf: Buffer, secret: string) {
try {
const c = createDecipher(defaultAlgorithm, secret);
const b1 = c.update(buf);
const b2 = c.final();
return Buffer.concat([b1, b2]);
} catch (_) {
return new Buffer(0);
}
}
export function createTarballHash() {
return createHash('sha1');
}
/**
 * Express doesn't do etags with requests <= 1024b.
 * We use md5 here; it works well on 1k+ bytes, but poorly on smaller data.
 * Performance could be improved by switching to crc32, after benchmarks.
 * @param {Buffer|string} data
 * @return {String}
 */
export function stringToMD5(data: Buffer | string) {
return createHash('md5').update(data).digest('hex');
}
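// e.g. (standard MD5 test vector, shown for illustration):
//   stringToMD5('abc') === '900150983cd24fb0d6963f7d28e17f72'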
export function generateRandomHexString(length: number = 8) {
return pseudoRandomBytes(length).toString('hex');
}
export function signPayload(payload: JWTPayload, secret: string, options: JWTSignOptions) {
return jwt.sign(payload, secret, {
notBefore: '1000', // Make sure the time will not rollback :)
...options,
});
}
export function verifyPayload(token: string, secret: string) {
return jwt.verify(token, secret);
}
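// Usage sketch (illustrative; the payload shape and options are assumptions):
//   const token = signPayload({user: 'foo'}, 'secret', {expiresIn: '1h'});
//   const payload = verifyPayload(token, 'secret'); // throws if invalid, expired or not yet active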
| 1 | 18,855 | Mmm .... this affect #168 I'll need to research the collateral damage | verdaccio-verdaccio | js |
@@ -268,7 +268,7 @@ func checkSrvRecord(dnsBootstrap string) {
func doDeleteDNS(network string, noPrompt bool, excludePattern string) bool {
- if network == "" || network == "testnet" || network == "devnet" || network == "mainnet" {
+ if network == "" || network == "testnet" || network == "devnet" || network == "mainnet" || network == "beta" {
fmt.Fprintf(os.Stderr, "Deletion of network '%s' using this tool is not allowed\n", network)
return false
} | 1 | // Copyright (C) 2019 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package main
import (
"bufio"
"context"
"fmt"
"io/ioutil"
"net"
"os"
"regexp"
"sort"
"strings"
"github.com/spf13/cobra"
"github.com/algorand/go-algorand/tools/network"
"github.com/algorand/go-algorand/tools/network/cloudflare"
)
var (
addFromName string
addToAddress string
deleteNetwork string
listNetwork string
recordType string
noPrompt bool
excludePattern string
exportNetwork string
outputFilename string
)
func init() {
dnsCmd.AddCommand(checkCmd)
dnsCmd.AddCommand(addCmd)
dnsCmd.AddCommand(deleteCmd)
dnsCmd.AddCommand(listCmd)
dnsCmd.AddCommand(exportCmd)
listCmd.AddCommand(listRecordsCmd)
listCmd.AddCommand(listZonesCmd)
addCmd.Flags().StringVarP(&addFromName, "from", "f", "", "From name to add new DNS entry")
addCmd.MarkFlagRequired("from")
addCmd.Flags().StringVarP(&addToAddress, "to", "t", "", "To address to map new DNS entry to")
addCmd.MarkFlagRequired("to")
deleteCmd.Flags().StringVarP(&deleteNetwork, "network", "n", "", "Network name for records to delete")
deleteCmd.MarkFlagRequired("network")
deleteCmd.Flags().BoolVarP(&noPrompt, "no-prompt", "y", false, "No prompting for records deletion")
deleteCmd.Flags().StringVarP(&excludePattern, "exclude", "e", "", "name records exclude pattern")
listRecordsCmd.Flags().StringVarP(&listNetwork, "network", "n", "", "Domain name for records to list")
listRecordsCmd.Flags().StringVarP(&recordType, "recordType", "t", "", "DNS record type to list (A, CNAME, SRV)")
listRecordsCmd.MarkFlagRequired("network")
exportCmd.Flags().StringVarP(&exportNetwork, "network", "n", "", "Domain name to export")
exportCmd.MarkFlagRequired("network")
exportCmd.Flags().StringVarP(&outputFilename, "zonefile", "z", "", "Output file for backup ( intead of outputing it to stdout ) ")
}
type byIP []net.IP
func (a byIP) Len() int { return len(a) }
func (a byIP) Less(i, j int) bool { return a[i].String() < a[j].String() }
func (a byIP) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
var dnsCmd = &cobra.Command{
Use: "dns",
Short: "Check status of required DNS entries",
Long: "Check status of required DNS entries",
Run: func(cmd *cobra.Command, args []string) {
// Fall back
cmd.HelpFunc()(cmd, args)
},
}
var listCmd = &cobra.Command{
Use: "list",
Short: "List the A/SRV/Zones entries of the given network",
Long: "List the A/SRV/Zones entries of the given network",
Run: func(cmd *cobra.Command, args []string) {
cmd.HelpFunc()(cmd, args)
},
}
var listRecordsCmd = &cobra.Command{
Use: "records",
Short: "List the A/SRV entries of the given network",
Long: "List the A/SRV entries of the given network",
Run: func(cmd *cobra.Command, args []string) {
recordType = strings.ToUpper(recordType)
if recordType == "" || recordType == "A" || recordType == "CNAME" || recordType == "SRV" {
listEntries(listNetwork, recordType)
} else {
fmt.Fprintf(os.Stderr, "Invalid recordType specified.\n")
os.Exit(1)
}
},
}
var listZonesCmd = &cobra.Command{
Use: "zones",
Short: "List the zones",
Long: "List the zones",
Run: func(cmd *cobra.Command, args []string) {
if !doListZones() {
os.Exit(1)
}
},
}
var checkCmd = &cobra.Command{
Use: "check",
Short: "Check the status",
Long: "Check the status",
Run: func(cmd *cobra.Command, args []string) {
checkDNSRecord("relay-us-ea-1.algorand.network")
checkDNSRecord("relay-us-ea-2.algorand.network")
checkDNSRecord("relay-us-ea-3.algorand.network")
checkDNSRecord("relay-us-ea-4.algorand.network")
checkDNSRecord("relay-us-ea-99876.algorand.network")
fmt.Printf("------------------------\n")
checkSrvRecord("devnet.algorand.network")
checkSrvRecord("testnet.algorand.network")
checkSrvRecord("bogus.algorand.network")
fmt.Printf("------------------------\n")
},
}
var addCmd = &cobra.Command{
Use: "add",
Short: "Add a DNS record",
Long: "Adds a DNS record to map --from to --to, using A if to == IP or CNAME otherwise\n",
Example: "algons dns add -f a.test.algodev.network -t r1.algodev.network\n" +
"algons dns add -f a.test.algodev.network -t 192.168.100.10",
Run: func(cmd *cobra.Command, args []string) {
err := doAddDNS(addFromName, addToAddress)
if err != nil {
fmt.Fprintf(os.Stderr, "Error adding DNS entry: %v\n", err)
os.Exit(1)
} else {
fmt.Printf("DNS Entry Added\n")
}
},
}
var deleteCmd = &cobra.Command{
Use: "delete",
Short: "Delete DNS and SRV records for a specified network",
Run: func(cmd *cobra.Command, args []string) {
if !doDeleteDNS(deleteNetwork, noPrompt, excludePattern) {
os.Exit(1)
}
},
}
var exportCmd = &cobra.Command{
Use: "export",
Short: "Export DNS record entries for a specified network",
Run: func(cmd *cobra.Command, args []string) {
if !doExportZone(exportNetwork, outputFilename) {
os.Exit(1)
}
},
}
func doAddDNS(from string, to string) (err error) {
cfZoneID, cfEmail, cfKey, err := getClouldflareCredentials()
if err != nil {
return fmt.Errorf("error getting DNS credentials: %v", err)
}
cloudflareDNS := cloudflare.NewDNS(cfZoneID, cfEmail, cfKey)
const priority = 1
const proxied = false
// If we need to register anything, first register a DNS entry
// to map our network DNS name to our public name (or IP) provided to nodecfg
	// Network HostName, e.g. r1.testnet.algorand.network
isIP := net.ParseIP(to) != nil
var recordType string
if isIP {
recordType = "A"
} else {
recordType = "CNAME"
}
cloudflareDNS.SetDNSRecord(context.Background(), recordType, from, to, cloudflare.AutomaticTTL, priority, proxied)
return
}
func getClouldflareAuthCredentials() (email string, authKey string, err error) {
email = os.Getenv("CLOUDFLARE_EMAIL")
authKey = os.Getenv("CLOUDFLARE_AUTH_KEY")
if email == "" || authKey == "" {
err = fmt.Errorf("one or more credentials missing from ENV")
}
return
}
func getClouldflareCredentials() (zoneID string, email string, authKey string, err error) {
email, authKey, err = getClouldflareAuthCredentials()
if err != nil {
return
}
zoneID = os.Getenv("CLOUDFLARE_ZONE_ID")
if zoneID == "" {
err = fmt.Errorf("one or more credentials missing from ENV")
}
return
}
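// Expected environment (values are placeholders):
//
//	CLOUDFLARE_EMAIL=ops@example.com
//	CLOUDFLARE_AUTH_KEY=<cloudflare api key>
//	CLOUDFLARE_ZONE_ID=<cloudflare zone id>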
func checkDNSRecord(dnsName string) {
fmt.Printf("------------------------\nDNS Lookup: %s\n", dnsName)
ips, err := net.LookupIP(dnsName)
if err != nil {
fmt.Printf("Cannot resolve %s: %v\n", dnsName, err)
} else {
sort.Sort(byIP(ips))
for _, ip := range ips {
fmt.Printf("-> %s\n", ip.String())
}
}
}
func checkSrvRecord(dnsBootstrap string) {
fmt.Printf("------------------------\nSRV Lookup: %s\n", dnsBootstrap)
_, addrs, err := net.LookupSRV("algobootstrap", "tcp", dnsBootstrap)
if err != nil {
if !strings.HasSuffix(err.Error(), "cannot unmarshal DNS message") {
// we weren't able to get the SRV records.
fmt.Printf("Cannot lookup SRV record for %s: %v\n", dnsBootstrap, err)
return
}
var resolver network.Resolver
_, addrs, err = resolver.LookupSRV(context.Background(), "algobootstrap", "tcp", dnsBootstrap)
if err != nil {
fmt.Printf("Cannot lookup SRV record for %s via neither default resolver nor via %s: %v\n", dnsBootstrap, resolver.EffectiveResolverDNS(), err)
return
}
}
for _, srv := range addrs {
fmt.Printf("%s:%d\n", srv.Target, srv.Port)
}
}
func doDeleteDNS(network string, noPrompt bool, excludePattern string) bool {
if network == "" || network == "testnet" || network == "devnet" || network == "mainnet" {
fmt.Fprintf(os.Stderr, "Deletion of network '%s' using this tool is not allowed\n", network)
return false
}
var excludeRegex *regexp.Regexp
if excludePattern != "" {
var err error
excludeRegex, err = regexp.Compile(excludePattern)
if err != nil {
fmt.Fprintf(os.Stderr, "specified regular expression exclude pattern ('%s') is not a valid regular expression : %v", excludePattern, err)
return false
}
}
cfZoneID, cfEmail, cfKey, err := getClouldflareCredentials()
if err != nil {
fmt.Fprintf(os.Stderr, "error getting DNS credentials: %v", err)
return false
}
cloudflareDNS := cloudflare.NewDNS(cfZoneID, cfEmail, cfKey)
idsToDelete := make(map[string]string) // Maps record ID to Name
for _, service := range []string{"_algobootstrap", "_metrics"} {
records, err := cloudflareDNS.ListDNSRecord(context.Background(), "SRV", service+"._tcp."+network+".algodev.network", "", "", "", "")
if err != nil {
fmt.Fprintf(os.Stderr, "Error listing SRV '%s' entries: %v\n", service, err)
os.Exit(1)
}
for _, r := range records {
if excludeRegex != nil {
if excludeRegex.MatchString(r.Name) {
fmt.Printf("Excluding SRV '%s' record: %s\n", service, r.Name)
continue
}
}
fmt.Printf("Found SRV '%s' record: %s\n", service, r.Name)
idsToDelete[r.ID] = r.Name
}
}
networkSuffix := "." + network + ".algodev.network"
for _, recordType := range []string{"A", "CNAME"} {
records, err := cloudflareDNS.ListDNSRecord(context.Background(), recordType, "", "", "", "", "")
if err != nil {
fmt.Fprintf(os.Stderr, "Error listing DNS '%s' entries: %v\n", recordType, err)
os.Exit(1)
}
for _, r := range records {
if strings.Index(r.Name, networkSuffix) > 0 {
if excludeRegex != nil {
if excludeRegex.MatchString(r.Name) {
fmt.Printf("Excluding DNS '%s' record: %s\n", recordType, r.Name)
continue
}
}
fmt.Printf("Found DNS '%s' record: %s\n", recordType, r.Name)
idsToDelete[r.ID] = r.Name
}
}
}
if len(idsToDelete) == 0 {
fmt.Printf("No DNS/SRV records found\n")
return true
}
var text string
if !noPrompt {
reader := bufio.NewReader(os.Stdin)
fmt.Printf("Delete these %d entries (type 'yes' to delete)? ", len(idsToDelete))
text, _ = reader.ReadString('\n')
text = strings.Replace(text, "\n", "", -1)
} else {
text = "yes"
}
if text == "yes" {
for id, name := range idsToDelete {
fmt.Fprintf(os.Stdout, "Deleting %s\n", name)
err = cloudflareDNS.DeleteDNSRecord(context.Background(), id)
if err != nil {
fmt.Fprintf(os.Stderr, " !! error deleting %s: %v\n", name, err)
}
}
}
return true
}
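// Example invocation (illustrative; network name and exclude pattern are
// placeholders):
//
//	algons dns delete -n mynet -y -e "^relay"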
func listEntries(listNetwork string, recordType string) {
cfZoneID, cfEmail, cfKey, err := getClouldflareCredentials()
if err != nil {
fmt.Fprintf(os.Stderr, "error getting DNS credentials: %v", err)
return
}
cloudflareDNS := cloudflare.NewDNS(cfZoneID, cfEmail, cfKey)
recordTypes := []string{"A", "CNAME", "SRV"}
if recordType != "" {
recordTypes = []string{recordType}
}
for _, recType := range recordTypes {
records, err := cloudflareDNS.ListDNSRecord(context.Background(), recType, "", "", "", "", "")
if err != nil {
fmt.Fprintf(os.Stderr, "Error listing DNS entries: %v\n", err)
os.Exit(1)
}
for _, record := range records {
if strings.HasSuffix(record.Name, listNetwork) {
fmt.Printf("%v\n", record.Name)
}
}
}
}
func doExportZone(network string, outputFilename string) bool {
cfEmail, cfKey, err := getClouldflareAuthCredentials()
if err != nil {
fmt.Fprintf(os.Stderr, "error getting DNS credentials: %v", err)
return false
}
cloudflareCred := cloudflare.NewCred(cfEmail, cfKey)
zones, err := cloudflareCred.GetZones(context.Background())
if err != nil {
fmt.Fprintf(os.Stderr, "Error retrieving zones entries: %v\n", err)
return false
}
zoneID := ""
// find a zone that matches the requested network name.
for _, z := range zones {
if z.DomainName == network {
zoneID = z.ZoneID
break
}
fmt.Printf("%s : %s\n", z.DomainName, z.ZoneID)
}
if zoneID == "" {
fmt.Fprintf(os.Stderr, "No matching zoneID was found for %s\n", network)
return false
}
cloudflareDNS := cloudflare.NewDNS(zoneID, cfEmail, cfKey)
exportedZone, err := cloudflareDNS.ExportZone(context.Background())
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to export zone : %v\n", err)
return false
}
if outputFilename != "" {
err = ioutil.WriteFile(outputFilename, exportedZone, 0666)
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to write exported zone file : %v\n", err)
return false
}
} else {
fmt.Fprint(os.Stdout, string(exportedZone))
}
return true
}
func doListZones() bool {
cfEmail, cfKey, err := getClouldflareAuthCredentials()
if err != nil {
fmt.Fprintf(os.Stderr, "error getting DNS credentials: %v", err)
return false
}
cloudflareCred := cloudflare.NewCred(cfEmail, cfKey)
zones, err := cloudflareCred.GetZones(context.Background())
if err != nil {
fmt.Fprintf(os.Stderr, "Error listing zones entries: %v\n", err)
return false
}
for _, z := range zones {
fmt.Printf("%s : %s\n", z.DomainName, z.ZoneID)
}
return true
}
| 1 | 35,964 | nit: refactor into a const map lookup. | algorand-go-algorand | go |
@@ -26,7 +26,7 @@ namespace MvvmCross.Forms.Droid.Views
{
BindingContext = new MvxAndroidBindingContext(this, this);
this.AddEventListeners();
- _resourceAssembly = Assembly.GetCallingAssembly();
+ _resourceAssembly = this.GetType().Assembly;
}
public object DataContext | 1 | using System.Reflection;
using Android.Content;
using Android.OS;
using Android.Util;
using Android.Views;
using MvvmCross.Binding.BindingContext;
using MvvmCross.Binding.Droid.BindingContext;
using MvvmCross.Binding.Droid.Views;
using MvvmCross.Core.ViewModels;
using MvvmCross.Droid.Platform;
using MvvmCross.Droid.Support.V7.AppCompat;
using MvvmCross.Droid.Views;
using MvvmCross.Forms.Droid.Views.EventSource;
using MvvmCross.Forms.Platform;
using MvvmCross.Forms.Views;
using MvvmCross.Platform;
namespace MvvmCross.Forms.Droid.Views
{
public class MvxFormsAppCompatActivity : MvxEventSourceFormsAppCompatActivity, IMvxAndroidView
{
private View _view;
private readonly Assembly _resourceAssembly;
protected MvxFormsAppCompatActivity()
{
BindingContext = new MvxAndroidBindingContext(this, this);
this.AddEventListeners();
_resourceAssembly = Assembly.GetCallingAssembly();
}
public object DataContext
{
get { return BindingContext.DataContext; }
set { BindingContext.DataContext = value; }
}
public IMvxViewModel ViewModel
{
get
{
return DataContext as IMvxViewModel;
}
set
{
DataContext = value;
OnViewModelSet();
}
}
private MvxFormsApplication _formsApplication;
protected MvxFormsApplication FormsApplication
{
get
{
if (_formsApplication == null)
{
var formsPresenter = Mvx.Resolve<IMvxFormsViewPresenter>();
_formsApplication = formsPresenter.FormsApplication;
}
return _formsApplication;
}
}
public void MvxInternalStartActivityForResult(Intent intent, int requestCode)
{
StartActivityForResult(intent, requestCode);
}
protected virtual void OnViewModelSet()
{
}
public IMvxBindingContext BindingContext { get; set; }
public override void SetContentView(int layoutResId)
{
_view = this.BindingInflate(layoutResId, null);
SetContentView(_view);
}
protected override void AttachBaseContext(Context @base)
{
if (this is IMvxAndroidSplashScreenActivity)
{
// Do not attach our inflater to splash screens.
base.AttachBaseContext(@base);
return;
}
base.AttachBaseContext(MvxContextWrapper.Wrap(@base, this));
}
protected override void OnCreate(Bundle bundle)
{
// Required for proper Push notifications handling
var setupSingleton = MvxAndroidSetupSingleton.EnsureSingletonAvailable(ApplicationContext);
setupSingleton.EnsureInitialized();
base.OnCreate(bundle);
ViewModel?.ViewCreated();
InitializeForms(bundle);
}
public virtual void InitializeForms(Bundle bundle)
{
if (FormsApplication.MainPage != null)
{
global::Xamarin.Forms.Forms.Init(this, bundle, GetResourceAssembly());
LoadApplication(FormsApplication);
}
}
protected virtual Assembly GetResourceAssembly()
{
return _resourceAssembly;
}
protected override void OnDestroy()
{
base.OnDestroy();
ViewModel?.ViewDestroy(IsFinishing);
}
protected override void OnStart()
{
base.OnStart();
ViewModel?.ViewAppearing();
}
protected override void OnResume()
{
base.OnResume();
ViewModel?.ViewAppeared();
}
protected override void OnPause()
{
base.OnPause();
ViewModel?.ViewDisappearing();
}
protected override void OnStop()
{
base.OnStop();
ViewModel?.ViewDisappeared();
}
public override View OnCreateView(View parent, string name, Context context, IAttributeSet attrs)
{
var view = MvxAppCompatActivityHelper.OnCreateView(parent, name, context, attrs);
return view ?? base.OnCreateView(parent, name, context, attrs);
}
}
public class MvxFormsAppCompatActivity<TViewModel>
: MvxFormsAppCompatActivity
, IMvxAndroidView<TViewModel> where TViewModel : class, IMvxViewModel
{
public new TViewModel ViewModel
{
get { return (TViewModel)base.ViewModel; }
set { base.ViewModel = value; }
}
}
}
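// Illustrative sketch with hypothetical names (MyApp.Droid, MainActivity),
// not part of MvvmCross: the accompanying diff replaces
// Assembly.GetCallingAssembly() -- which resolves to this library -- with
// this.GetType().Assembly, which resolves to the most derived (app) type's
// assembly. An app can also pin the resource assembly explicitly by
// overriding the virtual GetResourceAssembly() hook defined above:
namespace MyApp.Droid
{
    public class MainActivity : MvvmCross.Forms.Droid.Views.MvxFormsAppCompatActivity
    {
        protected override System.Reflection.Assembly GetResourceAssembly()
        {
            return typeof(MainActivity).Assembly;
        }
    }
}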
| 1 | 13,532 | I don't think this is going to work. We need to get the actual assembly of the app project. @johnnywebb thoughts? | MvvmCross-MvvmCross | .cs |
@@ -306,7 +306,7 @@ def get_kbr_keys(kb_name, searchkey="", searchvalue="", searchtype='s'):
searchvalue, searchtype)
def get_kbr_values(kb_name, searchkey="", searchvalue="", searchtype='s',
- use_memoise=False):
+ use_memoize=False):
"""
Return a tuple of values from key-value mapping kb.
| 1 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Provide API-callable functions for knowledge base management (using kb's).
"""
import os
import re
import json
from . import dblayer as bibknowledge_dblayer
from invenio.base.globals import cfg
processor_type = 0
try:
from lxml import etree
processor_type = 1
except ImportError:
try:
import libxml2
import libxslt
processor_type = 2
except ImportError:
pass
def get_kb_mappings(kb_name="", key="", value="", match_type="s"):
"""Get leftside/rightside mappings from kb kb_name.
If key given, give only those with left side (mapFrom) = key.
If value given, give only those with right side (mapTo) = value.
@param kb_name: the name of the kb
@param key: include only lines matching this on left side in the results
@param value: include only lines matching this on right side in the results
@param match_type: s = substring match, e = exact match
@return a list of mappings
"""
return bibknowledge_dblayer.get_kb_mappings(kb_name,
keylike=key, valuelike=value,
match_type=match_type)
def get_kb_mapping(kb_name="", key="", value="", match_type="e", default=""):
"""Get one unique mapping. If not found, return default
@param kb_name: the name of the kb
@param key: include only lines matching this on left side in the results
@param value: include only lines matching this on right side in the results
@param match_type: s = substring match, e = exact match
@return a mapping
"""
mappings = bibknowledge_dblayer.get_kb_mappings(kb_name,
keylike=key, valuelike=value,
match_type=match_type)
if len(mappings) == 0:
return default
else:
return mappings[0]
def add_kb_mapping(kb_name, key, value=""):
"""
Adds a new mapping to given kb
@param kb_name: the name of the kb where to insert the new value
@param key: the key of the mapping
@param value: the value of the mapping
"""
bibknowledge_dblayer.add_kb_mapping(kb_name, key, value)
def remove_kb_mapping(kb_name, key):
"""
Delete an existing kb mapping in kb
@param kb_name: the name of the kb where to insert the new value
@param key: the key of the mapping
"""
bibknowledge_dblayer.remove_kb_mapping(kb_name, key)
def update_kb_mapping(kb_name, old_key, key, value):
"""
Update an existing kb mapping with key old_key with a new key and value
@param kb_name: the name of the kb where to insert the new value
@param old_key: the key of the mapping in the kb
@param key: the new key of the mapping
@param value: the new value of the mapping
"""
#check if this is a KEY change or a VALUE change.
if (old_key == key):
#value change, ok to change
bibknowledge_dblayer.update_kb_mapping(kb_name, old_key, key, value)
else:
#you can change a key unless there is already a key like that
if kb_mapping_exists(kb_name, key):
pass #no, don't change
else:
bibknowledge_dblayer.update_kb_mapping(kb_name, old_key, key, value)
def get_kb_mappings_json(kb_name="", key="", value="", match_type="s", limit=None):
"""Get leftside/rightside mappings from kb kb_name formatted as json dict.
If key given, give only those with left side (mapFrom) = key.
If value given, give only those with right side (mapTo) = value.
@param kb_name: the name of the kb
@param key: include only lines matching this on left side in the results
@param value: include only lines matching this on right side in the results
@param match_type: s = substring match, e = exact match
    @param limit: maximum number of results to return (all results if set to None)
@return a list of mappings
"""
mappings = get_kb_mappings(kb_name, key, value, match_type)
ret = []
if limit is None:
limit = len(mappings)
for m in mappings[:limit]:
label = m['value'] or m['key']
value = m['key'] or m['value']
ret.append({'label': label, 'value': value})
return json.dumps(ret)
def get_kb_mappings_embedded_json(kb_name="", key="", value="", match_type="s", limit=None):
"""Get leftside/rightside mappings from kb kb_name formatted as json dict.
The rightside is actually considered as a json string and hence embedded
within the final result.
If key given, give only those with left side (mapFrom) = key.
If value given, give only those with right side (mapTo) = value.
@param kb_name: the name of the kb
@param key: include only lines matching this on left side in the results
@param value: include only lines matching this on right side in the results
@param match_type: s = substring match, e = exact match
    @param limit: maximum number of results to return (all results if set to None)
@return a list of mappings
"""
mappings = get_kb_mappings(kb_name, key, value, match_type)
ret = []
if limit is None:
limit = len(mappings)
for m in mappings[:limit]:
label = m['value'] or m['key']
value = m['key'] or m['value']
ret.append({'label': label, 'value': json.loads(value)})
return json.dumps(ret)
def kb_exists(kb_name):
"""Returns True if a kb with the given name exists
@param kb_name: the name of the knowledge base
"""
return bibknowledge_dblayer.kb_exists(kb_name)
def get_kb_name(kb_id):
"""
Returns the name of the kb given by id
@param kb_id: the id of the knowledge base
"""
return bibknowledge_dblayer.get_kb_name(kb_id)
def update_kb_attributes(kb_name, new_name, new_description=''):
"""Update kb kb_name with a new name and (optionally) description
@param kb_name: the name of the kb to update
@param new_name: the new name for the kb
@param new_description: the new description for the kb
"""
bibknowledge_dblayer.update_kb(kb_name, new_name, new_description)
def add_kb(kb_name="Untitled", kb_type=None):
"""
Adds a new kb in database, and returns its id
The name of the kb will be 'Untitled#'
such that it is unique.
@param kb_name: the name of the kb
@param kb_type: the type of the kb, incl 'taxonomy' and 'dynamic'.
None for typical (leftside-rightside).
@return the id of the newly created kb
"""
name = kb_name
i = 1
while bibknowledge_dblayer.kb_exists(name):
name = kb_name + " " + str(i)
i += 1
kb_id = bibknowledge_dblayer.add_kb(name, "", kb_type)
return kb_id
def add_dynamic_kb(kbname, tag, collection="", searchwith=""):
"""A convenience method"""
kb_id = add_kb(kb_name=kbname, kb_type='dynamic')
bibknowledge_dblayer.save_kb_dyn_config(kb_id, tag, searchwith, collection)
return kb_id
def kb_mapping_exists(kb_name, key):
"""
Returns the information if a mapping exists.
@param kb_name: knowledge base name
@param key: left side (mapFrom)
"""
return bibknowledge_dblayer.kb_mapping_exists(kb_name, key)
def delete_kb(kb_name):
"""
Deletes given kb from database
@param kb_name: knowledge base name
"""
bibknowledge_dblayer.delete_kb(kb_name)
def get_kb_id(kb_name):
"""
Gets the id by name
@param kb_name knowledge base name
"""
return bibknowledge_dblayer.get_kb_id(kb_name)
# Knowledge Bases Dependencies
##
def get_elements_that_use_kb(name):
"""
This routine is obsolete.
Returns a list of elements that call given kb
[ {'filename':"filename_1.py"
'name': "a name"
},
...
]
Returns elements sorted by name
"""
format_elements = {}
#Retrieve all elements in files
from invenio.modules.formatter.engine import TEMPLATE_CONTEXT_FUNCTIONS_CACHE
for element in TEMPLATE_CONTEXT_FUNCTIONS_CACHE.bibformat_elements().values():
path = element.__file__
filename = os.path.basename(element.__file__)
if filename.endswith(".py"):
formatf = open(path, 'r')
code = formatf.read()
formatf.close()
# Search for use of kb inside code
kb_pattern = re.compile('''
(bfo.kb)\s* #Function call
\(\s* #Opening parenthesis
[\'"]+ #Single or double quote
(?P<kb>%s) #kb
[\'"]+\s* #Single or double quote
, #comma
''' % name, re.VERBOSE | re.MULTILINE | re.IGNORECASE)
result = kb_pattern.search(code)
if result is not None:
name = ("".join(filename.split(".")[:-1])).lower()
if name.startswith("bfe_"):
name = name[4:]
format_elements[name] = {'filename':filename, 'name': name}
keys = format_elements.keys()
keys.sort()
return map(format_elements.get, keys)
###kb functions for export
def get_kbs_info(kbtype="", searchkbname=""):
"""A convenience method that calls dblayer
@param kbtype: type of kb -- get only kb's of this type
    @param searchkbname: get only kb's where this string appears in the name
"""
return bibknowledge_dblayer.get_kbs_info(kbtype, searchkbname)
def get_kba_values(kb_name, searchname="", searchtype="s"):
"""
Returns an array of values "authority file" type = just values.
@param kb_name: name of kb
@param searchname: get these values, according to searchtype
@param searchtype: s=substring, e=exact
"""
return bibknowledge_dblayer.get_kba_values(kb_name, searchname, searchtype)
def get_kbr_keys(kb_name, searchkey="", searchvalue="", searchtype='s'):
"""
Returns an array of keys.
@param kb_name: the name of the knowledge base
@param searchkey: search using this key
@param searchvalue: search using this value
@param searchtype: s = substring, e=exact
"""
return bibknowledge_dblayer.get_kbr_keys(kb_name, searchkey,
searchvalue, searchtype)
def get_kbr_values(kb_name, searchkey="", searchvalue="", searchtype='s',
use_memoise=False):
"""
Return a tuple of values from key-value mapping kb.
@param kb_name: the name of the knowledge base
@param searchkey: search using this key
@param searchvalue: search using this value
@param searchtype: s=substring; e=exact
@param use_memoise: can we memoise while doing lookups?
@type use_memoise: bool
"""
if use_memoise:
return bibknowledge_dblayer.get_kbr_values_memoised(kb_name, searchkey,
searchvalue, searchtype,
use_memoise)
else:
return bibknowledge_dblayer.get_kbr_values(kb_name, searchkey,
searchvalue, searchtype,
use_memoise)
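# Illustrative sketch, not part of the original module: if 'use_memoise' were
# ever renamed to 'use_memoize' (as proposed in the accompanying diff), a thin
# wrapper like this hypothetical one could keep old callers working instead of
# breaking the existing API.
def get_kbr_values_with_renamed_kwarg(kb_name, searchkey="", searchvalue="",
                                      searchtype='s', use_memoize=False,
                                      **kwargs):
    """Hypothetical compatibility shim for renaming 'use_memoise'."""
    import warnings
    if 'use_memoise' in kwargs:
        warnings.warn("'use_memoise' is deprecated, use 'use_memoize'",
                      DeprecationWarning)
        use_memoize = kwargs.pop('use_memoise')
    return get_kbr_values(kb_name, searchkey, searchvalue, searchtype,
                          use_memoise=use_memoize)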
def get_kbr_items(kb_name, searchkey="", searchvalue="", searchtype='s'):
"""
Returns a list of dictionaries that match the search.
@param kb_name: the name of the knowledge base
@param searchkey: search using this key
@param searchvalue: search using this value
@param searchtype: s = substring, e=exact
@return a list of dictionaries [{'key'=>x, 'value'=>y},..]
"""
return bibknowledge_dblayer.get_kbr_items(kb_name, searchkey,
searchvalue, searchtype)
def get_kbd_values(kbname, searchwith=""):
"""Return a list of values by searching a dynamic kb.
@param kbname: name of the knowledge base
@param searchwith: a term to search with
"""
from invenio.legacy import search_engine
#first check that the kb in question is dynamic
kbid = bibknowledge_dblayer.get_kb_id(kbname)
if not kbid:
return []
kbtype = bibknowledge_dblayer.get_kb_type(kbid)
if not kbtype:
return []
if kbtype != 'd':
return []
#get the configuration so that we see what the field is
confdict = bibknowledge_dblayer.get_kb_dyn_config(kbid)
if not confdict:
return []
if 'field' not in confdict:
return []
field = confdict['field']
expression = confdict['expression']
collection = ""
if 'collection' in confdict:
collection = confdict['collection']
reclist = [] # return this
if searchwith and expression:
if (expression.count('%') > 0):
expression = expression.replace("%", searchwith)
reclist = search_engine.perform_request_search(p=expression,
cc=collection)
else:
#no %.. just make a combination
expression = expression + " and " + searchwith
reclist = search_engine.perform_request_search(p=expression,
cc=collection)
else: # either no expr or no searchwith.. but never mind about searchwith
if expression: # in this case: only expression
reclist = search_engine.perform_request_search(p=expression,
cc=collection)
else:
#make a fake expression so that only records that have this field
#will be returned
fake_exp = "/.*/"
if searchwith:
fake_exp = searchwith
reclist = search_engine.perform_request_search(f=field, p=fake_exp,
cc=collection)
if reclist:
return [val for (val, dummy) in \
search_engine.get_most_popular_field_values(reclist, field)]
return [] # in case nothing worked
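# Usage sketch (illustrative only; the kb name 'authors_kb' is an assumption):
# for a dynamic kb configured with field '700__a' and an expression containing
# '%', the placeholder is replaced by the search term before querying, e.g.:
#
#   values = get_kbd_values('authors_kb', searchwith='Ellis')
#   # -> list of popular '700__a' values among the matching records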
def get_kbd_values_json(kbname, searchwith=""):
"""Return values from searching a dynamic kb as a json-formatted string.
This IS probably the method you want.
@param kbname: name of the knowledge base
@param searchwith: a term to search with
"""
res = get_kbd_values(kbname, searchwith)
return json.dumps(res)
def get_kbd_values_for_bibedit(tag, collection="", searchwith="", expression=""):
"""
    Dynamically create a dynamic KB for a specific search, perform the search, then destroy it.
This probably isn't the method you want.
Example1: tag=100__a : return values of 100__a
Example2: tag=100__a, searchwith=Jill: return values of 100__a that match with Jill
    Example3: tag=100__a, searchwith=Ellis, expression="700__a:*%*": return values of
100__a for which Ellis matches some 700__a
    Note: the performance of this function is ok compared to a plain
    perform_request_search / get-most-popular-fields pair. The overhead
    is about 5% with large record sets; the lookups are the expensive part.
@param tag: the tag like 100__a
@param collection: collection id
@param searchwith: the string to search. If empty, match all.
@param expression: the search expression for perform_request_search; if
                       present, '%' is substituted with /searchwith/. If absent,
/searchwith/ is searched for in /tag/.
"""
dkbname = "tmp_dynamic_"+tag+'_'+expression
kb_id = add_kb(kb_name=dkbname, kb_type='dynamic')
    #get the kb name since a number may be concatenated to it
#in case there are concurrent calls.
kb_name = get_kb_name(kb_id)
bibknowledge_dblayer.save_kb_dyn_config(kb_id, tag, expression, collection)
#now, get stuff
myvalues = get_kbd_values(kb_name, searchwith)
#the tmp dyn kb is now useless, delete it
delete_kb(kb_name)
return myvalues
def get_kbt_items(taxonomyfilename, templatefilename, searchwith=""):
"""
    Get items from the taxonomy file using a template file. If searchwith is defined,
return only items that match with it.
@param taxonomyfilename: full path+name of the RDF file
    @param templatefilename: full path+name of the XSLT file
@param searchwith: a term to search with
"""
if processor_type == 1:
# lxml
doc = etree.XML(taxonomyfilename)
styledoc = etree.XML(templatefilename)
style = etree.XSLT(styledoc)
result = style(doc)
strres = str(result)
del result
del style
del styledoc
del doc
elif processor_type == 2:
# libxml2 & libxslt
styledoc = libxml2.parseFile(templatefilename)
style = libxslt.parseStylesheetDoc(styledoc)
doc = libxml2.parseFile(taxonomyfilename)
result = style.applyStylesheet(doc, None)
strres = style.saveResultToString(result)
style.freeStylesheet()
doc.freeDoc()
result.freeDoc()
else:
# no xml parser found
strres = ""
ritems = []
if len(strres) == 0:
return []
else:
lines = strres.split("\n")
for line in lines:
if searchwith:
if line.count(searchwith) > 0:
ritems.append(line)
else:
if len(line) > 0:
ritems.append(line)
return ritems
def get_kbt_items_for_bibedit(kbtname, tag="", searchwith=""):
"""
    A simplified, customized version of the function get_kbt_items.
Traverses an RDF document. By default returns all leaves. If
tag defined returns the content of that tag.
If searchwith defined, returns leaves that match it.
Warning! In order to make this faster, the matching field values
cannot be multi-line!
    @param kbtname: name of the taxonomy kb
    @param tag: name of the tag whose content should be returned
@param searchwith: a term to search with
"""
from lxml import etree
#get the actual file based on the kbt name
kb_id = get_kb_id(kbtname)
if not kb_id:
return []
#get the rdf file..
rdfname = cfg['CFG_WEBDIR'] + "/kbfiles/" + str(kb_id) + ".rdf"
if not os.path.exists(rdfname):
return []
#parse the doc with static xslt
styledoc = etree.XML("""
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
<xsl:output method="xml" standalone="yes" omit-xml-declaration="yes" indent="no"/>
<xsl:template match="rdf:RDF">
<foo><!--just having some tag here speeds up output by 10x-->
<xsl:apply-templates />
</foo>
</xsl:template>
<xsl:template match="*">
<!--hi><xsl:value-of select="local-name()"/></hi-->
<xsl:if test="local-name()='"""+tag+"""'">
<myout><xsl:value-of select="normalize-space(.)"/></myout>
</xsl:if>
<!--traverse down in tree!-->
<xsl:text>
</xsl:text>
<xsl:apply-templates />
</xsl:template>
</xsl:stylesheet>
""")
style = etree.XSLT(styledoc)
doc = etree.parse(open(rdfname, 'r'))
strres = str(style(doc))
ritems = []
if len(strres) == 0:
return []
else:
lines = strres.split("\n")
for line in lines:
#take only those with myout..
if line.count("<myout>") > 0:
#remove the myout tag..
line = line[9:]
line = line[:-8]
if searchwith:
if line.count(searchwith) > 0:
ritems.append(line)
else:
ritems.append(line)
return ritems
if __name__ == "__main__":
pass
| 1 | 11,601 | @MSusik be careful about changing existing API. I think in this case you shouldn't rename the argument. | inveniosoftware-invenio | py |
@@ -176,8 +176,8 @@ int produce_message(struct flb_time *tm, msgpack_object *map,
/* Add extracted topic on the fly to topiclist */
if (ctx->dynamic_topic) {
/* Only if default topic is set and this topicname is not set for this message */
- if (strncmp(topic->name, flb_kafka_topic_default(ctx)->name, val.via.str.size) == 0 &&
- (strncmp(topic->name, val.via.str.ptr, val.via.str.size) != 0) ) {
+ if (strcmp(topic->name, flb_kafka_topic_default(ctx)->name) == 0 &&
+ (strcmp(topic->name, val.via.str.ptr) != 0) ) {
if (strstr(val.via.str.ptr, ",")) {
/* Don't allow commas in kafkatopic name */
flb_warn("',' not allowed in dynamic_kafka topic names"); | 1 | /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* Fluent Bit
* ==========
* Copyright (C) 2019-2020 The Fluent Bit Authors
* Copyright (C) 2015-2018 Treasure Data Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <fluent-bit/flb_output_plugin.h>
#include <fluent-bit/flb_time.h>
#include <fluent-bit/flb_pack.h>
#include <fluent-bit/flb_utils.h>
#include "kafka_config.h"
#include "kafka_topic.h"
void cb_kafka_msg(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage,
void *opaque)
{
struct flb_kafka *ctx = (struct flb_kafka *) opaque;
if (rkmessage->err) {
flb_plg_warn(ctx->ins, "message delivery failed: %s",
rd_kafka_err2str(rkmessage->err));
}
else {
flb_plg_debug(ctx->ins, "message delivered (%zd bytes, "
"partition %"PRId32")",
rkmessage->len, rkmessage->partition);
}
}
void cb_kafka_logger(const rd_kafka_t *rk, int level,
const char *fac, const char *buf)
{
struct flb_kafka *ctx;
ctx = (struct flb_kafka *) rd_kafka_opaque(rk);
if (level <= FLB_KAFKA_LOG_ERR) {
flb_plg_error(ctx->ins, "%s: %s",
rk ? rd_kafka_name(rk) : NULL, buf);
}
else if (level == FLB_KAFKA_LOG_WARNING) {
flb_plg_warn(ctx->ins, "%s: %s",
rk ? rd_kafka_name(rk) : NULL, buf);
}
else if (level == FLB_KAFKA_LOG_NOTICE || level == FLB_KAFKA_LOG_INFO) {
flb_plg_info(ctx->ins, "%s: %s",
rk ? rd_kafka_name(rk) : NULL, buf);
}
else if (level == FLB_KAFKA_LOG_DEBUG) {
flb_plg_debug(ctx->ins, "%s: %s",
rk ? rd_kafka_name(rk) : NULL, buf);
}
}
static int cb_kafka_init(struct flb_output_instance *ins,
struct flb_config *config,
void *data)
{
struct flb_kafka *ctx;
/* Configuration */
ctx = flb_kafka_conf_create(ins, config);
if (!ctx) {
flb_plg_error(ins, "failed to initialize");
return -1;
}
/* Set global context */
flb_output_set_context(ins, ctx);
return 0;
}
int produce_message(struct flb_time *tm, msgpack_object *map,
struct flb_kafka *ctx, struct flb_config *config)
{
int i;
int ret;
int size;
int queue_full_retries = 0;
char *out_buf;
size_t out_size;
struct mk_list *head;
struct mk_list *topics;
struct flb_split_entry *entry;
char *dynamic_topic;
char *message_key = NULL;
size_t message_key_len = 0;
struct flb_kafka_topic *topic = NULL;
msgpack_sbuffer mp_sbuf;
msgpack_packer mp_pck;
msgpack_object key;
msgpack_object val;
flb_sds_t s;
/* Init temporal buffers */
msgpack_sbuffer_init(&mp_sbuf);
msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
if (ctx->format == FLB_KAFKA_FMT_JSON || ctx->format == FLB_KAFKA_FMT_MSGP) {
/* Make room for the timestamp */
size = map->via.map.size + 1;
msgpack_pack_map(&mp_pck, size);
/* Pack timestamp */
msgpack_pack_str(&mp_pck, ctx->timestamp_key_len);
msgpack_pack_str_body(&mp_pck,
ctx->timestamp_key, ctx->timestamp_key_len);
switch (ctx->timestamp_format) {
case FLB_JSON_DATE_DOUBLE:
msgpack_pack_double(&mp_pck, flb_time_to_double(tm));
break;
case FLB_JSON_DATE_ISO8601:
{
size_t date_len;
int len;
struct tm _tm;
char time_formatted[32];
/* Format the time; use microsecond precision (not nanoseconds). */
gmtime_r(&tm->tm.tv_sec, &_tm);
date_len = strftime(time_formatted, sizeof(time_formatted) - 1,
FLB_JSON_DATE_ISO8601_FMT, &_tm);
len = snprintf(time_formatted + date_len, sizeof(time_formatted) - 1 - date_len,
".%06" PRIu64 "Z", (uint64_t) tm->tm.tv_nsec / 1000);
date_len += len;
msgpack_pack_str(&mp_pck, date_len);
msgpack_pack_str_body(&mp_pck, time_formatted, date_len);
}
break;
}
}
else {
size = map->via.map.size;
msgpack_pack_map(&mp_pck, size);
}
for (i = 0; i < map->via.map.size; i++) {
key = map->via.map.ptr[i].key;
val = map->via.map.ptr[i].val;
msgpack_pack_object(&mp_pck, key);
msgpack_pack_object(&mp_pck, val);
/* Lookup message key */
if (ctx->message_key_field && !message_key && val.type == MSGPACK_OBJECT_STR) {
if (key.via.str.size == ctx->message_key_field_len &&
strncmp(key.via.str.ptr, ctx->message_key_field, ctx->message_key_field_len) == 0) {
message_key = (char *) val.via.str.ptr;
message_key_len = val.via.str.size;
}
}
/* Lookup key/topic */
if (ctx->topic_key && !topic && val.type == MSGPACK_OBJECT_STR) {
if (key.via.str.size == ctx->topic_key_len &&
strncmp(key.via.str.ptr, ctx->topic_key, ctx->topic_key_len) == 0) {
topic = flb_kafka_topic_lookup((char *) val.via.str.ptr,
val.via.str.size, ctx);
/* Add extracted topic on the fly to topiclist */
if (ctx->dynamic_topic) {
/* Only if default topic is set and this topicname is not set for this message */
if (strncmp(topic->name, flb_kafka_topic_default(ctx)->name, val.via.str.size) == 0 &&
(strncmp(topic->name, val.via.str.ptr, val.via.str.size) != 0) ) {
if (strstr(val.via.str.ptr, ",")) {
/* Don't allow commas in kafkatopic name */
flb_warn("',' not allowed in dynamic_kafka topic names");
continue;
}
if (val.via.str.size > 64) {
/* Don't allow length of dynamic kafka topics > 64 */
flb_warn(" dynamic kafka topic length > 64 not allowed");
continue;
}
dynamic_topic = flb_malloc(val.via.str.size + 1);
if (!dynamic_topic) {
/* Use default topic */
flb_errno();
continue;
}
strncpy(dynamic_topic, val.via.str.ptr, val.via.str.size);
dynamic_topic[val.via.str.size] = '\0';
topics = flb_utils_split(dynamic_topic, ',', 0);
if (!topics) {
/* Use the default topic */
flb_errno();
flb_free(dynamic_topic);
continue;
}
mk_list_foreach(head, topics) {
/* Add the (one) found topicname to the topic configuration */
entry = mk_list_entry(head, struct flb_split_entry, _head);
topic = flb_kafka_topic_create(entry->value, ctx);
if (!topic) {
/* Use default topic */
flb_error("[out_kafka] cannot register topic '%s'",
entry->value);
topic = flb_kafka_topic_lookup((char *) val.via.str.ptr,
val.via.str.size, ctx);
}
else {
flb_info("[out_kafka] new topic added: %s", dynamic_topic);
}
}
flb_free(dynamic_topic);
}
}
}
}
}
if (ctx->format == FLB_KAFKA_FMT_JSON) {
s = flb_msgpack_raw_to_json_sds(mp_sbuf.data, mp_sbuf.size);
if (!s) {
flb_plg_error(ctx->ins, "error encoding to JSON");
msgpack_sbuffer_destroy(&mp_sbuf);
return FLB_ERROR;
}
out_buf = s;
out_size = flb_sds_len(out_buf);
}
else if (ctx->format == FLB_KAFKA_FMT_MSGP) {
out_buf = mp_sbuf.data;
out_size = mp_sbuf.size;
}
else if (ctx->format == FLB_KAFKA_FMT_GELF) {
s = flb_msgpack_raw_to_gelf(mp_sbuf.data, mp_sbuf.size,
tm, &(ctx->gelf_fields));
if (s == NULL) {
flb_plg_error(ctx->ins, "error encoding to GELF");
msgpack_sbuffer_destroy(&mp_sbuf);
return FLB_ERROR;
}
out_buf = s;
out_size = flb_sds_len(s);
}
if (!message_key) {
message_key = ctx->message_key;
message_key_len = ctx->message_key_len;
}
if (!topic) {
topic = flb_kafka_topic_default(ctx);
}
if (!topic) {
flb_plg_error(ctx->ins, "no default topic found");
msgpack_sbuffer_destroy(&mp_sbuf);
return FLB_ERROR;
}
retry:
if (queue_full_retries >= 10) {
if (ctx->format == FLB_KAFKA_FMT_JSON) {
            /* out_buf points to an sds buffer here; release it as one */
            flb_sds_destroy(s);
}
if (ctx->format == FLB_KAFKA_FMT_GELF) {
flb_sds_destroy(s);
}
msgpack_sbuffer_destroy(&mp_sbuf);
return FLB_RETRY;
}
ret = rd_kafka_produce(topic->tp,
RD_KAFKA_PARTITION_UA,
RD_KAFKA_MSG_F_COPY,
out_buf, out_size,
message_key, message_key_len,
ctx);
if (ret == -1) {
fprintf(stderr,
"%% Failed to produce to topic %s: %s\n",
rd_kafka_topic_name(topic->tp),
rd_kafka_err2str(rd_kafka_last_error()));
/*
     * the rdkafka queue is full, keep trying 'locally' for a few seconds,
     * otherwise let the caller issue a main retry against the engine.
*/
if (rd_kafka_last_error() == RD_KAFKA_RESP_ERR__QUEUE_FULL) {
flb_plg_warn(ctx->ins, "internal queue is full, "
"retrying in one second");
/*
* If the queue is full, first make sure to discard any further
* flush request from the engine. This means 'the caller will
* issue a retry at a later time'.
*/
ctx->blocked = FLB_TRUE;
/*
             * The next step is to give the background rdkafka library some
             * time to do its own work. By default rdkafka waits 1 second or
             * for up to 10000 messages to be enqueued before delivery.
             *
             * If the Kafka broker is down we should try a couple of times
             * to enqueue this message; if we exceed 10 times, we just
             * issue a full retry of the data chunk.
*/
flb_time_sleep(1000, config);
rd_kafka_poll(ctx->producer, 0);
/* Issue a re-try */
queue_full_retries++;
goto retry;
}
}
else {
flb_plg_debug(ctx->ins, "enqueued message (%zd bytes) for topic '%s'",
out_size, rd_kafka_topic_name(topic->tp));
}
ctx->blocked = FLB_FALSE;
rd_kafka_poll(ctx->producer, 0);
if (ctx->format == FLB_KAFKA_FMT_JSON) {
flb_sds_destroy(s);
}
if (ctx->format == FLB_KAFKA_FMT_GELF) {
flb_sds_destroy(s);
}
msgpack_sbuffer_destroy(&mp_sbuf);
return FLB_OK;
}
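/*
 * Illustrative helper, not part of this plugin (assumes <string.h>, which the
 * Fluent Bit headers above already pull in): msgpack string objects such as
 * val.via.str are NOT NUL-terminated, so comparing them against C strings
 * needs an explicit length check plus memcmp(), never strcmp() on the raw
 * pointer -- the concern raised in the review attached to this file.
 */
static inline int msgpack_str_eq(const char *cstr, const char *ptr, size_t len)
{
    return strlen(cstr) == len && memcmp(cstr, ptr, len) == 0;
}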
static void cb_kafka_flush(const void *data, size_t bytes,
const char *tag, int tag_len,
struct flb_input_instance *i_ins,
void *out_context,
struct flb_config *config)
{
int ret;
size_t off = 0;
struct flb_kafka *ctx = out_context;
struct flb_time tms;
msgpack_object *obj;
msgpack_unpacked result;
/*
     * If the context is blocked, it means the rdkafka queue is full and no
     * more messages can be appended. For our caller (the Fluent Bit engine)
     * this means that it is not possible to work on this now and it needs
     * to 'retry'.
*/
if (ctx->blocked == FLB_TRUE) {
FLB_OUTPUT_RETURN(FLB_RETRY);
}
/* Iterate the original buffer and perform adjustments */
msgpack_unpacked_init(&result);
while (msgpack_unpack_next(&result, data, bytes, &off) == MSGPACK_UNPACK_SUCCESS) {
flb_time_pop_from_msgpack(&tms, &result, &obj);
ret = produce_message(&tms, obj, ctx, config);
if (ret == FLB_ERROR) {
msgpack_unpacked_destroy(&result);
FLB_OUTPUT_RETURN(FLB_ERROR);
}
else if (ret == FLB_RETRY) {
msgpack_unpacked_destroy(&result);
FLB_OUTPUT_RETURN(FLB_RETRY);
}
}
msgpack_unpacked_destroy(&result);
FLB_OUTPUT_RETURN(FLB_OK);
}
static int cb_kafka_exit(void *data, struct flb_config *config)
{
struct flb_kafka *ctx = data;
flb_kafka_conf_destroy(ctx);
return 0;
}
struct flb_output_plugin out_kafka_plugin = {
.name = "kafka",
.description = "Kafka",
.cb_init = cb_kafka_init,
.cb_flush = cb_kafka_flush,
.cb_exit = cb_kafka_exit,
.flags = 0
};
| 1 | 11,546 | This is bad. It is not a NULL-terminated string here. | fluent-fluent-bit | c |
@@ -48,9 +48,10 @@ class UserIpReader
protected $server;
/**
- * Should we respect the X-Forwarded-For header?
+ * Configuration specifying allowed HTTP headers containing IPs (false for none).
+ * See [Proxy] allow_forwarded_ips setting in config.ini for more details.
*
- * @var bool
+ * @var string|bool
*/
protected $allowForwardedIps;
| 1 | <?php
/**
* Service to retrieve user IP address.
*
* PHP version 7
*
* Copyright (C) Villanova University 2020.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package Net
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org Main Page
*/
namespace VuFind\Net;
use Laminas\Stdlib\Parameters;
/**
* Service to retrieve user IP address.
*
* @category VuFind
* @package Net
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org Main Page
*/
class UserIpReader
{
/**
* Server parameters
*
* @var Parameters
*/
protected $server;
/**
* Should we respect the X-Forwarded-For header?
*
* @var bool
*/
protected $allowForwardedIps;
/**
* Constructor
*
* @param Parameters $server Server parameters
* @param bool $allowForwardedIps Should we respect the X-Forwarded-For
* header?
*/
public function __construct(Parameters $server, $allowForwardedIps = false)
{
$this->server = $server;
$this->allowForwardedIps = $allowForwardedIps;
}
/**
* Get the active user's IP address. Returns null if no address can be found.
*
* @return string
*/
public function getUserIp()
{
if ($this->allowForwardedIps) {
// First check X-Real-IP; this is most accurate when set...
$realIp = $this->server->get('HTTP_X_REAL_IP');
if (!empty($realIp)) {
return $realIp;
}
// Next, try X-Forwarded-For; if it's a comma-separated list, use
// only the first part.
$forwarded = $this->server->get('HTTP_X_FORWARDED_FOR');
if (!empty($forwarded)) {
$parts = explode(',', $forwarded);
return trim($parts[0]);
}
}
// Default case: use REMOTE_ADDR directly.
return $this->server->get('REMOTE_ADDR');
}
}
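// Usage sketch (illustrative; the $config object is an assumption, not part
// of this class): reading the client IP from PHP's superglobals, trusting
// forwarded headers only when explicitly enabled in configuration.
//
// $reader = new \VuFind\Net\UserIpReader(
//     new \Laminas\Stdlib\Parameters($_SERVER),
//     $config->Proxy->allow_forwarded_ips ?? false
// );
// $ip = $reader->getUserIp();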
| 1 | 29,879 | This probably should be @param string|false. | vufind-org-vufind | php |
@@ -195,7 +195,7 @@ public class SmartSqlTest extends SmartStoreTestCase {
*/
public void testSmartQueryReturningOneRowWithTwoIntegers() throws JSONException {
loadData();
- JSONArray result = store.query(QuerySpec.buildSmartQuerySpec("select mgr.{employees:salary}, e.{employees:salary} from {employees} as mgr, {employees} as e where e.{employees:lastName} = 'Thompson'", 1), 0);
+ JSONArray result = store.query(QuerySpec.buildSmartQuerySpec("select mgr.{employees:salary}, e.{employees:salary} from {employees} as mgr, {employees} as e where e.{employees:lastName} = 'Thompson' and mgr.{employees:employeeId} = e.{employees:managerId}", 1), 0);
assertSameJSONArray("Wrong result", new JSONArray("[[200000,120000]]"), result);
}
| 1 | /*
* Copyright (c) 2012, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.store;
import net.sqlcipher.database.SQLiteDatabase;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import com.salesforce.androidsdk.smartstore.store.DBOpenHelper;
import com.salesforce.androidsdk.smartstore.store.IndexSpec;
import com.salesforce.androidsdk.smartstore.store.QuerySpec;
import com.salesforce.androidsdk.smartstore.store.SmartSqlHelper.SmartSqlException;
import com.salesforce.androidsdk.smartstore.store.SmartStore;
import com.salesforce.androidsdk.smartstore.store.SmartStore.Type;
/**
* Tests for "smart" sql
*
*/
public class SmartSqlTest extends SmartStoreTestCase {
private static final String BUDGET = "budget";
private static final String NAME = "name";
private static final String SALARY = "salary";
private static final String MANAGER_ID = "managerId";
private static final String EMPLOYEE_ID = "employeeId";
private static final String LAST_NAME = "lastName";
private static final String FIRST_NAME = "firstName";
private static final String DEPT_CODE = "deptCode";
private static final String EMPLOYEES_SOUP = "employees";
private static final String DEPARTMENTS_SOUP = "departments";
@Override
protected SQLiteDatabase getWritableDatabase() {
return DBOpenHelper.getOpenHelper(targetContext).getWritableDatabase("");
}
@Override
public void setUp() throws Exception {
super.setUp();
store.registerSoup(EMPLOYEES_SOUP, new IndexSpec[] { // should be TABLE_1
new IndexSpec(FIRST_NAME, Type.string), // should be TABLE_1_0
new IndexSpec(LAST_NAME, Type.string), // should be TABLE_1_1
new IndexSpec(DEPT_CODE, Type.string), // should be TABLE_1_2
new IndexSpec(EMPLOYEE_ID, Type.string), // should be TABLE_1_3
new IndexSpec(MANAGER_ID, Type.string), // should be TABLE_1_4
new IndexSpec(SALARY, Type.integer) }); // should be TABLE_1_5
store.registerSoup(DEPARTMENTS_SOUP, new IndexSpec[] { // should be TABLE_2
new IndexSpec(DEPT_CODE, Type.string), // should be TABLE_2_0
new IndexSpec(NAME, Type.string), // should be TABLE_2_1
new IndexSpec(BUDGET, Type.integer) } ); // should be TABLE_2_2
}
/**
* Testing simple smart sql to sql conversion
*/
public void testSimpleConvertSmartSql() {
assertEquals("select TABLE_1_0, TABLE_1_1 from TABLE_1 order by TABLE_1_1",
store.convertSmartSql("select {employees:firstName}, {employees:lastName} from {employees} order by {employees:lastName}"));
assertEquals("select TABLE_2_1 from TABLE_2 order by TABLE_2_0",
store.convertSmartSql("select {departments:name} from {departments} order by {departments:deptCode}"));
}
/**
* Testing smart sql to sql conversion when there is a join
*/
public void testConvertSmartSqlWithJoin() {
assertEquals("select TABLE_2_1, TABLE_1_0 || ' ' || TABLE_1_1 "
+ "from TABLE_1, TABLE_2 "
+ "where TABLE_2_0 = TABLE_1_2 "
+ "order by TABLE_2_1, TABLE_1_1",
store.convertSmartSql("select {departments:name}, {employees:firstName} || ' ' || {employees:lastName} "
+ "from {employees}, {departments} "
+ "where {departments:deptCode} = {employees:deptCode} "
+ "order by {departments:name}, {employees:lastName}"));
}
/**
* Testing smart sql to sql conversion when there is a self join
*/
public void testConvertSmartSqlWithSelfJoin() {
assertEquals("select mgr.TABLE_1_1, e.TABLE_1_1 "
+ "from TABLE_1 as mgr, TABLE_1 as e "
+ "where mgr.TABLE_1_3 = e.TABLE_1_4",
store.convertSmartSql("select mgr.{employees:lastName}, e.{employees:lastName} "
+ "from {employees} as mgr, {employees} as e "
+ "where mgr.{employees:employeeId} = e.{employees:managerId}"));
}
/**
* Testing smart sql to sql conversion when path is: _soup, _soupEntryId or _soupLastModifiedDate
*/
public void testConvertSmartSqlWithSpecialColumns() {
assertEquals("select TABLE_1.id, TABLE_1.lastModified, TABLE_1.soup from TABLE_1",
store.convertSmartSql("select {employees:_soupEntryId}, {employees:_soupLastModifiedDate}, {employees:_soup} from {employees}"));
}
/**
* Testing smart sql to sql conversion when path is: _soup, _soupEntryId or _soupLastModifiedDate and there is a join
*/
public void testConvertSmartSqlWithSpecialColumnsAndJoin() {
assertEquals("select TABLE_1.id, TABLE_2.id from TABLE_1, TABLE_2",
store.convertSmartSql("select {employees:_soupEntryId}, {departments:_soupEntryId} from {employees}, {departments}"));
}
/**
     * Testing smart sql to sql conversion when path is: _soup, _soupEntryId or _soupLastModifiedDate and there is a self join
*/
public void testConvertSmartSqlWithSpecialColumnsAndSelfJoin() {
assertEquals("select mgr.id, e.id from TABLE_1 as mgr, TABLE_1 as e",
store.convertSmartSql("select mgr.{employees:_soupEntryId}, e.{employees:_soupEntryId} from {employees} as mgr, {employees} as e"));
}
/**
     * Test smart sql to sql conversion with insert/update/delete: expect exception
*/
public void testConvertSmartSqlWithInsertUpdateDelete() {
for (String smartSql : new String[] { "insert into {employees}", "update {employees}", "delete from {employees}"}) {
try {
store.convertSmartSql(smartSql);
fail("Should have thrown exception for " + smartSql);
}
catch (SmartSqlException e) {
// Expected
}
}
}
/**
* Test running smart query that does a select count
* @throws JSONException
*/
public void testSmartQueryDoingCount() throws JSONException {
loadData();
JSONArray result = store.query(QuerySpec.buildSmartQuerySpec("select count(*) from {employees}", 1), 0);
assertSameJSONArray("Wrong result", new JSONArray("[[7]]"), result);
}
/**
* Test running smart query that does a select sum
* @throws JSONException
*/
public void testSmartQueryDoingSum() throws JSONException {
loadData();
JSONArray result = store.query(QuerySpec.buildSmartQuerySpec("select sum({departments:budget}) from {departments}", 1), 0);
assertSameJSONArray("Wrong result", new JSONArray("[[3000000]]"), result);
}
/**
* Test running smart query that return one row with one integer
* @throws JSONException
*/
public void testSmartQueryReturningOneRowWithOneInteger() throws JSONException {
loadData();
JSONArray result = store.query(QuerySpec.buildSmartQuerySpec("select {employees:salary} from {employees} where {employees:lastName} = 'Haas'", 1), 0);
assertSameJSONArray("Wrong result", new JSONArray("[[200000]]"), result);
}
/**
* Test running smart query that return one row with two integers
* @throws JSONException
*/
public void testSmartQueryReturningOneRowWithTwoIntegers() throws JSONException {
loadData();
JSONArray result = store.query(QuerySpec.buildSmartQuerySpec("select mgr.{employees:salary}, e.{employees:salary} from {employees} as mgr, {employees} as e where e.{employees:lastName} = 'Thompson'", 1), 0);
assertSameJSONArray("Wrong result", new JSONArray("[[200000,120000]]"), result);
}
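    // Illustrative note, not part of the original tests: per the column
    // mapping documented in setUp(), the join condition added in the
    // accompanying diff makes the query above convert to
    //   select mgr.TABLE_1_5, e.TABLE_1_5 from TABLE_1 as mgr, TABLE_1 as e
    //   where e.TABLE_1_1 = 'Thompson' and mgr.TABLE_1_3 = e.TABLE_1_4
    // Without it, the self join is a cartesian product pairing Thompson with
    // every employee rather than only his manager.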
/**
* Test running smart query that return two rows with one integer each
* @throws JSONException
*/
public void testSmartQueryReturningTwoRowsWithOneIntegerEach() throws JSONException {
loadData();
JSONArray result = store.query(QuerySpec.buildSmartQuerySpec("select {employees:salary} from {employees} where {employees:managerId} = '00010' order by {employees:firstName}", 2), 0);
assertSameJSONArray("Wrong result", new JSONArray("[[120000],[100000]]"), result);
}
/**
* Test running smart query that return a soup along with a string and an integer
* @throws JSONException
*/
public void testSmartQueryReturningSoupStringAndInteger() throws JSONException {
loadData();
JSONObject christineJson = store.query(QuerySpec.buildExactQuerySpec(EMPLOYEES_SOUP, "employeeId", "00010", 1), 0).getJSONObject(0);
assertEquals("Wrong elt", "Christine", christineJson.getString(FIRST_NAME));
JSONArray result = store.query(QuerySpec.buildSmartQuerySpec("select {employees:_soup}, {employees:firstName}, {employees:salary} from {employees} where {employees:lastName} = 'Haas'", 1) , 0);
assertEquals("Expected one row", 1, result.length());
assertSameJSON("Wrong soup", christineJson, result.getJSONArray(0).getJSONObject(0));
assertEquals("Wrong first name", "Christine", result.getJSONArray(0).getString(1));
assertEquals("Wrong salary", 200000, result.getJSONArray(0).getInt(2));
}
/**
* Test running smart query with paging
* @throws JSONException
*/
public void testSmartQueryWithPaging() throws JSONException {
loadData();
QuerySpec query = QuerySpec.buildSmartQuerySpec("select {employees:firstName} from {employees} order by {employees:firstName}", 1);
assertEquals("Expected 7 employees", 7, store.countQuery(query));
String[] expectedResults = new String[] {"Christine", "Eileen", "Eva", "Irving", "John", "Michael", "Sally"};
for (int i = 0; i<7; i++) {
JSONArray result = store.query(query , i);
assertSameJSONArray("Wrong result at page " + i, new JSONArray("[[" + expectedResults[i] + "]]"), result);
}
}
/**
* Test running smart query that targets _soup, _soupEntryId and _soupLastModifiedDate
* @throws JSONException
*/
public void testSmartQueryWithSpecialFields() throws JSONException {
loadData();
JSONObject christineJson = store.query(QuerySpec.buildExactQuerySpec(EMPLOYEES_SOUP, "employeeId", "00010", 1), 0).getJSONObject(0);
assertEquals("Wrong elt", "Christine", christineJson.getString(FIRST_NAME));
JSONArray result = store.query(QuerySpec.buildSmartQuerySpec("select {employees:_soup}, {employees:_soupEntryId}, {employees:_soupLastModifiedDate}, {employees:salary} from {employees} where {employees:lastName} = 'Haas'", 1) , 0);
assertEquals("Expected one row", 1, result.length());
assertSameJSON("Wrong soup", christineJson, result.getJSONArray(0).getJSONObject(0));
assertSameJSON("Wrong soupEntryId", christineJson.getString(SmartStore.SOUP_ENTRY_ID), result.getJSONArray(0).getInt(1));
assertSameJSON("Wrong soupLastModifiedDate", christineJson.getString(SmartStore.SOUP_LAST_MODIFIED_DATE), result.getJSONArray(0).getLong(2));
}
/**
     * Load some data in the smart store
* @throws JSONException
*/
private void loadData() throws JSONException {
// Employees
createEmployee("Christine", "Haas", "A00", "00010", null, 200000);
createEmployee("Michael", "Thompson", "A00", "00020", "00010", 120000);
createEmployee("Sally", "Kwan", "A00", "00310", "00010", 100000);
createEmployee("John", "Geyer", "B00", "00040", null, 102000);
createEmployee("Irving", "Stern", "B00", "00050", "00040", 100000);
createEmployee("Eva", "Pulaski", "B00", "00060", "00050", 80000);
createEmployee("Eileen", "Henderson", "B00", "00070", "00050", 70000);
// Departments
createDepartment("A00", "Sales", 1000000);
createDepartment("B00", "R&D", 2000000);
}
private void createEmployee(String firstName, String lastName, String deptCode, String employeeId, String managerId, int salary) throws JSONException {
JSONObject employee = new JSONObject();
employee.put(FIRST_NAME, firstName);
employee.put(LAST_NAME, lastName);
employee.put(DEPT_CODE, deptCode);
employee.put(EMPLOYEE_ID, employeeId);
employee.put(MANAGER_ID, managerId);
employee.put(SALARY, salary);
store.create(EMPLOYEES_SOUP, employee);
}
private void createDepartment(String deptCode, String name, int budget) throws JSONException {
JSONObject department = new JSONObject();
department.put(DEPT_CODE, deptCode);
department.put(NAME, name);
department.put(BUDGET, budget);
store.create(DEPARTMENTS_SOUP, department);
}
}
| 1 | 13,873 | Unrelated test fix. Already in cordova34 branch. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -87,10 +87,6 @@ func (sc *stakingCommand) CreatePostSystemActions(ctx context.Context, sr protoc
}
func (sc *stakingCommand) Handle(ctx context.Context, act action.Action, sm protocol.StateManager) (*action.Receipt, error) {
- // no height here, v1 v2 has the same validate method, so directly use common one
- if err := validate(ctx, sm, sc, act); err != nil {
- return nil, err
- }
if sc.useV2(ctx, sm) {
return sc.stakingV2.Handle(ctx, act, sm)
} | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package poll
import (
"context"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/state"
)
type stakingCommand struct {
addr address.Address
stakingV1 Protocol
stakingV2 Protocol
}
// NewStakingCommand creates a staking command center to manage staking committee and new native staking
func NewStakingCommand(stkV1 Protocol, stkV2 Protocol) (Protocol, error) {
if stkV1 == nil && stkV2 == nil {
return nil, errors.New("empty staking protocol")
}
h := hash.Hash160b([]byte(protocolID))
addr, err := address.FromBytes(h[:])
if err != nil {
return nil, err
}
return &stakingCommand{
addr: addr,
stakingV1: stkV1,
stakingV2: stkV2,
}, nil
}
func (sc *stakingCommand) CreateGenesisStates(ctx context.Context, sm protocol.StateManager) error {
// if v1 exists, bootstrap from v1 only
if sc.stakingV1 != nil {
return sc.stakingV1.CreateGenesisStates(ctx, sm)
}
return sc.stakingV2.CreateGenesisStates(ctx, sm)
}
func (sc *stakingCommand) Start(ctx context.Context, sr protocol.StateReader) (interface{}, error) {
if sc.stakingV1 != nil {
if starter, ok := sc.stakingV1.(protocol.Starter); ok {
if _, err := starter.Start(ctx, sr); err != nil {
return nil, err
}
}
}
if sc.stakingV2 != nil {
if starter, ok := sc.stakingV2.(protocol.Starter); ok {
return starter.Start(ctx, sr)
}
}
return nil, nil
}
func (sc *stakingCommand) CreatePreStates(ctx context.Context, sm protocol.StateManager) error {
if sc.useV2(ctx, sm) {
if p, ok := sc.stakingV2.(protocol.PreStatesCreator); ok {
return p.CreatePreStates(ctx, sm)
}
}
if p, ok := sc.stakingV1.(protocol.PreStatesCreator); ok {
return p.CreatePreStates(ctx, sm)
}
return nil
}
func (sc *stakingCommand) CreatePostSystemActions(ctx context.Context, sr protocol.StateReader) ([]action.Envelope, error) {
// no height here, v1 v2 has the same createPostSystemActions method, so directly use common one
return createPostSystemActions(ctx, sr, sc)
}
func (sc *stakingCommand) Handle(ctx context.Context, act action.Action, sm protocol.StateManager) (*action.Receipt, error) {
// no height here, v1 v2 has the same validate method, so directly use common one
if err := validate(ctx, sm, sc, act); err != nil {
return nil, err
}
if sc.useV2(ctx, sm) {
return sc.stakingV2.Handle(ctx, act, sm)
}
return sc.stakingV1.Handle(ctx, act, sm)
}
func (sc *stakingCommand) CalculateCandidatesByHeight(ctx context.Context, sr protocol.StateReader, height uint64) (state.CandidateList, error) {
if sc.useV2ByHeight(ctx, height) {
return sc.stakingV2.CalculateCandidatesByHeight(ctx, sr, height)
}
return sc.stakingV1.CalculateCandidatesByHeight(ctx, sr, height)
}
// Delegates returns exact number of delegates of current epoch
func (sc *stakingCommand) Delegates(ctx context.Context, sr protocol.StateReader) (state.CandidateList, error) {
if sc.useV2(ctx, sr) {
return sc.stakingV2.Delegates(ctx, sr)
}
return sc.stakingV1.Delegates(ctx, sr)
}
// NextDelegates returns exact number of delegates of next epoch
func (sc *stakingCommand) NextDelegates(ctx context.Context, sr protocol.StateReader) (state.CandidateList, error) {
if sc.useV2(ctx, sr) {
return sc.stakingV2.NextDelegates(ctx, sr)
}
return sc.stakingV1.NextDelegates(ctx, sr)
}
// Candidates returns candidate list from state factory of current epoch
func (sc *stakingCommand) Candidates(ctx context.Context, sr protocol.StateReader) (state.CandidateList, error) {
if sc.useV2(ctx, sr) {
return sc.stakingV2.Candidates(ctx, sr)
}
return sc.stakingV1.Candidates(ctx, sr)
}
// NextCandidates returns candidate list from state factory of next epoch
func (sc *stakingCommand) NextCandidates(ctx context.Context, sr protocol.StateReader) (state.CandidateList, error) {
if sc.useV2(ctx, sr) {
return sc.stakingV2.NextCandidates(ctx, sr)
}
return sc.stakingV1.NextCandidates(ctx, sr)
}
func (sc *stakingCommand) ReadState(ctx context.Context, sr protocol.StateReader, method []byte, args ...[]byte) ([]byte, error) {
if sc.useV2(ctx, sr) {
res, err := sc.stakingV2.ReadState(ctx, sr, method, args...)
if err != nil && sc.stakingV1 != nil {
// check if reading from v1 only method
return sc.stakingV1.ReadState(ctx, sr, method, args...)
}
		// propagate any v2 error when there is no v1 to fall back to
		return res, err
}
return sc.stakingV1.ReadState(ctx, sr, method, args...)
}
// Register registers the protocol with a unique ID
func (sc *stakingCommand) Register(r *protocol.Registry) error {
return r.Register(protocolID, sc)
}
// ForceRegister registers the protocol with a unique ID and force replacing the previous protocol if it exists
func (sc *stakingCommand) ForceRegister(r *protocol.Registry) error {
return r.ForceRegister(protocolID, sc)
}
func (sc *stakingCommand) Name() string {
return protocolID
}
func (sc *stakingCommand) useV2(ctx context.Context, sr protocol.StateReader) bool {
height, err := sr.Height()
if err != nil {
panic("failed to return out height from state reader")
}
return sc.useV2ByHeight(ctx, height)
}
func (sc *stakingCommand) useV2ByHeight(ctx context.Context, height uint64) bool {
bcCtx := protocol.MustGetBlockchainCtx(ctx)
hu := config.NewHeightUpgrade(&bcCtx.Genesis)
if sc.stakingV1 == nil || hu.IsPost(config.Fairbank, height) {
return true
}
return false
}
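// pickProtocol is an illustrative helper, not part of the original file; it
// restates the dispatch rule implemented above: v2 is selected once v1 is
// absent or the chain has passed the Fairbank activation height.
func pickProtocol(v1, v2 Protocol, postFairbank bool) Protocol {
	if v1 == nil || postFairbank {
		return v2
	}
	return v1
}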
| 1 | 21,659 | validate() will be called by either V1 or V2 | iotexproject-iotex-core | go |
@@ -100,6 +100,13 @@ class CppGenerator : public BaseGenerator {
assert(!cur_name_space_);
+ code_ += "#if defined(_MSC_VER)";
+ code_ += "#define NOEXCEPT";
+ code_ += "#else";
+ code_ += "#define NOEXCEPT noexcept";
+ code_ += "#endif";
+ code_ += "";
+
// Generate forward declarations for all structs/tables, since they may
// have circular references.
for (auto it = parser_.structs_.vec.begin(); | 1 | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// independent of idl_parser, since this code is not needed for most clients
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"
#include "flatbuffers/code_generators.h"
namespace flatbuffers {
static std::string GeneratedFileName(const std::string &path,
const std::string &file_name) {
return path + file_name + "_generated.h";
}
namespace cpp {
class CppGenerator : public BaseGenerator {
public:
CppGenerator(const Parser &parser, const std::string &path,
const std::string &file_name)
: BaseGenerator(parser, path, file_name, "", "::"),
cur_name_space_(nullptr) {}
std::string GenIncludeGuard() const {
// Generate include guard.
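    // For example (hypothetical input), a file named "monster_test" in
    // namespace MyGame.Example yields the guard
    //   FLATBUFFERS_GENERATED_MONSTERTEST_MYGAME_EXAMPLE_H_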
std::string guard = file_name_;
// Remove any non-alpha-numeric characters that may appear in a filename.
struct IsAlnum {
bool operator()(char c) { return !isalnum(c); }
};
guard.erase(std::remove_if(guard.begin(), guard.end(), IsAlnum()),
guard.end());
guard = "FLATBUFFERS_GENERATED_" + guard;
guard += "_";
// For further uniqueness, also add the namespace.
auto name_space = parser_.namespaces_.back();
for (auto it = name_space->components.begin();
it != name_space->components.end(); ++it) {
guard += *it + "_";
}
guard += "H_";
std::transform(guard.begin(), guard.end(), guard.begin(), ::toupper);
return guard;
}
void GenIncludeDependencies() {
int num_includes = 0;
for (auto it = parser_.native_included_files_.begin();
it != parser_.native_included_files_.end(); ++it) {
code_ += "#include \"" + *it + "\"";
num_includes++;
}
for (auto it = parser_.included_files_.begin();
it != parser_.included_files_.end(); ++it) {
const auto basename =
flatbuffers::StripPath(flatbuffers::StripExtension(it->first));
if (basename != file_name_) {
code_ += "#include \"" + parser_.opts.include_prefix + basename +
"_generated.h\"";
num_includes++;
}
}
if (num_includes) code_ += "";
}
  // Iterate through all definitions we haven't generated code for (enums,
// structs, and tables) and output them to a single file.
bool generate() {
if (IsEverythingGenerated()) return true;
code_.Clear();
code_ += "// " + std::string(FlatBuffersGeneratedWarning());
const auto include_guard = GenIncludeGuard();
code_ += "#ifndef " + include_guard;
code_ += "#define " + include_guard;
code_ += "";
code_ += "#include \"flatbuffers/flatbuffers.h\"";
code_ += "";
if (parser_.opts.include_dependence_headers) {
GenIncludeDependencies();
}
assert(!cur_name_space_);
// Generate forward declarations for all structs/tables, since they may
// have circular references.
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
code_ += "struct " + struct_def.name + ";";
if (parser_.opts.generate_object_based_api && !struct_def.fixed) {
code_ += "struct " + NativeName(struct_def.name) + ";";
}
code_ += "";
}
}
// Generate code for all the enum declarations.
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end();
++it) {
const auto &enum_def = **it;
if (!enum_def.generated) {
SetNameSpace(enum_def.defined_namespace);
GenEnum(enum_def);
}
}
// Generate code for all structs, then all tables.
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (struct_def.fixed && !struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenStruct(struct_def);
}
}
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.fixed && !struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenTable(struct_def);
}
}
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.fixed && !struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenTablePost(struct_def);
}
}
// Generate code for union verifiers.
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end();
++it) {
const auto &enum_def = **it;
if (enum_def.is_union && !enum_def.generated) {
SetNameSpace(enum_def.defined_namespace);
GenUnionPost(enum_def);
}
}
// Generate convenient global helper functions:
if (parser_.root_struct_def_) {
auto &struct_def = *parser_.root_struct_def_;
SetNameSpace(struct_def.defined_namespace);
const auto &name = struct_def.name;
const auto qualified_name =
parser_.namespaces_.back()->GetFullyQualifiedName(name);
const auto cpp_name = TranslateNameSpace(qualified_name);
code_.SetValue("STRUCT_NAME", name);
code_.SetValue("CPP_NAME", cpp_name);
// The root datatype accessor:
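      // For a hypothetical root type MyRoot, the emitted code reads:
      //   inline const MyRoot *GetMyRoot(const void *buf) {
      //     return flatbuffers::GetRoot<MyRoot>(buf);
      //   }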
code_ += "inline \\";
code_ += "const {{CPP_NAME}} *Get{{STRUCT_NAME}}(const void *buf) {";
code_ += " return flatbuffers::GetRoot<{{CPP_NAME}}>(buf);";
code_ += "}";
code_ += "";
if (parser_.opts.mutable_buffer) {
code_ += "inline \\";
code_ += "{{STRUCT_NAME}} *GetMutable{{STRUCT_NAME}}(void *buf) {";
code_ += " return flatbuffers::GetMutableRoot<{{STRUCT_NAME}}>(buf);";
code_ += "}";
code_ += "";
}
if (parser_.file_identifier_.length()) {
// Return the identifier
code_ += "inline const char *{{STRUCT_NAME}}Identifier() {";
code_ += " return \"" + parser_.file_identifier_ + "\";";
code_ += "}";
code_ += "";
// Check if a buffer has the identifier.
code_ += "inline \\";
code_ += "bool {{STRUCT_NAME}}BufferHasIdentifier(const void *buf) {";
code_ += " return flatbuffers::BufferHasIdentifier(";
code_ += " buf, {{STRUCT_NAME}}Identifier());";
code_ += "}";
code_ += "";
}
// The root verifier.
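      // For the same hypothetical root type MyRoot, this emits:
      //   inline bool VerifyMyRootBuffer(flatbuffers::Verifier &verifier) {
      //     return verifier.VerifyBuffer<MyRoot>(MyRootIdentifier());
      //   }
      // with nullptr passed instead of the identifier when none is declared.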
if (parser_.file_identifier_.length()) {
code_.SetValue("ID", name + "Identifier()");
} else {
code_.SetValue("ID", "nullptr");
}
code_ += "inline bool Verify{{STRUCT_NAME}}Buffer(";
code_ += " flatbuffers::Verifier &verifier) {";
code_ += " return verifier.VerifyBuffer<{{CPP_NAME}}>({{ID}});";
code_ += "}";
code_ += "";
if (parser_.file_extension_.length()) {
// Return the extension
code_ += "inline const char *{{STRUCT_NAME}}Extension() {";
code_ += " return \"" + parser_.file_extension_ + "\";";
code_ += "}";
code_ += "";
}
// Finish a buffer with a given root object:
code_ += "inline void Finish{{STRUCT_NAME}}Buffer(";
code_ += " flatbuffers::FlatBufferBuilder &fbb,";
code_ += " flatbuffers::Offset<{{CPP_NAME}}> root) {";
if (parser_.file_identifier_.length())
code_ += " fbb.Finish(root, {{STRUCT_NAME}}Identifier());";
else
code_ += " fbb.Finish(root);";
code_ += "}";
code_ += "";
if (parser_.opts.generate_object_based_api) {
// A convenient root unpack function.
auto native_name =
NativeName(WrapInNameSpace(struct_def));
code_.SetValue("UNPACK_RETURN",
GenTypeNativePtr(native_name, nullptr, false));
code_.SetValue("UNPACK_TYPE",
GenTypeNativePtr(native_name, nullptr, true));
code_ += "inline {{UNPACK_RETURN}} UnPack{{STRUCT_NAME}}(";
code_ += " const void *buf,";
code_ += " const flatbuffers::resolver_function_t *res = nullptr) {";
code_ += " return {{UNPACK_TYPE}}\\";
code_ += "(Get{{STRUCT_NAME}}(buf)->UnPack(res));";
code_ += "}";
code_ += "";
}
}
assert(cur_name_space_);
SetNameSpace(nullptr);
// Close the include guard.
code_ += "#endif // " + include_guard;
const auto file_path = GeneratedFileName(path_, file_name_);
const auto final_code = code_.ToString();
return SaveFile(file_path.c_str(), final_code, false);
}
private:
CodeWriter code_;
// This tracks the current namespace so we can insert namespace declarations.
const Namespace *cur_name_space_;
const Namespace *CurrentNameSpace() const { return cur_name_space_; }
// Translates a qualified name in flatbuffer text format to the same name in
// the equivalent C++ namespace.
static std::string TranslateNameSpace(const std::string &qualified_name) {
std::string cpp_qualified_name = qualified_name;
size_t start_pos = 0;
while ((start_pos = cpp_qualified_name.find(".", start_pos)) !=
std::string::npos) {
cpp_qualified_name.replace(start_pos, 1, "::");
}
return cpp_qualified_name;
}
void GenComment(const std::vector<std::string> &dc, const char *prefix = "") {
std::string text;
::flatbuffers::GenComment(dc, &text, nullptr, prefix);
code_ += text + "\\";
}
// Return a C++ type from the table in idl.h
std::string GenTypeBasic(const Type &type, bool user_facing_type) const {
static const char *ctypename[] = {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \
#CTYPE,
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
};
if (user_facing_type) {
if (type.enum_def) return WrapInNameSpace(*type.enum_def);
if (type.base_type == BASE_TYPE_BOOL) return "bool";
}
return ctypename[type.base_type];
}
// Return a C++ pointer type, specialized to the actual struct/table types,
// and vector element types.
std::string GenTypePointer(const Type &type) const {
switch (type.base_type) {
case BASE_TYPE_STRING: {
return "flatbuffers::String";
}
case BASE_TYPE_VECTOR: {
const auto type_name = GenTypeWire(type.VectorType(), "", false);
return "flatbuffers::Vector<" + type_name + ">";
}
case BASE_TYPE_STRUCT: {
return WrapInNameSpace(*type.struct_def);
}
case BASE_TYPE_UNION:
// fall through
default: {
return "void";
}
}
}
// Return a C++ type for any type (scalar/pointer) specifically for
// building a flatbuffer.
std::string GenTypeWire(const Type &type, const char *postfix,
bool user_facing_type) const {
if (IsScalar(type.base_type)) {
return GenTypeBasic(type, user_facing_type) + postfix;
} else if (IsStruct(type)) {
return "const " + GenTypePointer(type) + " *";
} else {
return "flatbuffers::Offset<" + GenTypePointer(type) + ">" + postfix;
}
}
// Return a C++ type for any type (scalar/pointer) that reflects its
// serialized size.
std::string GenTypeSize(const Type &type) const {
if (IsScalar(type.base_type)) {
return GenTypeBasic(type, false);
} else if (IsStruct(type)) {
return GenTypePointer(type);
} else {
return "flatbuffers::uoffset_t";
}
}
// TODO(wvo): make this configurable.
static std::string NativeName(const std::string &name) { return name + "T"; }
const std::string &PtrType(const FieldDef *field) {
auto attr = field ? field->attributes.Lookup("cpp_ptr_type") : nullptr;
return attr ? attr->constant : parser_.opts.cpp_object_api_pointer_type;
}
const std::string NativeString(const FieldDef *field) {
auto attr = field ? field->attributes.Lookup("cpp_str_type") : nullptr;
auto &ret = attr ? attr->constant : parser_.opts.cpp_object_api_string_type;
if (ret.empty()) {
return "std::string";
}
return ret;
}
std::string GenTypeNativePtr(const std::string &type, const FieldDef *field,
bool is_constructor) {
auto &ptr_type = PtrType(field);
if (ptr_type != "naked") {
return ptr_type + "<" + type + ">";
} else if (is_constructor) {
return "";
} else {
return type + " *";
}
}
std::string GenPtrGet(const FieldDef &field) {
auto &ptr_type = PtrType(&field);
return ptr_type == "naked" ? "" : ".get()";
}
std::string GenTypeNative(const Type &type, bool invector,
const FieldDef &field) {
switch (type.base_type) {
case BASE_TYPE_STRING: {
return NativeString(&field);
}
case BASE_TYPE_VECTOR: {
const auto type_name = GenTypeNative(type.VectorType(), true, field);
return "std::vector<" + type_name + ">";
}
case BASE_TYPE_STRUCT: {
auto type_name = WrapInNameSpace(*type.struct_def);
if (IsStruct(type)) {
auto native_type = type.struct_def->attributes.Lookup("native_type");
if (native_type) {
type_name = native_type->constant;
}
if (invector || field.native_inline) {
return type_name;
} else {
return GenTypeNativePtr(type_name, &field, false);
}
} else {
return GenTypeNativePtr(NativeName(type_name), &field, false);
}
}
case BASE_TYPE_UNION: {
return type.enum_def->name + "Union";
}
default: {
return GenTypeBasic(type, true);
}
}
}
// Return a C++ type for any type (scalar/pointer) specifically for
// using a flatbuffer.
std::string GenTypeGet(const Type &type, const char *afterbasic,
const char *beforeptr, const char *afterptr,
bool user_facing_type) {
if (IsScalar(type.base_type)) {
return GenTypeBasic(type, user_facing_type) + afterbasic;
} else {
return beforeptr + GenTypePointer(type) + afterptr;
}
}
std::string GenEnumDecl(const EnumDef &enum_def) const {
const IDLOptions &opts = parser_.opts;
return (opts.scoped_enums ? "enum class " : "enum ") + enum_def.name;
}
std::string GenEnumValDecl(const EnumDef &enum_def,
const std::string &enum_val) const {
const IDLOptions &opts = parser_.opts;
return opts.prefixed_enums ? enum_def.name + "_" + enum_val : enum_val;
}
std::string GetEnumValUse(const EnumDef &enum_def,
const EnumVal &enum_val) const {
const IDLOptions &opts = parser_.opts;
if (opts.scoped_enums) {
return enum_def.name + "::" + enum_val.name;
} else if (opts.prefixed_enums) {
return enum_def.name + "_" + enum_val.name;
} else {
return enum_val.name;
}
}
static std::string UnionVerifySignature(const EnumDef &enum_def) {
return "bool Verify" + enum_def.name +
"(flatbuffers::Verifier &verifier, const void *obj, " +
enum_def.name + " type)";
}
static std::string UnionVectorVerifySignature(const EnumDef &enum_def) {
return "bool Verify" + enum_def.name + "Vector" +
"(flatbuffers::Verifier &verifier, " +
"const flatbuffers::Vector<flatbuffers::Offset<void>> *values, " +
"const flatbuffers::Vector<uint8_t> *types)";
}
static std::string UnionUnPackSignature(const EnumDef &enum_def,
bool inclass) {
return (inclass ? "static " : "") +
std::string("flatbuffers::NativeTable *") +
(inclass ? "" : enum_def.name + "Union::") +
"UnPack(const void *obj, " + enum_def.name +
" type, const flatbuffers::resolver_function_t *resolver)";
}
static std::string UnionPackSignature(const EnumDef &enum_def, bool inclass) {
return "flatbuffers::Offset<void> " +
(inclass ? "" : enum_def.name + "Union::") +
"Pack(flatbuffers::FlatBufferBuilder &_fbb, " +
"const flatbuffers::rehasher_function_t *_rehasher" +
(inclass ? " = nullptr" : "") + ") const";
}
static std::string TableCreateSignature(const StructDef &struct_def,
bool predecl) {
return "flatbuffers::Offset<" + struct_def.name + "> Create" +
struct_def.name +
"(flatbuffers::FlatBufferBuilder &_fbb, const " +
NativeName(struct_def.name) +
" *_o, const flatbuffers::rehasher_function_t *_rehasher" +
(predecl ? " = nullptr" : "") + ")";
}
static std::string TablePackSignature(const StructDef &struct_def,
bool inclass) {
return std::string(inclass ? "static " : "") +
"flatbuffers::Offset<" + struct_def.name + "> " +
(inclass ? "" : struct_def.name + "::") +
"Pack(flatbuffers::FlatBufferBuilder &_fbb, " +
"const " + NativeName(struct_def.name) + "* _o, " +
"const flatbuffers::rehasher_function_t *_rehasher" +
(inclass ? " = nullptr" : "") + ")";
}
static std::string TableUnPackSignature(const StructDef &struct_def,
bool inclass) {
return NativeName(struct_def.name) + " *" +
(inclass ? "" : struct_def.name + "::") +
"UnPack(const flatbuffers::resolver_function_t *_resolver" +
(inclass ? " = nullptr" : "") + ") const";
}
static std::string TableUnPackToSignature(const StructDef &struct_def,
bool inclass) {
return "void " + (inclass ? "" : struct_def.name + "::") +
"UnPackTo(" + NativeName(struct_def.name) + " *" + "_o, " +
"const flatbuffers::resolver_function_t *_resolver" +
(inclass ? " = nullptr" : "") + ") const";
}
// Generate an enum declaration and an enum string lookup table.
void GenEnum(const EnumDef &enum_def) {
code_.SetValue("ENUM_NAME", enum_def.name);
code_.SetValue("BASE_TYPE", GenTypeBasic(enum_def.underlying_type, false));
code_.SetValue("SEP", "");
GenComment(enum_def.doc_comment);
code_ += GenEnumDecl(enum_def) + "\\";
if (parser_.opts.scoped_enums)
code_ += " : {{BASE_TYPE}}\\";
code_ += " {";
int64_t anyv = 0;
const EnumVal *minv = nullptr, *maxv = nullptr;
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
GenComment(ev.doc_comment, " ");
code_.SetValue("KEY", GenEnumValDecl(enum_def, ev.name));
code_.SetValue("VALUE", NumToString(ev.value));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
code_.SetValue("SEP", ",\n");
minv = !minv || minv->value > ev.value ? &ev : minv;
maxv = !maxv || maxv->value < ev.value ? &ev : maxv;
anyv |= ev.value;
}
if (parser_.opts.scoped_enums || parser_.opts.prefixed_enums) {
assert(minv && maxv);
code_.SetValue("SEP", ",\n");
if (enum_def.attributes.Lookup("bit_flags")) {
code_.SetValue("KEY", GenEnumValDecl(enum_def, "NONE"));
code_.SetValue("VALUE", "0");
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
code_.SetValue("KEY", GenEnumValDecl(enum_def, "ANY"));
code_.SetValue("VALUE", NumToString(anyv));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
} else { // MIN & MAX are useless for bit_flags
code_.SetValue("KEY",GenEnumValDecl(enum_def, "MIN"));
code_.SetValue("VALUE", GenEnumValDecl(enum_def, minv->name));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
code_.SetValue("KEY",GenEnumValDecl(enum_def, "MAX"));
code_.SetValue("VALUE", GenEnumValDecl(enum_def, maxv->name));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
}
}
code_ += "";
code_ += "};";
if (parser_.opts.scoped_enums && enum_def.attributes.Lookup("bit_flags")) {
code_ += "DEFINE_BITMASK_OPERATORS({{ENUM_NAME}}, {{BASE_TYPE}})";
}
code_ += "";
    // Generate a string table for enum values.
    // The problem is, if the values are very sparse, the table could get
    // really big. Ideally in that case we'd generate a map lookup instead,
    // but for the moment we simply don't output a table at all.
auto range =
enum_def.vals.vec.back()->value - enum_def.vals.vec.front()->value + 1;
// Average distance between values above which we consider a table
// "too sparse". Change at will.
static const int kMaxSparseness = 5;
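    // For example, a hypothetical enum with values {0, 100} has range 101
    // over 2 entries, an average distance of 50 >= kMaxSparseness, so no
    // names table is emitted for it.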
if (range / static_cast<int64_t>(enum_def.vals.vec.size()) <
kMaxSparseness) {
code_ += "inline const char **EnumNames{{ENUM_NAME}}() {";
code_ += " static const char *names[] = {";
auto val = enum_def.vals.vec.front()->value;
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
while (val++ != ev.value) {
code_ += " \"\",";
}
code_ += " \"" + ev.name + "\",";
}
code_ += " nullptr";
code_ += " };";
code_ += " return names;";
code_ += "}";
code_ += "";
code_ += "inline const char *EnumName{{ENUM_NAME}}({{ENUM_NAME}} e) {";
code_ += " const size_t index = static_cast<int>(e)\\";
if (enum_def.vals.vec.front()->value) {
auto vals = GetEnumValUse(enum_def, *enum_def.vals.vec.front());
code_ += " - static_cast<int>(" + vals + ")\\";
}
code_ += ";";
code_ += " return EnumNames{{ENUM_NAME}}()[index];";
code_ += "}";
code_ += "";
}
// Generate type traits for unions to map from a type to union enum value.
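    // With the default prefixed-enum naming and a hypothetical union Any
    // holding a table Monster, the emitted specialization reads:
    //   template<> struct AnyTraits<Monster> {
    //     static const Any enum_value = Any_Monster;
    //   };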
if (enum_def.is_union) {
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (it == enum_def.vals.vec.begin()) {
code_ += "template<typename T> struct {{ENUM_NAME}}Traits {";
}
else {
auto name = WrapInNameSpace(*ev.struct_def);
code_ += "template<> struct {{ENUM_NAME}}Traits<" + name + "> {";
}
auto value = GetEnumValUse(enum_def, ev);
code_ += " static const {{ENUM_NAME}} enum_value = " + value + ";";
code_ += "};";
code_ += "";
}
}
if (parser_.opts.generate_object_based_api && enum_def.is_union) {
// Generate a union type
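      // The generated union wrapper owns its NativeTable pointer on the heap
      // and frees it in Reset(); the copy constructor and copy assignment are
      // declared but left undefined, so the type is effectively move-only.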
code_.SetValue("NAME", enum_def.name);
code_.SetValue("NONE",
GetEnumValUse(enum_def, *enum_def.vals.Lookup("NONE")));
code_ += "struct {{NAME}}Union {";
code_ += " {{NAME}} type;";
code_ += " flatbuffers::NativeTable *table;";
code_ += "";
code_ += " {{NAME}}Union() : type({{NONE}}), table(nullptr) {}";
code_ += " {{NAME}}Union({{NAME}}Union&& u):";
code_ += " type({{NONE}}), table(nullptr)";
code_ += " { std::swap(type, u.type); std::swap(table, u.table); }";
code_ += " {{NAME}}Union(const {{NAME}}Union &);";
code_ += " {{NAME}}Union &operator=(const {{NAME}}Union &);";
code_ += " ~{{NAME}}Union() { Reset(); }";
code_ += "";
code_ += " void Reset();";
code_ += "";
code_ += " template <typename T>";
code_ += " void Set(T&& value) {";
code_ += " Reset();";
code_ += " type = {{NAME}}Traits<typename T::TableType>::enum_value;";
code_ += " if (type != {{NONE}}) {";
code_ += " table = new T(std::forward<T>(value));";
code_ += " }";
code_ += " }";
code_ += "";
code_ += " " + UnionUnPackSignature(enum_def, true) + ";";
code_ += " " + UnionPackSignature(enum_def, true) + ";";
code_ += "";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (!ev.value) {
continue;
}
const auto native_type = NativeName(WrapInNameSpace(*ev.struct_def));
code_.SetValue("NATIVE_TYPE", native_type);
code_.SetValue("NATIVE_NAME", ev.name);
code_.SetValue("NATIVE_ID", GetEnumValUse(enum_def, ev));
code_ += " {{NATIVE_TYPE}} *As{{NATIVE_NAME}}() {";
code_ += " return type == {{NATIVE_ID}} ?";
code_ += " reinterpret_cast<{{NATIVE_TYPE}} *>(table) : nullptr;";
code_ += " }";
}
code_ += "};";
code_ += "";
}
if (enum_def.is_union) {
code_ += UnionVerifySignature(enum_def) + ";";
code_ += UnionVectorVerifySignature(enum_def) + ";";
code_ += "";
}
}
void GenUnionPost(const EnumDef &enum_def) {
    // Generate a verifier function for this union that can be called by the
    // table verifier functions. It uses a switch case to select a specific
    // verifier function to call; this should be safe even if the union type
    // has been corrupted, since the verifiers will simply fail when called
    // on the wrong type.
code_.SetValue("ENUM_NAME", enum_def.name);
code_ += "inline " + UnionVerifySignature(enum_def) + " {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
if (ev.value) {
code_.SetValue("TYPE", WrapInNameSpace(*ev.struct_def));
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<const {{TYPE}} *>(obj);";
code_ += " return verifier.VerifyTable(ptr);";
code_ += " }";
} else {
code_ += " case {{LABEL}}: {";
code_ += " return true;"; // "NONE" enum value.
code_ += " }";
}
}
code_ += " default: return false;";
code_ += " }";
code_ += "}";
code_ += "";
code_ += "inline " + UnionVectorVerifySignature(enum_def) + " {";
code_ += " if (values->size() != types->size()) return false;";
code_ += " for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {";
code_ += " if (!Verify" + enum_def.name + "(";
code_ += " verifier, values->Get(i), types->GetEnum<" + enum_def.name + ">(i))) {";
code_ += " return false;";
code_ += " }";
code_ += " }";
code_ += " return true;";
code_ += "}";
code_ += "";
if (parser_.opts.generate_object_based_api) {
// Generate union Unpack() and Pack() functions.
code_ += "inline " + UnionUnPackSignature(enum_def, false) + " {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (!ev.value) {
continue;
}
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE", WrapInNameSpace(*ev.struct_def));
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<const {{TYPE}} *>(obj);";
code_ += " return ptr->UnPack(resolver);";
code_ += " }";
}
code_ += " default: return nullptr;";
code_ += " }";
code_ += "}";
code_ += "";
code_ += "inline " + UnionPackSignature(enum_def, false) + " {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
auto &ev = **it;
if (!ev.value) {
continue;
}
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE", NativeName(WrapInNameSpace(*ev.struct_def)));
code_.SetValue("NAME", ev.struct_def->name);
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<const {{TYPE}} *>(table);";
code_ += " return Create{{NAME}}(_fbb, ptr, _rehasher).Union();";
code_ += " }";
}
code_ += " default: return 0;";
code_ += " }";
code_ += "}";
code_ += "";
// Union Reset() function.
code_.SetValue("NONE",
GetEnumValUse(enum_def, *enum_def.vals.Lookup("NONE")));
code_ += "inline void {{ENUM_NAME}}Union::Reset() {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (!ev.value) {
continue;
}
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE", NativeName(WrapInNameSpace(*ev.struct_def)));
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<{{TYPE}} *>(table);";
code_ += " delete ptr;";
code_ += " break;";
code_ += " }";
}
code_ += " default: break;";
code_ += " }";
code_ += " table = nullptr;";
code_ += " type = {{NONE}};";
code_ += "}";
code_ += "";
}
}
  // Generates a value with, optionally, a cast applied if the field has a
  // different underlying type from its interface type (currently only the
  // case for enums). "from" specifies the direction: true means from the
  // underlying type to the interface type.
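  // For example, a hypothetical enum field stored as int8_t with interface
  // type Color casts as static_cast<Color>(val) when from is true, and as
  // static_cast<int8_t>(val) when from is false.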
std::string GenUnderlyingCast(const FieldDef &field, bool from,
const std::string &val) {
if (from && field.value.type.base_type == BASE_TYPE_BOOL) {
return val + " != 0";
} else if ((field.value.type.enum_def &&
IsScalar(field.value.type.base_type)) ||
field.value.type.base_type == BASE_TYPE_BOOL) {
return "static_cast<" + GenTypeBasic(field.value.type, from) + ">(" +
val + ")";
} else {
return val;
}
}
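  // For example, a field named "hp" maps to the offset constant "VT_HP".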
std::string GenFieldOffsetName(const FieldDef &field) {
std::string uname = field.name;
std::transform(uname.begin(), uname.end(), uname.begin(), ::toupper);
return "VT_" + uname;
}
void GenFullyQualifiedNameGetter(const std::string &name) {
if (!parser_.opts.generate_name_strings) {
return;
}
auto fullname = parser_.namespaces_.back()->GetFullyQualifiedName(name);
code_.SetValue("NAME", fullname);
code_.SetValue("CONSTEXPR", "FLATBUFFERS_CONSTEXPR");
code_ += " static {{CONSTEXPR}} const char *GetFullyQualifiedName() {";
code_ += " return \"{{NAME}}\";";
code_ += " }";
}
std::string GenDefaultConstant(const FieldDef &field) {
return field.value.type.base_type == BASE_TYPE_FLOAT
? field.value.constant + "f"
: field.value.constant;
}
std::string GetDefaultScalarValue(const FieldDef &field) {
if (field.value.type.enum_def && IsScalar(field.value.type.base_type)) {
auto ev = field.value.type.enum_def->ReverseLookup(
static_cast<int>(StringToInt(field.value.constant.c_str())), false);
if (ev) {
return WrapInNameSpace(
field.value.type.enum_def->defined_namespace,
GetEnumValUse(*field.value.type.enum_def, *ev));
} else {
return GenUnderlyingCast(field, true, field.value.constant);
}
} else if (field.value.type.base_type == BASE_TYPE_BOOL) {
return field.value.constant == "0" ? "false" : "true";
} else {
return GenDefaultConstant(field);
}
}
void GenParam(const FieldDef &field, bool direct, const char *prefix) {
code_.SetValue("PRE", prefix);
code_.SetValue("PARAM_NAME", field.name);
if (direct && field.value.type.base_type == BASE_TYPE_STRING) {
code_.SetValue("PARAM_TYPE", "const char *");
code_.SetValue("PARAM_VALUE", "nullptr");
} else if (direct && field.value.type.base_type == BASE_TYPE_VECTOR) {
auto type = GenTypeWire(field.value.type.VectorType(), "", false);
code_.SetValue("PARAM_TYPE", "const std::vector<" + type + "> *");
code_.SetValue("PARAM_VALUE", "nullptr");
} else {
code_.SetValue("PARAM_TYPE", GenTypeWire(field.value.type, " ", true));
code_.SetValue("PARAM_VALUE", GetDefaultScalarValue(field));
}
code_ += "{{PRE}}{{PARAM_TYPE}}{{PARAM_NAME}} = {{PARAM_VALUE}}\\";
}
// Generate a member, including a default value for scalars and raw pointers.
void GenMember(const FieldDef &field) {
if (!field.deprecated && // Deprecated fields won't be accessible.
field.value.type.base_type != BASE_TYPE_UTYPE) {
auto type = GenTypeNative(field.value.type, false, field);
auto cpp_type = field.attributes.Lookup("cpp_type");
auto full_type = (cpp_type ? cpp_type->constant + " *" : type + " ");
code_.SetValue("FIELD_TYPE", full_type);
code_.SetValue("FIELD_NAME", field.name);
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}};";
}
}
// Generate the default constructor for this struct. Properly initialize all
// scalar members with default values.
void GenDefaultConstructor(const StructDef& struct_def) {
std::string initializer_list;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated && // Deprecated fields won't be accessible.
field.value.type.base_type != BASE_TYPE_UTYPE) {
auto cpp_type = field.attributes.Lookup("cpp_type");
// Scalar types get parsed defaults, raw pointers get nullptrs.
if (IsScalar(field.value.type.base_type)) {
if (!initializer_list.empty()) {
initializer_list += ",\n ";
}
initializer_list += field.name;
initializer_list += "(" + GetDefaultScalarValue(field) + ")";
} else if (field.value.type.base_type == BASE_TYPE_STRUCT) {
if (IsStruct(field.value.type)) {
auto native_default = field.attributes.Lookup("native_default");
if (native_default) {
if (!initializer_list.empty()) {
initializer_list += ",\n ";
}
initializer_list +=
field.name + "(" + native_default->constant + ")";
}
}
} else if (cpp_type) {
if (!initializer_list.empty()) {
initializer_list += ",\n ";
}
initializer_list += field.name + "(0)";
}
}
}
if (!initializer_list.empty()) {
initializer_list = "\n : " + initializer_list;
}
code_.SetValue("NATIVE_NAME", NativeName(struct_def.name));
code_.SetValue("INIT_LIST", initializer_list);
code_ += " {{NATIVE_NAME}}(){{INIT_LIST}} {";
code_ += " }";
}
void GenNativeTable(const StructDef &struct_def) {
const auto native_name = NativeName(struct_def.name);
code_.SetValue("STRUCT_NAME", struct_def.name);
code_.SetValue("NATIVE_NAME", native_name);
// Generate a C++ object that can hold an unpacked version of this table.
code_ += "struct {{NATIVE_NAME}} : public flatbuffers::NativeTable {";
code_ += " typedef {{STRUCT_NAME}} TableType;";
GenFullyQualifiedNameGetter(native_name);
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
GenMember(**it);
}
GenDefaultConstructor(struct_def);
code_ += "};";
code_ += "";
}
// Generate the code to call the appropriate Verify function(s) for a field.
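  // For a hypothetical scalar field "hp" of type short, this appends a
  // clause of the form
  //   VerifyField<int16_t>(verifier, VT_HP)
  // chained onto the previous clause by the caller-supplied prefix
  // (e.g. " &&\n    ").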
void GenVerifyCall(const FieldDef &field, const char* prefix) {
code_.SetValue("PRE", prefix);
code_.SetValue("NAME", field.name);
code_.SetValue("REQUIRED", field.required ? "Required" : "");
code_.SetValue("SIZE", GenTypeSize(field.value.type));
code_.SetValue("OFFSET", GenFieldOffsetName(field));
code_ += "{{PRE}}VerifyField{{REQUIRED}}<{{SIZE}}>(verifier, {{OFFSET}})\\";
switch (field.value.type.base_type) {
case BASE_TYPE_UNION: {
code_.SetValue("ENUM_NAME", field.value.type.enum_def->name);
code_.SetValue("SUFFIX", UnionTypeFieldSuffix());
code_ += "{{PRE}}Verify{{ENUM_NAME}}(verifier, {{NAME}}(), "
"{{NAME}}{{SUFFIX}}())\\";
break;
}
case BASE_TYPE_STRUCT: {
if (!field.value.type.struct_def->fixed) {
code_ += "{{PRE}}verifier.VerifyTable({{NAME}}())\\";
}
break;
}
case BASE_TYPE_STRING: {
code_ += "{{PRE}}verifier.Verify({{NAME}}())\\";
break;
}
case BASE_TYPE_VECTOR: {
code_ += "{{PRE}}verifier.Verify({{NAME}}())\\";
switch (field.value.type.element) {
case BASE_TYPE_STRING: {
code_ += "{{PRE}}verifier.VerifyVectorOfStrings({{NAME}}())\\";
break;
}
case BASE_TYPE_STRUCT: {
if (!field.value.type.struct_def->fixed) {
code_ += "{{PRE}}verifier.VerifyVectorOfTables({{NAME}}())\\";
}
break;
}
case BASE_TYPE_UNION: {
code_.SetValue("ENUM_NAME", field.value.type.enum_def->name);
code_ += "{{PRE}}Verify{{ENUM_NAME}}Vector(verifier, {{NAME}}(), {{NAME}}_type())\\";
break;
}
default:
break;
}
break;
}
default: {
break;
}
}
}
// Generate an accessor struct, builder structs & function for a table.
void GenTable(const StructDef &struct_def) {
if (parser_.opts.generate_object_based_api) {
GenNativeTable(struct_def);
}
// Generate an accessor struct, with methods of the form:
// type name() const { return GetField<type>(offset, defaultval); }
GenComment(struct_def.doc_comment);
code_.SetValue("STRUCT_NAME", struct_def.name);
code_ += "struct {{STRUCT_NAME}} FLATBUFFERS_FINAL_CLASS"
" : private flatbuffers::Table {";
if (parser_.opts.generate_object_based_api) {
code_ += " typedef {{NATIVE_NAME}} NativeTableType;";
}
GenFullyQualifiedNameGetter(struct_def.name);
// Generate field id constants.
if (struct_def.fields.vec.size() > 0) {
      // We need to add a comma after every element except the last one, as
      // older versions of gcc complain about trailing commas.
code_.SetValue("SEP", "");
code_ += " enum {";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
// Deprecated fields won't be accessible.
continue;
}
code_.SetValue("OFFSET_NAME", GenFieldOffsetName(field));
code_.SetValue("OFFSET_VALUE", NumToString(field.value.offset));
code_ += "{{SEP}} {{OFFSET_NAME}} = {{OFFSET_VALUE}}\\";
code_.SetValue("SEP", ",\n");
}
code_ += "";
code_ += " };";
}
// Generate the accessors.
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
// Deprecated fields won't be accessible.
continue;
}
const bool is_struct = IsStruct(field.value.type);
const bool is_scalar = IsScalar(field.value.type.base_type);
code_.SetValue("FIELD_NAME", field.name);
// Call a different accessor for pointers, that indirects.
std::string accessor = "";
if (is_scalar) {
accessor = "GetField<";
} else if (is_struct) {
accessor = "GetStruct<";
} else {
accessor = "GetPointer<";
}
auto offset_str = GenFieldOffsetName(field);
auto offset_type =
GenTypeGet(field.value.type, "", "const ", " *", false);
auto call = accessor + offset_type + ">(" + offset_str;
// Default value as second arg for non-pointer types.
if (is_scalar) {
call += ", " + GenDefaultConstant(field);
}
call += ")";
GenComment(field.doc_comment, " ");
code_.SetValue("FIELD_TYPE",
GenTypeGet(field.value.type, " ", "const ", " *", true));
code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, true, call));
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}() const {";
code_ += " return {{FIELD_VALUE}};";
code_ += " }";
if (field.value.type.base_type == BASE_TYPE_UNION) {
auto u = field.value.type.enum_def;
code_ += " template<typename T> "
"const T *{{FIELD_NAME}}_as() const;";
for (auto u_it = u->vals.vec.begin();
u_it != u->vals.vec.end(); ++u_it) {
if (!(*u_it)->struct_def) {
continue;
}
auto arg_struct_def = (*u_it)->struct_def;
auto full_struct_name = WrapInNameSpace(*arg_struct_def);
          // @TODO: Maybe make this decision more universal? How?
code_.SetValue("U_GET_TYPE", field.name + UnionTypeFieldSuffix());
code_.SetValue("U_ELEMENT_TYPE", WrapInNameSpace(
u->defined_namespace, GetEnumValUse(*u, **u_it)));
code_.SetValue("U_FIELD_TYPE", "const " + full_struct_name + " *");
code_.SetValue("U_ELEMENT_NAME", full_struct_name);
code_.SetValue("U_FIELD_NAME",
field.name + "_as_" + (*u_it)->name);
// `const Type *union_name_asType() const` accessor.
code_ += " {{U_FIELD_TYPE}}{{U_FIELD_NAME}}() const {";
code_ += " return ({{U_GET_TYPE}}() == {{U_ELEMENT_TYPE}})? "
"static_cast<{{U_FIELD_TYPE}}>({{FIELD_NAME}}()) "
": nullptr;";
code_ += " }";
}
}
if (parser_.opts.mutable_buffer) {
if (is_scalar) {
const auto type = GenTypeWire(field.value.type, "", false);
code_.SetValue("SET_FN", "SetField<" + type + ">");
code_.SetValue("OFFSET_NAME", offset_str);
code_.SetValue("FIELD_TYPE", GenTypeBasic(field.value.type, true));
code_.SetValue("FIELD_VALUE",
GenUnderlyingCast(field, false, "_" + field.name));
code_.SetValue("DEFAULT_VALUE", GenDefaultConstant(field));
code_ += " bool mutate_{{FIELD_NAME}}({{FIELD_TYPE}} "
"_{{FIELD_NAME}}) {";
code_ += " return {{SET_FN}}({{OFFSET_NAME}}, {{FIELD_VALUE}}, {{DEFAULT_VALUE}});";
code_ += " }";
} else {
auto type = GenTypeGet(field.value.type, " ", "", " *", true);
auto underlying = accessor + type + ">(" + offset_str + ")";
code_.SetValue("FIELD_TYPE", type);
code_.SetValue("FIELD_VALUE",
GenUnderlyingCast(field, true, underlying));
code_ += " {{FIELD_TYPE}}mutable_{{FIELD_NAME}}() {";
code_ += " return {{FIELD_VALUE}};";
code_ += " }";
}
}
auto nested = field.attributes.Lookup("nested_flatbuffer");
if (nested) {
std::string qualified_name =
parser_.namespaces_.back()->GetFullyQualifiedName(
nested->constant);
auto nested_root = parser_.structs_.Lookup(qualified_name);
assert(nested_root); // Guaranteed to exist by parser.
(void)nested_root;
code_.SetValue("CPP_NAME", TranslateNameSpace(qualified_name));
code_ += " const {{CPP_NAME}} *{{FIELD_NAME}}_nested_root() const {";
code_ += " const uint8_t* data = {{FIELD_NAME}}()->Data();";
code_ += " return flatbuffers::GetRoot<{{CPP_NAME}}>(data);";
code_ += " }";
}
// Generate a comparison function for this field if it is a key.
if (field.key) {
const bool is_string = (field.value.type.base_type == BASE_TYPE_STRING);
code_ += " bool KeyCompareLessThan(const {{STRUCT_NAME}} *o) const {";
if (is_string) {
code_ += " return *{{FIELD_NAME}}() < *o->{{FIELD_NAME}}();";
} else {
code_ += " return {{FIELD_NAME}}() < o->{{FIELD_NAME}}();";
}
code_ += " }";
if (is_string) {
code_ += " int KeyCompareWithValue(const char *val) const {";
code_ += " return strcmp({{FIELD_NAME}}()->c_str(), val);";
code_ += " }";
} else {
auto type = GenTypeBasic(field.value.type, false);
if (parser_.opts.scoped_enums && field.value.type.enum_def &&
IsScalar(field.value.type.base_type)) {
type = GenTypeGet(field.value.type, " ", "const ", " *", true);
}
code_.SetValue("KEY_TYPE", type);
code_ += " int KeyCompareWithValue({{KEY_TYPE}} val) const {";
code_ += " const auto key = {{FIELD_NAME}}();";
code_ += " if (key < val) {";
code_ += " return -1;";
code_ += " } else if (key > val) {";
code_ += " return 1;";
code_ += " } else {";
code_ += " return 0;";
code_ += " }";
code_ += " }";
}
}
}
    // Generate a verifier function that checks that a buffer from an
    // untrusted source will never cause reads outside the buffer.
code_ += " bool Verify(flatbuffers::Verifier &verifier) const {";
code_ += " return VerifyTableStart(verifier)\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
continue;
}
GenVerifyCall(field, " &&\n ");
}
code_ += " &&\n verifier.EndTable();";
code_ += " }";
if (parser_.opts.generate_object_based_api) {
// Generate the UnPack() pre declaration.
code_ += " " + TableUnPackSignature(struct_def, true) + ";";
code_ += " " + TableUnPackToSignature(struct_def, true) + ";";
code_ += " " + TablePackSignature(struct_def, true) + ";";
}
code_ += "};"; // End of table.
code_ += "";
// Explicit specializations for union accessors
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated ||
field.value.type.base_type != BASE_TYPE_UNION) {
continue;
}
auto u = field.value.type.enum_def;
code_.SetValue("FIELD_NAME", field.name);
for (auto u_it = u->vals.vec.begin();
u_it != u->vals.vec.end(); ++u_it) {
if (!(*u_it)->struct_def) {
continue;
}
auto arg_struct_def = (*u_it)->struct_def;
auto full_struct_name = WrapInNameSpace(*arg_struct_def);
code_.SetValue("U_ELEMENT_TYPE", WrapInNameSpace(
u->defined_namespace, GetEnumValUse(*u, **u_it)));
code_.SetValue("U_FIELD_TYPE", "const " + full_struct_name + " *");
code_.SetValue("U_ELEMENT_NAME", full_struct_name);
code_.SetValue("U_FIELD_NAME",
field.name + "_as_" + (*u_it)->name);
// `template<> const T *union_name_as<T>() const` accessor.
code_ += "template<> "
"inline {{U_FIELD_TYPE}}{{STRUCT_NAME}}::{{FIELD_NAME}}_as"
"<{{U_ELEMENT_NAME}}>() const {";
code_ += " return {{U_FIELD_NAME}}();";
code_ += "}";
code_ += "";
}
}
GenBuilders(struct_def);
if (parser_.opts.generate_object_based_api) {
// Generate a pre-declaration for a CreateX method that works with an
// unpacked C++ object.
code_ += TableCreateSignature(struct_def, true) + ";";
code_ += "";
}
}
void GenBuilders(const StructDef &struct_def) {
code_.SetValue("STRUCT_NAME", struct_def.name);
// Generate a builder struct:
code_ += "struct {{STRUCT_NAME}}Builder {";
code_ += " flatbuffers::FlatBufferBuilder &fbb_;";
code_ += " flatbuffers::uoffset_t start_;";
bool has_string_or_vector_fields = false;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
const bool is_scalar = IsScalar(field.value.type.base_type);
const bool is_string = field.value.type.base_type == BASE_TYPE_STRING;
const bool is_vector = field.value.type.base_type == BASE_TYPE_VECTOR;
if (is_string || is_vector) {
has_string_or_vector_fields = true;
}
std::string offset = GenFieldOffsetName(field);
std::string name = GenUnderlyingCast(field, false, field.name);
std::string value = is_scalar ? GenDefaultConstant(field) : "";
// Generate accessor functions of the form:
// void add_name(type name) {
// fbb_.AddElement<type>(offset, name, default);
// }
code_.SetValue("FIELD_NAME", field.name);
code_.SetValue("FIELD_TYPE", GenTypeWire(field.value.type, " ", true));
code_.SetValue("ADD_OFFSET", struct_def.name + "::" + offset);
code_.SetValue("ADD_NAME", name);
code_.SetValue("ADD_VALUE", value);
if (is_scalar) {
const auto type = GenTypeWire(field.value.type, "", false);
code_.SetValue("ADD_FN", "AddElement<" + type + ">");
} else if (IsStruct(field.value.type)) {
code_.SetValue("ADD_FN", "AddStruct");
} else {
code_.SetValue("ADD_FN", "AddOffset");
}
code_ += " void add_{{FIELD_NAME}}({{FIELD_TYPE}}{{FIELD_NAME}}) {";
code_ += " fbb_.{{ADD_FN}}(\\";
if (is_scalar) {
code_ += "{{ADD_OFFSET}}, {{ADD_NAME}}, {{ADD_VALUE}});";
} else {
code_ += "{{ADD_OFFSET}}, {{ADD_NAME}});";
}
code_ += " }";
}
}
// Builder constructor
code_ += " {{STRUCT_NAME}}Builder(flatbuffers::FlatBufferBuilder &_fbb)";
code_ += " : fbb_(_fbb) {";
code_ += " start_ = fbb_.StartTable();";
code_ += " }";
    // Assignment operator: declared but intentionally left undefined,
    // so that builder objects cannot be assigned.
code_ += " {{STRUCT_NAME}}Builder &operator="
"(const {{STRUCT_NAME}}Builder &);";
// Finish() function.
auto num_fields = NumToString(struct_def.fields.vec.size());
code_ += " flatbuffers::Offset<{{STRUCT_NAME}}> Finish() {";
code_ += " const auto end = fbb_.EndTable(start_, " + num_fields + ");";
code_ += " auto o = flatbuffers::Offset<{{STRUCT_NAME}}>(end);";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated && field.required) {
code_.SetValue("FIELD_NAME", field.name);
code_.SetValue("OFFSET_NAME", GenFieldOffsetName(field));
code_ += " fbb_.Required(o, {{STRUCT_NAME}}::{{OFFSET_NAME}});";
}
}
code_ += " return o;";
code_ += " }";
code_ += "};";
code_ += "";
// Generate a convenient CreateX function that uses the above builder
// to create a table in one go.
code_ += "inline flatbuffers::Offset<{{STRUCT_NAME}}> "
"Create{{STRUCT_NAME}}(";
code_ += " flatbuffers::FlatBufferBuilder &_fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
GenParam(field, false, ",\n ");
}
}
code_ += ") {";
code_ += " {{STRUCT_NAME}}Builder builder_(_fbb);";
for (size_t size = struct_def.sortbysize ? sizeof(largest_scalar_t) : 1;
size; size /= 2) {
for (auto it = struct_def.fields.vec.rbegin();
it != struct_def.fields.vec.rend(); ++it) {
const auto &field = **it;
if (!field.deprecated && (!struct_def.sortbysize ||
size == SizeOf(field.value.type.base_type))) {
code_.SetValue("FIELD_NAME", field.name);
code_ += " builder_.add_{{FIELD_NAME}}({{FIELD_NAME}});";
}
}
}
code_ += " return builder_.Finish();";
code_ += "}";
code_ += "";
// Generate a CreateXDirect function with vector types as parameters
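      // For a hypothetical string field "name", the wrapper forwards
      //   name ? _fbb.CreateString(name) : 0
      // to the main Create function, letting callers pass plain C strings
      // and std::vectors instead of pre-built offsets.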
if (has_string_or_vector_fields) {
code_ += "inline flatbuffers::Offset<{{STRUCT_NAME}}> "
"Create{{STRUCT_NAME}}Direct(";
code_ += " flatbuffers::FlatBufferBuilder &_fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
GenParam(field, true, ",\n ");
}
}
// Need to call "Create" with the struct namespace.
const auto qualified_create_name = struct_def.defined_namespace->GetFullyQualifiedName("Create");
code_.SetValue("CREATE_NAME", TranslateNameSpace(qualified_create_name));
code_ += ") {";
code_ += " return {{CREATE_NAME}}{{STRUCT_NAME}}(";
code_ += " _fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
code_.SetValue("FIELD_NAME", field.name);
if (field.value.type.base_type == BASE_TYPE_STRING) {
code_ += ",\n {{FIELD_NAME}} ? "
"_fbb.CreateString({{FIELD_NAME}}) : 0\\";
} else if (field.value.type.base_type == BASE_TYPE_VECTOR) {
auto type = GenTypeWire(field.value.type.VectorType(), "", false);
code_ += ",\n {{FIELD_NAME}} ? "
"_fbb.CreateVector<" + type + ">(*{{FIELD_NAME}}) : 0\\";
} else {
code_ += ",\n {{FIELD_NAME}}\\";
}
}
}
code_ += ");";
code_ += "}";
code_ += "";
}
}
std::string GenUnpackVal(const Type &type, const std::string &val,
bool invector, const FieldDef &afield) {
switch (type.base_type) {
case BASE_TYPE_STRING: {
return val + "->str()";
}
case BASE_TYPE_STRUCT: {
const auto name = WrapInNameSpace(*type.struct_def);
if (IsStruct(type)) {
auto native_type = type.struct_def->attributes.Lookup("native_type");
if (native_type) {
return "flatbuffers::UnPack(*" + val + ")";
} else if (invector || afield.native_inline) {
return "*" + val;
} else {
const auto ptype = GenTypeNativePtr(name, &afield, true);
return ptype + "(new " + name + "(*" + val + "))";
}
} else {
const auto ptype = GenTypeNativePtr(NativeName(name), &afield, true);
return ptype + "(" + val + "->UnPack(_resolver))";
}
}
      default: {
        return val;
      }
    }
  }
std::string GenUnpackFieldStatement(const FieldDef &field,
const FieldDef *union_field) {
std::string code;
switch (field.value.type.base_type) {
case BASE_TYPE_VECTOR: {
std::string indexing;
if (field.value.type.enum_def) {
indexing += "(" + field.value.type.enum_def->name + ")";
}
indexing += "_e->Get(_i)";
if (field.value.type.element == BASE_TYPE_BOOL) {
indexing += " != 0";
}
// Generate code that pushes data from _e to _o in the form:
// for (uoffset_t i = 0; i < _e->size(); ++i) {
// _o->field.push_back(_e->Get(_i));
// }
code += "{ _o->" + field.name + ".resize(_e->size()); ";
code += "for (flatbuffers::uoffset_t _i = 0;";
code += " _i < _e->size(); _i++) { ";
code += "_o->" + field.name + "[_i] = ";
code += GenUnpackVal(field.value.type.VectorType(),
indexing, true, field);
code += "; } }";
break;
}
case BASE_TYPE_UTYPE: {
assert(union_field->value.type.base_type == BASE_TYPE_UNION);
// Generate code that sets the union type, of the form:
// _o->field.type = _e;
code += "_o->" + union_field->name + ".type = _e;";
break;
}
case BASE_TYPE_UNION: {
// Generate code that sets the union table, of the form:
// _o->field.table = Union::Unpack(_e, field_type(), resolver);
code += "_o->" + field.name + ".table = ";
code += field.value.type.enum_def->name + "Union::UnPack(";
code += "_e, " + field.name + UnionTypeFieldSuffix() + "(),";
code += "_resolver);";
break;
}
default: {
auto cpp_type = field.attributes.Lookup("cpp_type");
if (cpp_type) {
// Generate code that resolves the cpp pointer type, of the form:
// if (resolver)
// (*resolver)(&_o->field, (hash_value_t)(_e));
// else
// _o->field = nullptr;
code += "if (_resolver) ";
code += "(*_resolver)";
code += "(reinterpret_cast<void **>(&_o->" + field.name + "), ";
code += "static_cast<flatbuffers::hash_value_t>(_e));";
code += " else ";
code += "_o->" + field.name + " = nullptr;";
} else {
// Generate code for assigning the value, of the form:
// _o->field = value;
code += "_o->" + field.name + " = ";
code += GenUnpackVal(field.value.type, "_e", false, field) + ";";
}
break;
}
}
return code;
}
std::string GenCreateParam(const FieldDef &field) {
std::string value = "_o->";
if (field.value.type.base_type == BASE_TYPE_UTYPE) {
value += field.name.substr(0, field.name.size() -
strlen(UnionTypeFieldSuffix()));
value += ".type";
} else {
value += field.name;
}
if (field.attributes.Lookup("cpp_type")) {
auto type = GenTypeBasic(field.value.type, false);
value = "_rehasher ? "
"static_cast<" + type + ">((*_rehasher)(" + value + ")) : 0";
}
std::string code;
switch (field.value.type.base_type) {
// String fields are of the form:
// _fbb.CreateString(_o->field)
case BASE_TYPE_STRING: {
code += "_fbb.CreateString(" + value + ")";
// For optional fields, check to see if there actually is any data
// in _o->field before attempting to access it.
if (!field.required) {
code = value + ".size() ? " + code + " : 0";
}
break;
}
// Vector fields come in several flavours, of the forms:
// _fbb.CreateVector(_o->field);
// _fbb.CreateVector((const utype*)_o->field.data(), _o->field.size());
// _fbb.CreateVectorOfStrings(_o->field)
// _fbb.CreateVectorOfStructs(_o->field)
      //   _fbb.CreateVector<Offset<T>>(_o->field.size(), [&](size_t i) {
// return CreateT(_fbb, _o->Get(i), rehasher);
// });
case BASE_TYPE_VECTOR: {
auto vector_type = field.value.type.VectorType();
switch (vector_type.base_type) {
case BASE_TYPE_STRING: {
code += "_fbb.CreateVectorOfStrings(" + value + ")";
break;
}
case BASE_TYPE_STRUCT: {
if (IsStruct(vector_type)) {
code += "_fbb.CreateVectorOfStructs(" + value + ")";
} else {
code += "_fbb.CreateVector<flatbuffers::Offset<";
code += WrapInNameSpace(*vector_type.struct_def) + ">>";
code += "(" + value + ".size(), [&](size_t i) {";
code += " return Create" + vector_type.struct_def->name;
code += "(_fbb, " + value + "[i]" + GenPtrGet(field) + ", ";
code += "_rehasher); })";
}
break;
}
case BASE_TYPE_BOOL: {
code += "_fbb.CreateVector(" + value + ")";
break;
}
default: {
if (field.value.type.enum_def) {
// For enumerations, we need to get access to the array data for
// the underlying storage type (eg. uint8_t).
const auto basetype = GenTypeBasic(
field.value.type.enum_def->underlying_type, false);
code += "_fbb.CreateVector((const " + basetype + "*)" + value +
".data(), " + value + ".size())";
} else {
code += "_fbb.CreateVector(" + value + ")";
}
break;
}
}
// For optional fields, check to see if there actually is any data
// in _o->field before attempting to access it.
if (!field.required) {
code = value + ".size() ? " + code + " : 0";
}
break;
}
case BASE_TYPE_UNION: {
// _o->field.Pack(_fbb);
code += value + ".Pack(_fbb)";
break;
}
case BASE_TYPE_STRUCT: {
if (IsStruct(field.value.type)) {
auto native_type =
field.value.type.struct_def->attributes.Lookup("native_type");
if (native_type) {
code += "flatbuffers::Pack(" + value + ")";
} else if (field.native_inline) {
code += "&" + value;
} else {
code += value + " ? " + value + GenPtrGet(field) + " : 0";
}
} else {
          // _o->field ? CreateT(_fbb, _o->field.get(), _rehasher) : 0;
const auto type = field.value.type.struct_def->name;
code += value + " ? Create" + type;
code += "(_fbb, " + value + GenPtrGet(field) + ", _rehasher)";
code += " : 0";
}
break;
}
default: {
code += value;
break;
}
}
return code;
}
// Generate code for tables that needs to come after the regular definition.
void GenTablePost(const StructDef &struct_def) {
code_.SetValue("STRUCT_NAME", struct_def.name);
code_.SetValue("NATIVE_NAME", NativeName(struct_def.name));
if (parser_.opts.generate_object_based_api) {
// Generate the X::UnPack() method.
code_ += "inline " + TableUnPackSignature(struct_def, false) + " {";
code_ += " auto _o = new {{NATIVE_NAME}}();";
code_ += " UnPackTo(_o, _resolver);";
code_ += " return _o;";
code_ += "}";
code_ += "";
code_ += "inline " + TableUnPackToSignature(struct_def, false) + " {";
code_ += " (void)_o;";
code_ += " (void)_resolver;";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
continue;
}
// Assign a value from |this| to |_o|. Values from |this| are stored
// in a variable |_e| by calling this->field_type(). The value is then
// assigned to |_o| using the GenUnpackFieldStatement.
const bool is_union = field.value.type.base_type == BASE_TYPE_UTYPE;
const auto statement =
GenUnpackFieldStatement(field, is_union ? *(it + 1) : nullptr);
code_.SetValue("FIELD_NAME", field.name);
auto prefix = " { auto _e = {{FIELD_NAME}}(); ";
auto check = IsScalar(field.value.type.base_type) ? "" : "if (_e) ";
auto postfix = " };";
code_ += std::string(prefix) + check + statement + postfix;
}
code_ += "}";
code_ += "";
// Generate the X::Pack member function that simply calls the global
// CreateX function.
code_ += "inline " + TablePackSignature(struct_def, false) + " {";
code_ += " return Create{{STRUCT_NAME}}(_fbb, _o, _rehasher);";
code_ += "}";
code_ += "";
// Generate a CreateX method that works with an unpacked C++ object.
code_ += "inline " + TableCreateSignature(struct_def, false) + " {";
code_ += " (void)_rehasher;";
code_ += " (void)_o;";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
auto &field = **it;
if (field.deprecated) {
continue;
}
code_ += " auto _" + field.name + " = " + GenCreateParam(field) + ";";
}
// Need to call "Create" with the struct namespace.
const auto qualified_create_name = struct_def.defined_namespace->GetFullyQualifiedName("Create");
code_.SetValue("CREATE_NAME", TranslateNameSpace(qualified_create_name));
code_ += " return {{CREATE_NAME}}{{STRUCT_NAME}}(";
code_ += " _fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
auto &field = **it;
if (field.deprecated) {
continue;
}
bool pass_by_address = false;
if (field.value.type.base_type == BASE_TYPE_STRUCT) {
if (IsStruct(field.value.type)) {
auto native_type =
field.value.type.struct_def->attributes.Lookup("native_type");
if (native_type) {
pass_by_address = true;
}
}
}
// Call the CreateX function using values from |_o|.
if (pass_by_address) {
code_ += ",\n &_" + field.name + "\\";
} else {
code_ += ",\n _" + field.name + "\\";
}
}
code_ += ");";
code_ += "}";
code_ += "";
}
}
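  // For example, field.padding == 0x6 (bits 1 and 2 set) invokes f with 16
  // and then 32, once for each power-of-two padding chunk.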
static void GenPadding(
const FieldDef &field, std::string *code_ptr, int *id,
const std::function<void(int bits, std::string *code_ptr, int *id)> &f) {
if (field.padding) {
for (int i = 0; i < 4; i++) {
if (static_cast<int>(field.padding) & (1 << i)) {
f((1 << i) * 8, code_ptr, id);
}
}
assert(!(field.padding & ~0xF));
}
}
static void PaddingDefinition(int bits, std::string *code_ptr, int *id) {
*code_ptr += " int" + NumToString(bits) + "_t padding" +
NumToString((*id)++) + "__;";
}
static void PaddingInitializer(int bits, std::string *code_ptr, int *id) {
(void)bits;
*code_ptr += ",\n padding" + NumToString((*id)++) + "__(0)";
}
static void PaddingNoop(int bits, std::string *code_ptr, int *id) {
(void)bits;
*code_ptr += " (void)padding" + NumToString((*id)++) + "__;";
}
// Generate an accessor struct with constructor for a flatbuffers struct.
void GenStruct(const StructDef &struct_def) {
// Generate an accessor struct, with private variables of the form:
// type name_;
// Generates manual padding and alignment.
// Variables are private because they contain little endian data on all
// platforms.
GenComment(struct_def.doc_comment);
code_.SetValue("ALIGN", NumToString(struct_def.minalign));
code_.SetValue("STRUCT_NAME", struct_def.name);
code_ += "MANUALLY_ALIGNED_STRUCT({{ALIGN}}) "
"{{STRUCT_NAME}} FLATBUFFERS_FINAL_CLASS {";
code_ += " private:";
int padding_id = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
code_.SetValue("FIELD_TYPE",
GenTypeGet(field.value.type, " ", "", " ", false));
code_.SetValue("FIELD_NAME", field.name);
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}_;";
if (field.padding) {
std::string padding;
GenPadding(field, &padding, &padding_id, PaddingDefinition);
code_ += padding;
}
}
// Generate GetFullyQualifiedName
code_ += "";
code_ += " public:";
GenFullyQualifiedNameGetter(struct_def.name);
// Generate a default constructor.
code_ += " {{STRUCT_NAME}}() {";
code_ += " memset(this, 0, sizeof({{STRUCT_NAME}}));";
code_ += " }";
// Generate a copy constructor.
code_ += " {{STRUCT_NAME}}(const {{STRUCT_NAME}} &_o) {";
code_ += " memcpy(this, &_o, sizeof({{STRUCT_NAME}}));";
code_ += " }";
// Generate a constructor that takes all fields as arguments.
std::string arg_list;
std::string init_list;
padding_id = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
const auto member_name = field.name + "_";
const auto arg_name = "_" + field.name;
const auto arg_type =
GenTypeGet(field.value.type, " ", "const ", " &", true);
if (it != struct_def.fields.vec.begin()) {
arg_list += ", ";
init_list += ",\n ";
}
arg_list += arg_type;
arg_list += arg_name;
init_list += member_name;
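      // Scalars are stored little-endian on every platform, so route the
      // argument through EndianScalar() in the initializer list.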
if (IsScalar(field.value.type.base_type)) {
auto type = GenUnderlyingCast(field, false, arg_name);
init_list += "(flatbuffers::EndianScalar(" + type + "))";
} else {
init_list += "(" + arg_name + ")";
}
if (field.padding) {
GenPadding(field, &init_list, &padding_id, PaddingInitializer);
}
}
code_.SetValue("ARG_LIST", arg_list);
code_.SetValue("INIT_LIST", init_list);
code_ += " {{STRUCT_NAME}}({{ARG_LIST}})";
code_ += " : {{INIT_LIST}} {";
padding_id = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.padding) {
std::string padding;
GenPadding(field, &padding, &padding_id, PaddingNoop);
code_ += padding;
}
}
code_ += " }";
// Generate accessor methods of the form:
// type name() const { return flatbuffers::EndianScalar(name_); }
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
auto field_type = GenTypeGet(field.value.type, " ", "const ", " &", true);
auto is_scalar = IsScalar(field.value.type.base_type);
auto member = field.name + "_";
auto value = is_scalar ? "flatbuffers::EndianScalar(" + member + ")"
: member;
code_.SetValue("FIELD_NAME", field.name);
code_.SetValue("FIELD_TYPE", field_type);
code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, true, value));
GenComment(field.doc_comment, " ");
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}() const {";
code_ += " return {{FIELD_VALUE}};";
code_ += " }";
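      // With mutable buffers enabled (flatc --gen-mutable), also emit a
      // WriteScalar-based setter for scalars and a non-const accessor
      // for everything else.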
if (parser_.opts.mutable_buffer) {
if (is_scalar) {
code_.SetValue("ARG", GenTypeBasic(field.value.type, true));
code_.SetValue("FIELD_VALUE",
GenUnderlyingCast(field, false, "_" + field.name));
code_ += " void mutate_{{FIELD_NAME}}({{ARG}} _{{FIELD_NAME}}) {";
code_ += " flatbuffers::WriteScalar(&{{FIELD_NAME}}_, "
"{{FIELD_VALUE}});";
code_ += " }";
} else {
code_ += " {{FIELD_TYPE}}mutable_{{FIELD_NAME}}() {";
code_ += " return {{FIELD_NAME}}_;";
code_ += " }";
}
}
}
code_ += "};";
code_.SetValue("STRUCT_BYTE_SIZE", NumToString(struct_def.bytesize));
code_ += "STRUCT_END({{STRUCT_NAME}}, {{STRUCT_BYTE_SIZE}});";
code_ += "";
}
// Set up the correct namespace. Only open a namespace if the existing one is
// different (closing/opening only what is necessary).
//
// The file must start and end with an empty (or null) namespace so that
// namespaces are properly opened and closed.
void SetNameSpace(const Namespace *ns) {
if (cur_name_space_ == ns) {
return;
}
// Compute the size of the longest common namespace prefix.
// If cur_name_space is A::B::C::D and ns is A::B::E::F::G,
// the common prefix is A::B:: and we have old_size = 4, new_size = 5
// and common_prefix_size = 2
size_t old_size = cur_name_space_ ? cur_name_space_->components.size() : 0;
size_t new_size = ns ? ns->components.size() : 0;
size_t common_prefix_size = 0;
while (common_prefix_size < old_size && common_prefix_size < new_size &&
ns->components[common_prefix_size] ==
cur_name_space_->components[common_prefix_size]) {
common_prefix_size++;
}
// Close cur_name_space in reverse order to reach the common prefix.
// In the previous example, D then C are closed.
for (size_t j = old_size; j > common_prefix_size; --j) {
code_ += "} // namespace " + cur_name_space_->components[j - 1];
}
if (old_size != common_prefix_size) {
code_ += "";
}
// open namespace parts to reach the ns namespace
// in the previous example, E, then F, then G are opened
for (auto j = common_prefix_size; j != new_size; ++j) {
code_ += "namespace " + ns->components[j] + " {";
}
if (new_size != common_prefix_size) {
code_ += "";
}
cur_name_space_ = ns;
}
};
} // namespace cpp
bool GenerateCPP(const Parser &parser, const std::string &path,
const std::string &file_name) {
cpp::CppGenerator generator(parser, path, file_name);
return generator.generate();
}
std::string CPPMakeRule(const Parser &parser, const std::string &path,
const std::string &file_name) {
const auto filebase =
flatbuffers::StripPath(flatbuffers::StripExtension(file_name));
const auto included_files = parser.GetIncludedFilesRecursive(file_name);
std::string make_rule = GeneratedFileName(path, filebase) + ": ";
for (auto it = included_files.begin(); it != included_files.end(); ++it) {
make_rule += " " + *it;
}
return make_rule;
}
} // namespace flatbuffers
| 1 | 11,540 | rather than generate code for this every time, stick it in `flatbuffers.h` (and call it `FLATBUFFERS_NOEXCEPT` to avoid clashes). | google-flatbuffers | java |
@@ -14,7 +14,9 @@ import (
// GetBlockByNumber see https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getblockbynumber
// see internal/ethapi.PublicBlockChainAPI.GetBlockByNumber
func (api *APIImpl) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) {
- tx, err := api.dbReader.Begin(ctx)
+	// TODO the next line used to be "tx, err := api.dbReader.Begin(ctx)". When it changed, there was concern that it
+	// would have a negative impact on performance. Revisit the performance of using this type of database transaction.
+ tx, err := api.db.Begin(ctx, nil, false)
if err != nil {
return nil, err
} | 1 | package commands
import (
"context"
"fmt"
"github.com/ledgerwatch/turbo-geth/common"
"github.com/ledgerwatch/turbo-geth/common/hexutil"
"github.com/ledgerwatch/turbo-geth/core/rawdb"
"github.com/ledgerwatch/turbo-geth/rpc"
"github.com/ledgerwatch/turbo-geth/turbo/adapter/ethapi"
)
// GetBlockByNumber see https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getblockbynumber
// see internal/ethapi.PublicBlockChainAPI.GetBlockByNumber
func (api *APIImpl) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) {
tx, err := api.dbReader.Begin(ctx)
if err != nil {
return nil, err
}
defer tx.Rollback()
blockNum, err := getBlockNumber(number, tx)
if err != nil {
return nil, err
}
additionalFields := make(map[string]interface{})
block := rawdb.ReadBlockByNumber(tx, blockNum)
if block == nil {
return nil, fmt.Errorf("block not found: %d", blockNum)
}
additionalFields["totalDifficulty"] = (*hexutil.Big)(rawdb.ReadTd(tx, block.Hash(), blockNum))
response, err := ethapi.RPCMarshalBlock(block, true, fullTx, additionalFields)
if err == nil && number == rpc.PendingBlockNumber {
// Pending blocks need to nil out a few fields
for _, field := range []string{"hash", "nonce", "miner"} {
response[field] = nil
}
}
return response, err
}
// GetBlockByHash see https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getblockbyhash
// see internal/ethapi.PublicBlockChainAPI.GetBlockByHash
func (api *APIImpl) GetBlockByHash(ctx context.Context, hash common.Hash, fullTx bool) (map[string]interface{}, error) {
tx, err := api.dbReader.Begin(ctx)
if err != nil {
return nil, err
}
defer tx.Rollback()
additionalFields := make(map[string]interface{})
block := rawdb.ReadBlockByHash(tx, hash)
if block == nil {
return nil, fmt.Errorf("block not found: %x", hash)
}
number := block.NumberU64()
additionalFields["totalDifficulty"] = (*hexutil.Big)(rawdb.ReadTd(tx, hash, number))
response, err := ethapi.RPCMarshalBlock(block, true, fullTx, additionalFields)
if err == nil && int64(number) == rpc.PendingBlockNumber.Int64() {
// Pending blocks need to nil out a few fields
for _, field := range []string{"hash", "nonce", "miner"} {
response[field] = nil
}
}
return response, err
}
// GetBlockTransactionCountByNumber returns the number of transactions in the block
func (api *APIImpl) GetBlockTransactionCountByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*hexutil.Uint, error) {
tx, err := api.dbReader.Begin(ctx)
if err != nil {
return nil, err
}
defer tx.Rollback()
blockNum, err := getBlockNumber(blockNr, tx)
if err != nil {
return nil, err
}
block := rawdb.ReadBlockByNumber(tx, blockNum)
if block == nil {
return nil, fmt.Errorf("block not found: %d", blockNum)
}
n := hexutil.Uint(len(block.Transactions()))
return &n, nil
}
// GetBlockTransactionCountByHash returns the number of transactions in the block
func (api *APIImpl) GetBlockTransactionCountByHash(ctx context.Context, blockHash common.Hash) (*hexutil.Uint, error) {
tx, err := api.dbReader.Begin(ctx)
if err != nil {
return nil, err
}
defer tx.Rollback()
block := rawdb.ReadBlockByHash(tx, blockHash)
if block == nil {
return nil, fmt.Errorf("block not found: %x", blockHash)
}
n := hexutil.Uint(len(block.Transactions()))
return &n, nil
}
| 1 | 21,891 | @AskAlexSharov @tjayrush , I'm still feeling uncomfortable with this change: - because it works by accident. For example in next lines `tx` object used as: `ReadBlockByNumber(tx)`. If you go inside `ReadBlockByNumber` you can find `!errors.Is(err, ethdb.ErrKeyNotFound)` - but ethdb.Tx doesn't return this error - and it satisfy `DatabaseReader` by accident - (I faced this problem last week - when passed Tx to place which expected Db - and I promised to change tx API - rename Get to GetOne or even remove the method). - we can merge this PR if it doesn't break things and solve problem of localDb use of RPCDaemon. But I will make PR now which will allow open Read tx by TxDb/ObjectDb - and will ask you to use it. Then will take a look how hard to change LmdbTx to not satisfy rawdb.DatabaseReader interface. | ledgerwatch-erigon | go |
@@ -159,3 +159,4 @@ def createTemporalAnomaly(recordParams, spatialParams=_SP_PARAMS,
temporalPoolerRegion.setParameter("anomalyMode", True)
return network
+ | 1 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import json
from pkg_resources import resource_filename
from nupic.data.file_record_stream import FileRecordStream
from nupic.engine import Network
from nupic.encoders import MultiEncoder, ScalarEncoder, DateEncoder
_VERBOSITY = 0
# Default config fields for SPRegion
_SP_PARAMS = {
"spVerbosity": _VERBOSITY,
"spatialImp": "cpp",
"globalInhibition": 1,
"columnCount": 2048,
"inputWidth": 0,
"numActiveColumnsPerInhArea": 40,
"seed": 1956,
"potentialPct": 0.8,
"synPermConnected": 0.1,
"synPermActiveInc": 0.0001,
"synPermInactiveDec": 0.0005,
"maxBoost": 1.0,
}
# Default config fields for TPRegion
_TP_PARAMS = {
"verbosity": _VERBOSITY,
"columnCount": 2048,
"cellsPerColumn": 32,
"inputWidth": 2048,
"seed": 1960,
"temporalImp": "cpp",
"newSynapseCount": 20,
"maxSynapsesPerSegment": 32,
"maxSegmentsPerCell": 128,
"initialPerm": 0.21,
"permanenceInc": 0.1,
"permanenceDec": 0.1,
"globalDecay": 0.0,
"maxAge": 0,
"minThreshold": 9,
"activationThreshold": 12,
"outputType": "normal",
"pamLength": 3,
}
def createTemporalAnomaly(recordParams, spatialParams=_SP_PARAMS,
temporalParams=_TP_PARAMS,
verbosity=_VERBOSITY):
"""Generates a Network with connected RecordSensor, SP, TP, Anomaly regions.
This function takes care of generating regions and the canonical links.
The network has a sensor region reading data from a specified input and
passing the encoded representation to an SPRegion.
The SPRegion output is passed to a TPRegion.
Note: this function returns a network that needs to be initialized. This
allows the user to extend the network by adding further regions and
connections.
  :param recordParams: a dict with parameters for creating the RecordSensor region.
  :param spatialParams: a dict with parameters for creating the SPRegion.
  :param temporalParams: a dict with parameters for creating the TPRegion.
  :param verbosity: an integer indicating how chatty the network will be.
"""
  inputFilePath = recordParams["inputFilePath"]
scalarEncoderArgs = recordParams["scalarEncoderArgs"]
dateEncoderArgs = recordParams["dateEncoderArgs"]
scalarEncoder = ScalarEncoder(**scalarEncoderArgs)
dateEncoder = DateEncoder(**dateEncoderArgs)
encoder = MultiEncoder()
encoder.addEncoder(scalarEncoderArgs["name"], scalarEncoder)
encoder.addEncoder(dateEncoderArgs["name"], dateEncoder)
network = Network()
network.addRegion("sensor", "py.RecordSensor",
json.dumps({"verbosity": verbosity}))
sensor = network.regions["sensor"].getSelf()
sensor.encoder = encoder
sensor.dataSource = FileRecordStream(streamID=inputFilePath)
# Create the spatial pooler region
spatialParams["inputWidth"] = sensor.encoder.getWidth()
network.addRegion("spatialPoolerRegion", "py.SPRegion",
json.dumps(spatialParams))
# Link the SP region to the sensor input
network.link("sensor", "spatialPoolerRegion", "UniformLink", "")
network.link("sensor", "spatialPoolerRegion", "UniformLink", "",
srcOutput="resetOut", destInput="resetIn")
network.link("spatialPoolerRegion", "sensor", "UniformLink", "",
srcOutput="spatialTopDownOut", destInput="spatialTopDownIn")
network.link("spatialPoolerRegion", "sensor", "UniformLink", "",
srcOutput="temporalTopDownOut", destInput="temporalTopDownIn")
# Add the TPRegion on top of the SPRegion
network.addRegion("temporalPoolerRegion", "py.TPRegion",
json.dumps(temporalParams))
network.link("spatialPoolerRegion", "temporalPoolerRegion", "UniformLink", "")
network.link("temporalPoolerRegion", "spatialPoolerRegion", "UniformLink", "",
srcOutput="topDownOut", destInput="topDownIn")
# Add the AnomalyRegion on top of the TPRegion
network.addRegion("anomalyRegion", "py.AnomalyRegion", json.dumps({}))
network.link("spatialPoolerRegion", "anomalyRegion", "UniformLink", "",
srcOutput="bottomUpOut", destInput="activeColumns")
network.link("temporalPoolerRegion", "anomalyRegion", "UniformLink", "",
srcOutput="topDownOut", destInput="predictedColumns")
spatialPoolerRegion = network.regions["spatialPoolerRegion"]
# Make sure learning is enabled
spatialPoolerRegion.setParameter("learningMode", True)
# We want temporal anomalies so disable anomalyMode in the SP. This mode is
# used for computing anomalies in a non-temporal model.
spatialPoolerRegion.setParameter("anomalyMode", False)
temporalPoolerRegion = network.regions["temporalPoolerRegion"]
# Enable topDownMode to get the predicted columns output
temporalPoolerRegion.setParameter("topDownMode", True)
# Make sure learning is enabled (this is the default)
temporalPoolerRegion.setParameter("learningMode", True)
# Enable inference mode so we get predictions
temporalPoolerRegion.setParameter("inferenceMode", True)
  # Enable anomalyMode to compute the anomaly score. This actually doesn't
  # work right now, so it doesn't matter. We instead compute the anomaly score
  # based on topDownOut (predicted columns) and SP bottomUpOut (active columns).
temporalPoolerRegion.setParameter("anomalyMode", True)
return network
| 1 | 18,778 | I think this means the file doesn't have a newline character at the end. | numenta-nupic | py |
@@ -469,6 +469,15 @@ fpga_result fpgaPerfCounterGet(fpga_token token, fpga_perf_counter *fpga_perf)
memset(fpga_perf, 0, sizeof(fpga_perf_counter));
+	// check if it's being run as root or not
+ uid_t uid = getuid();
+ if (uid != 0) {
+ fpga_perf->previlege = false;
+ return FPGA_OK;
+ } else {
+ fpga_perf->previlege = true;
+ }
+
ret = get_fpga_sbdf(token, &segment, &bus, &device, &function);
if (ret != FPGA_OK) {
OPAE_ERR("Failed to get sbdf"); | 1 | // Copyright(c) 2021, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include "fpgaperf_counter.h"
#include <errno.h>
#include <glob.h>
#include <regex.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <libudev.h>
#include <linux/perf_event.h>
#include <opae/fpga.h>
#include <opae/log.h>
#include <opae/properties.h>
#include <opae/utils.h>
#include "opae_int.h"
#define PCI_DEV_ADDRS "/sys/bus/pci/devices/*%x*:*%x*:*%x*.*%x*/"
#define DFL_PERF_FME PCI_DEV_ADDRS "fpga_region/region*/dfl-fme.*"
#define DFL_PERF_SYSFS "/sys/bus/event_source/devices/dfl_fme"
#define PERF_EVENT "event=(0x[0-9a-fA-F]{2}),"
#define PERF_EVTYPE "evtype=(0x[0-9a-fA-F]{2}),"
#define PERF_PORTID "portid=(0x[0-9a-fA-F]{2})"
#define PERF_EVENT_PATTERN PERF_EVENT PERF_EVTYPE PERF_PORTID
#define PERF_CONFIG_PATTERN "config:([0-9]{1,})-([0-9]{2,})"
#define PARSE_MATCH_INT(_p, _m, _v, _b) \
do { \
errno = 0; \
_v = strtoul(_p + _m, NULL, _b); \
if (errno) { \
OPAE_MSG("error parsing int"); \
} \
} while (0)
/* Layout of the data returned by read() for an event group opened with
 * PERF_FORMAT_GROUP | PERF_FORMAT_ID. */
struct read_format {
uint64_t nr;
struct {
uint64_t value;
uint64_t id;
} values[];
};
/*
* Check perf handle object for validity and lock its mutex
* If fpga_perf_check_and_lock() returns FPGA_OK, assume the mutex to be
* locked.
*/
STATIC fpga_result fpga_perf_check_and_lock(fpga_perf_counter *fpga_perf)
{
int res = 0;
if (!fpga_perf)
return FPGA_INVALID_PARAM;
if (opae_mutex_lock(res, &fpga_perf->lock)) {
OPAE_ERR("Failed to lock perf mutex");
return FPGA_EXCEPTION;
}
if (fpga_perf->magic != FPGA_PERF_MAGIC) {
opae_mutex_unlock(res, &fpga_perf->lock);
return FPGA_INVALID_PARAM;
}
return FPGA_OK;
}
/* parse the each format and get the shift val
* parse the events for the particular device directory */
STATIC fpga_result parse_perf_attributes(struct udev_device *dev,
fpga_perf_counter *fpga_perf, const char *attr)
{
regex_t re;
char err[128] = { 0 };
int reg_res = 0;
uint64_t loop = 0;
uint64_t inner_loop = 0;
uint64_t value = 0;
char attr_path[DFL_PERF_STR_MAX] = { 0,};
char attr_value[128] = { 0,};
int gres = 0;
size_t i = 0;
FILE *file = NULL;
glob_t pglob;
	regmatch_t f_matches[4] = { {0} };	/* regexec() below is passed nmatch == 4 */
regmatch_t e_matches[4] = { {0} };
if (!dev || !fpga_perf) {
OPAE_ERR("Invalid input parameters");
return FPGA_INVALID_PARAM;
}
if (snprintf(attr_path, sizeof(attr_path), "%s/%s/*",
udev_device_get_syspath(dev), attr) < 0) {
OPAE_ERR("snprintf buffer overflow");
return FPGA_EXCEPTION;
}
gres = glob(attr_path, GLOB_NOSORT, NULL, &pglob);
if (gres || !pglob.gl_pathc) {
OPAE_ERR("Failed pattern match %s", attr_path);
globfree(&pglob);
return FPGA_EXCEPTION;
}
	if (strcmp(attr, "format") == 0) {
fpga_perf->num_format = pglob.gl_pathc;
if (!fpga_perf->format_type) {
fpga_perf->format_type = calloc(fpga_perf->num_format,
sizeof(perf_format_type));
if (!fpga_perf->format_type) {
fpga_perf->num_format = 0;
OPAE_ERR("Failed to allocate Memory");
globfree(&pglob);
return FPGA_NO_MEMORY;
}
}
} else {
fpga_perf->num_perf_events = pglob.gl_pathc;
if (!fpga_perf->perf_events) {
fpga_perf->perf_events = calloc(fpga_perf->num_perf_events,
sizeof(perf_events_type));
if (!fpga_perf->perf_events) {
fpga_perf->num_perf_events = 0;
OPAE_ERR("Failed to allocate Memory");
globfree(&pglob);
return FPGA_NO_MEMORY;
}
}
}
for (i = 0; i < pglob.gl_pathc; i++) {
file = fopen(pglob.gl_pathv[i], "r");
if (!file) {
OPAE_ERR("fopen(%s) failed\n", pglob.gl_pathv[i]);
globfree(&pglob);
return FPGA_EXCEPTION;
}
if (fscanf(file, "%s", attr_value) != 1) {
OPAE_ERR("Failed to read %s", pglob.gl_pathv[i]);
goto out;
}
		if (strcmp(attr, "format") == 0) {
reg_res = regcomp(&re, PERF_CONFIG_PATTERN,
REG_EXTENDED | REG_ICASE);
if (reg_res) {
OPAE_ERR("Error compiling regex");
goto out;
}
reg_res = regexec(&re, attr_value, 4, f_matches, 0);
if (reg_res) {
regerror(reg_res, &re, err, sizeof(err));
OPAE_MSG("Error executing regex: %s", err);
} else {
PARSE_MATCH_INT(attr_value, f_matches[1].rm_so, value, 10);
fpga_perf->format_type[loop].shift = value;
if (snprintf(fpga_perf->format_type[loop].format_name,
sizeof(fpga_perf->format_type[loop].format_name),
"%s", (strstr(pglob.gl_pathv[i], attr)
+ strlen(attr)+1)) < 0) {
OPAE_ERR("snprintf buffer overflow");
goto out;
}
loop++;
}
} else {
reg_res = regcomp(&re, PERF_EVENT_PATTERN,
REG_EXTENDED | REG_ICASE);
if (reg_res) {
OPAE_ERR("Error compiling regex");
goto out;
}
reg_res = regexec(&re, attr_value, 4, e_matches, 0);
if (reg_res) {
regerror(reg_res, &re, err, sizeof(err));
OPAE_MSG("Error executing regex: %s", err);
} else {
uint64_t config = 0;
uint64_t event = 0;
if (snprintf(fpga_perf->perf_events[inner_loop].event_name,
sizeof(fpga_perf->perf_events[inner_loop].event_name),
"%s", (strstr(pglob.gl_pathv[i], attr)
+ strlen(attr) + 1)) < 0) {
OPAE_ERR("snprintf buffer overflow");
goto out;
}
for (loop = 0; loop < fpga_perf->num_format; loop++) {
PARSE_MATCH_INT(attr_value,
e_matches[loop + 1].rm_so, event, 16);
config |= event << fpga_perf->format_type[loop].shift;
}
fpga_perf->perf_events[inner_loop].config = config;
inner_loop++;
}
}
fclose(file);
}
globfree(&pglob);
return FPGA_OK;
out:
fclose(file);
globfree(&pglob);
return FPGA_EXCEPTION;
}
STATIC fpga_result fpga_perf_events(char* perf_sysfs_path, fpga_perf_counter *fpga_perf)
{
fpga_result ret = FPGA_OK;
struct udev *udev = NULL;
struct udev_device *dev = NULL;
int fd = 0;
int grpfd = 0;
uint64_t loop = 0;
struct perf_event_attr pea;
if (!perf_sysfs_path || !fpga_perf) {
OPAE_ERR("Invalid input parameters");
return FPGA_INVALID_PARAM;
}
/* create udev object */
udev = udev_new();
if (!udev) {
OPAE_ERR("Cannot create udev context");
return FPGA_EXCEPTION;
}
dev = udev_device_new_from_syspath(udev, perf_sysfs_path);
if (!dev) {
OPAE_ERR("Failed to get device");
udev_unref(udev);
return FPGA_EXCEPTION;
}
const char * ptr = udev_device_get_sysattr_value(dev, "cpumask");
if (ptr)
PARSE_MATCH_INT(ptr, 0, fpga_perf->cpumask, 10);
ptr = udev_device_get_sysattr_value(dev, "type");
if (ptr)
PARSE_MATCH_INT(ptr, 0, fpga_perf->type, 10);
/* parse the format value */
ret = parse_perf_attributes(dev, fpga_perf, "format");
if (ret != FPGA_OK)
goto out;
/* parse the event value */
ret = parse_perf_attributes(dev, fpga_perf, "events");
if (ret != FPGA_OK)
goto out;
/* initialize the pea structure to 0 */
memset(&pea, 0, sizeof(struct perf_event_attr));
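	/* The first event opened below becomes the perf group leader
	 * (grpfd == -1); subsequent events join the leader via grpfd so the
	 * whole group is enabled, disabled and read as one unit. */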
for (loop = 0; loop < fpga_perf->num_perf_events; loop++) {
if (fpga_perf->perf_events[0].fd <= 0)
grpfd = -1;
else
grpfd = fpga_perf->perf_events[0].fd;
if (!fpga_perf->perf_events[loop].config)
continue;
pea.type = fpga_perf->type;
pea.size = sizeof(struct perf_event_attr);
pea.config = fpga_perf->perf_events[loop].config;
pea.disabled = 1;
pea.inherit = 1;
pea.sample_type = PERF_SAMPLE_IDENTIFIER;
pea.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID;
fd = syscall(__NR_perf_event_open, &pea, -1, fpga_perf->cpumask, grpfd, 0);
if (fd == -1) {
OPAE_ERR("Error opening leader %llx\n", pea.config);
ret = FPGA_EXCEPTION;
goto out;
} else {
fpga_perf->perf_events[loop].fd = fd;
if (ioctl(fpga_perf->perf_events[loop].fd, PERF_EVENT_IOC_ID,
&fpga_perf->perf_events[loop].id) == -1) {
OPAE_ERR("PERF_EVENT_IOC_ID ioctl failed: %s",
strerror(errno));
ret = FPGA_EXCEPTION;
goto out;
}
}
}
if (ioctl(fpga_perf->perf_events[0].fd, PERF_EVENT_IOC_RESET,
PERF_IOC_FLAG_GROUP) == -1) {
OPAE_ERR("PERF_EVENT_IOC_RESET ioctl failed: %s", strerror(errno));
ret = FPGA_EXCEPTION;
goto out;
}
out:
udev_device_unref(dev);
udev_unref(udev);
return ret;
}
/* get fpga sbdf from token */
STATIC fpga_result get_fpga_sbdf(fpga_token token,
uint16_t *segment,
uint8_t *bus,
uint8_t *device,
uint8_t *function)
{
fpga_result res = FPGA_OK;
fpga_properties props = NULL;
if (!segment || !bus ||
!device || !function) {
OPAE_ERR("Invalid input parameters");
return FPGA_INVALID_PARAM;
}
res = fpgaGetProperties(token, &props);
if (res != FPGA_OK) {
OPAE_ERR("Failed to get properties");
return res;
}
res = fpgaPropertiesGetBus(props, bus);
if (res != FPGA_OK) {
OPAE_ERR("Failed to get bus");
return res;
}
res = fpgaPropertiesGetSegment(props, segment);
if (res != FPGA_OK) {
OPAE_ERR("Failed to get Segment");
return res;
}
res = fpgaPropertiesGetDevice(props, device);
if (res != FPGA_OK) {
OPAE_ERR("Failed to get Device");
return res;
}
res = fpgaPropertiesGetFunction(props, function);
if (res != FPGA_OK) {
OPAE_ERR("Failed to get Function");
return res;
}
return res;
}
/* Initialises magic number, mutex attributes and set the mutex attribute
* type to PTHREAD_MUTEX_RECURSIVE. Also initialises the mutex referenced by
* fpga_perf->lock with attributes specified by mutex attributes */
STATIC fpga_result fpga_perf_mutex_init(fpga_perf_counter *fpga_perf)
{
pthread_mutexattr_t mattr;
if (!fpga_perf) {
OPAE_ERR("Invalid input parameters");
return FPGA_INVALID_PARAM;
}
fpga_perf->magic = FPGA_PERF_MAGIC;
if (pthread_mutexattr_init(&mattr)) {
OPAE_ERR("pthread_mutexattr_init() failed");
return FPGA_EXCEPTION;
}
if (pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE)) {
OPAE_ERR("pthread_mutexattr_settype() failed");
pthread_mutexattr_destroy(&mattr);
return FPGA_EXCEPTION;
}
if (pthread_mutex_init(&fpga_perf->lock, &mattr)) {
OPAE_ERR("pthread_mutex_init() failed");
pthread_mutexattr_destroy(&mattr);
return FPGA_EXCEPTION;
}
pthread_mutexattr_destroy(&mattr);
return FPGA_OK;
}
/* Reset the magic number and destroy the mutex created */
STATIC fpga_result fpga_perf_mutex_destroy(fpga_perf_counter *fpga_perf)
{
fpga_result ret = FPGA_OK;
int res = 0;
if (!fpga_perf) {
OPAE_ERR("Invalid input parameters");
return FPGA_INVALID_PARAM;
}
ret = fpga_perf_check_and_lock(fpga_perf);
if (ret) {
OPAE_ERR("Failed to lock perf mutex");
return ret;
}
fpga_perf->magic = 0;
ret = opae_mutex_unlock(res, &fpga_perf->lock);
if (ret) {
OPAE_ERR("Failed to unlock perf mutex");
return ret;
}
ret = pthread_mutex_destroy(&fpga_perf->lock);
if (ret) {
OPAE_ERR("Failed to destroy pthread mutex destroy");
return ret;
}
return FPGA_OK;
}
fpga_result fpgaPerfCounterGet(fpga_token token, fpga_perf_counter *fpga_perf)
{
fpga_result ret = FPGA_OK;
int res = 0;
char sysfs_path[DFL_PERF_STR_MAX] = { 0 };
char sysfs_perf[DFL_PERF_STR_MAX] = { 0 };
int gres = 0;
uint32_t fpga_id = -1;
char *endptr = NULL;
glob_t pglob;
uint8_t bus = (uint8_t)-1;
uint16_t segment = (uint16_t)-1;
uint8_t device = (uint8_t)-1;
uint8_t function = (uint8_t)-1;
if (!token || !fpga_perf) {
OPAE_ERR("Invalid input parameters");
return FPGA_INVALID_PARAM;
}
memset(fpga_perf, 0, sizeof(fpga_perf_counter));
ret = get_fpga_sbdf(token, &segment, &bus, &device, &function);
if (ret != FPGA_OK) {
OPAE_ERR("Failed to get sbdf");
return ret;
}
ret = fpga_perf_mutex_init(fpga_perf);
if (ret != FPGA_OK) {
OPAE_ERR("Failed to initialize the mutex");
return ret;
}
	/* When we bind with a new device id we get an updated function value,
	 * but the sysfs path cannot be read using it, so reset it to 0. */
	if (function)
function = 0;
if (snprintf(sysfs_path, sizeof(sysfs_path),
DFL_PERF_FME,
segment, bus, device, function) < 0) {
OPAE_ERR("snprintf buffer overflow");
return FPGA_EXCEPTION;
}
gres = glob(sysfs_path, GLOB_NOSORT, NULL, &pglob);
if (gres) {
OPAE_ERR("Failed pattern match %s: %s", sysfs_path, strerror(errno));
globfree(&pglob);
return FPGA_NOT_FOUND;
}
if (pglob.gl_pathc == 1) {
char *ptr = strstr(pglob.gl_pathv[0], "fme");
if (!ptr) {
ret = FPGA_INVALID_PARAM;
goto out;
}
errno = 0;
fpga_id = strtoul(ptr + 4, &endptr, 10);
if (snprintf(sysfs_perf, sizeof(sysfs_perf),
DFL_PERF_SYSFS"%d", fpga_id) < 0) {
OPAE_ERR("snprintf buffer overflow");
ret = FPGA_EXCEPTION;
goto out;
}
if (fpga_perf_check_and_lock(fpga_perf)) {
OPAE_ERR("Failed to lock perf mutex");
ret = FPGA_EXCEPTION;
goto out;
}
if (snprintf(fpga_perf->dfl_fme_name, sizeof(fpga_perf->dfl_fme_name),
"dfl_fme%d", fpga_id) < 0) {
OPAE_ERR("snprintf buffer overflow");
opae_mutex_unlock(res, &fpga_perf->lock);
ret = FPGA_EXCEPTION;
goto out;
}
ret = fpga_perf_events(sysfs_perf, fpga_perf);
if (ret != FPGA_OK) {
OPAE_ERR("Failed to parse fpga perf event");
opae_mutex_unlock(res, &fpga_perf->lock);
goto out;
}
if (opae_mutex_unlock(res, &fpga_perf->lock)) {
OPAE_ERR("Failed to unlock perf mutex");
ret = FPGA_EXCEPTION;
goto out;
}
} else {
ret = FPGA_NOT_FOUND;
goto out;
}
out:
globfree(&pglob);
return ret;
}
fpga_result fpgaPerfCounterStartRecord(fpga_perf_counter *fpga_perf)
{
uint64_t loop = 0;
uint64_t inner_loop = 0;
int res = 0;
char buf[DFL_PERF_STR_MAX] = { 0 };
struct read_format *rdft = (struct read_format *) buf;
if (!fpga_perf) {
OPAE_ERR("Invalid input parameters");
return FPGA_INVALID_PARAM;
}
if (fpga_perf_check_and_lock(fpga_perf)) {
OPAE_ERR("Failed to lock perf mutex");
return FPGA_EXCEPTION;
}
if (ioctl(fpga_perf->perf_events[0].fd, PERF_EVENT_IOC_ENABLE,
PERF_IOC_FLAG_GROUP) == -1) {
OPAE_ERR("PERF_EVENT_IOC_ENABLE ioctl failed: %s",
strerror(errno));
goto out;
}
if (read(fpga_perf->perf_events[0].fd, rdft, sizeof(buf)) == -1) {
OPAE_ERR("read fpga perf counter failed");
goto out;
}
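	/* Match each returned value to its event by id; the kernel does not
	 * guarantee that the group read order matches our event array. */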
for (loop = 0; loop < (uint64_t)rdft->nr; loop++) {
for (inner_loop = 0; inner_loop < fpga_perf->num_perf_events;
inner_loop++) {
if (rdft->values[loop].id == fpga_perf->perf_events[inner_loop].id)
fpga_perf->perf_events[inner_loop].start_value = rdft->values[loop].value;
}
}
if (opae_mutex_unlock(res, &fpga_perf->lock)) {
OPAE_ERR("Failed to unlock perf mutex");
return FPGA_EXCEPTION;
}
return FPGA_OK;
out:
opae_mutex_unlock(res, &fpga_perf->lock);
return FPGA_EXCEPTION;
}
fpga_result fpgaPerfCounterStopRecord(fpga_perf_counter *fpga_perf)
{
char buf[DFL_PERF_STR_MAX] = { 0 };
uint64_t loop = 0;
uint64_t inner_loop = 0;
int res = 0;
struct read_format *rdft = (struct read_format *) buf;
if (!fpga_perf) {
OPAE_ERR("Invalid input parameters");
return FPGA_INVALID_PARAM;
}
if (fpga_perf_check_and_lock(fpga_perf)) {
OPAE_ERR("Failed to lock perf mutex");
return FPGA_EXCEPTION;
}
if (ioctl(fpga_perf->perf_events[0].fd, PERF_EVENT_IOC_DISABLE,
PERF_IOC_FLAG_GROUP) == -1) {
OPAE_ERR("PERF_EVENT_IOC_ENABLE ioctl failed: %s",
strerror(errno));
goto out;
}
if (read(fpga_perf->perf_events[0].fd, rdft, sizeof(buf)) == -1) {
OPAE_ERR("read fpga perf counter failed");
goto out;
}
for (loop = 0; loop < (uint64_t)rdft->nr; loop++) {
for (inner_loop = 0; inner_loop < fpga_perf->num_perf_events;
inner_loop++) {
if (rdft->values[loop].id == fpga_perf->perf_events[inner_loop].id)
fpga_perf->perf_events[inner_loop].stop_value = rdft->values[loop].value;
}
}
if (opae_mutex_unlock(res, &fpga_perf->lock)) {
OPAE_ERR("Failed to unlock perf mutex");
return FPGA_EXCEPTION;
}
return FPGA_OK;
out:
opae_mutex_unlock(res, &fpga_perf->lock);
return FPGA_EXCEPTION;
}
fpga_result fpgaPerfCounterPrint(FILE *f, fpga_perf_counter *fpga_perf)
{
uint64_t loop = 0;
int res = 0;
if (!fpga_perf || !f) {
OPAE_ERR("Invalid input parameters");
return FPGA_INVALID_PARAM;
}
if (fpga_perf_check_and_lock(fpga_perf)) {
OPAE_ERR("Failed to lock perf mutex");
return FPGA_EXCEPTION;
}
fprintf(f, "\n");
for (loop = 0; loop < fpga_perf->num_perf_events; loop++)
fprintf(f, "%s\t", fpga_perf->perf_events[loop].event_name);
fprintf(f, "\n");
for (loop = 0; loop < fpga_perf->num_perf_events; loop++) {
if (!fpga_perf->perf_events[loop].config)
continue;
fprintf(f, "%ld\t\t", (fpga_perf->perf_events[loop].stop_value
- fpga_perf->perf_events[loop].start_value));
}
fprintf(f, "\n");
if (opae_mutex_unlock(res, &fpga_perf->lock)) {
OPAE_ERR("Failed to unlock perf mutex");
return FPGA_EXCEPTION;
}
return FPGA_OK;
}
fpga_result fpgaPerfCounterDestroy(fpga_perf_counter *fpga_perf)
{
int res = 0;
if (!fpga_perf) {
OPAE_ERR("Invalid input parameters");
return FPGA_INVALID_PARAM;
}
if (fpga_perf_check_and_lock(fpga_perf)) {
OPAE_ERR("Failed to lock perf mutex");
return FPGA_EXCEPTION;
}
if (fpga_perf->format_type) {
free(fpga_perf->format_type);
fpga_perf->format_type = NULL;
}
if (fpga_perf->perf_events) {
free(fpga_perf->perf_events);
fpga_perf->perf_events = NULL;
}
if (opae_mutex_unlock(res, &fpga_perf->lock)) {
OPAE_ERR("Failed to unlock perf mutex");
return FPGA_EXCEPTION;
}
if (fpga_perf_mutex_destroy(fpga_perf) != FPGA_OK) {
OPAE_ERR("Failed to destroy the mutex");
return FPGA_EXCEPTION;
}
return FPGA_OK;
}
| 1 | 20,838 | Hi Ramya, rather than checking here in the perf counter library, we should add the privilege check in the host_exerciser app. | OPAE-opae-sdk | c |
@@ -20,7 +20,9 @@ class TrackedObject:
"""
def __del__(self):
- notifyObjectDeletion(self)
+ # notifyObjectDeletion could be None if Python is shutting down.
+ if notifyObjectDeletion:
+ notifyObjectDeletion(self)
_collectionThreadID = 0 | 1 | # -*- coding: UTF-8 -*-
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2020 NV Access Limited
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
import gc
import threading
from logHandler import log
""" Watches Python's cyclic garbage collector and reports questionable collections. """
class TrackedObject:
"""
An object that notifies garbageHandler when it is destructed,
so that NVDA can log when important unreachable objects are being deleted
by Python's cyclic garbage collector.
"""
def __del__(self):
notifyObjectDeletion(self)
_collectionThreadID = 0
_reportCountDuringCollection = 0
def initialize():
""" Initializes NVDA's garbage handler. """
# Instruct Python to keep all unreachable objects for later inspection
# gc.set_debug(gc.DEBUG_SAVEALL)
# Register a callback with Python's garbage collector
# That will notify us of the start and end of each collection run.
gc.callbacks.append(_collectionCallback)
def _collectionCallback(action, info):
global _collectionThreadID, _reportCountDuringCollection
if action == "start":
_collectionThreadID = threading.currentThread().ident
_reportCountDuringCollection = 0
elif action == "stop":
_collectionThreadID = 0
if _reportCountDuringCollection > 0:
log.error(f"Found at least {_reportCountDuringCollection} unreachable objects in run")
else:
log.error(f"Unknown action: {action}")
def notifyObjectDeletion(obj):
"""
Logs a message about the given object being deleted,
if it is due to Python's cyclic garbage collector.
"""
global _reportCountDuringCollection
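	# Only count deletions performed on the thread that is currently running
	# a collection; ordinary reference-count deletions are ignored.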
if _collectionThreadID != threading.currentThread().ident:
return
_reportCountDuringCollection += 1
if _reportCountDuringCollection == 1:
log.warning(
"Garbage collector has found one or more unreachable objects. See further warnings for specific objects.",
stack_info=True
)
log.warning(f"Deleting unreachable object {obj}")
def terminate():
""" Terminates NVDA's garbage handler. """
gc.callbacks.remove(_collectionCallback)
| 1 | 31,729 | Could you elaborate on this some more? Has this behavior changed with Python 3.8? | nvaccess-nvda | py |
@@ -89,7 +89,10 @@ const logReqMsg = `DEBUG: Request %s/%s Details:
func logRequest(r *request.Request) {
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
- dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody)
+ dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
+ if err != nil {
+ r.Config.Logger.Log(fmt.Sprintf("Error dumping request: %s", err))
+ }
if logBody {
// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's | 1 | package client
import (
"fmt"
"io/ioutil"
"net/http/httputil"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
)
// A Config provides configuration to a service client instance.
type Config struct {
Config *aws.Config
Handlers request.Handlers
Endpoint, SigningRegion string
}
// ConfigProvider provides a generic way for a service client to receive
// the ClientConfig without circular dependencies.
type ConfigProvider interface {
ClientConfig(serviceName string, cfgs ...*aws.Config) Config
}
// A Client implements the base client request and response handling
// used by all service clients.
type Client struct {
request.Retryer
metadata.ClientInfo
Config aws.Config
Handlers request.Handlers
}
// New will return a pointer to a new initialized service client.
func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
svc := &Client{
Config: cfg,
ClientInfo: info,
Handlers: handlers,
}
switch retryer, ok := cfg.Retryer.(request.Retryer); {
case ok:
svc.Retryer = retryer
case cfg.Retryer != nil && cfg.Logger != nil:
s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
cfg.Logger.Log(s)
fallthrough
default:
maxRetries := aws.IntValue(cfg.MaxRetries)
if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
maxRetries = 3
}
svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
}
svc.AddDebugHandlers()
for _, option := range options {
option(svc)
}
return svc
}
// NewRequest returns a new Request pointer for the service API
// operation and parameters.
func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
}
// AddDebugHandlers injects debug logging handlers into the service to log request
// debug information.
func (c *Client) AddDebugHandlers() {
if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
return
}
c.Handlers.Send.PushFront(logRequest)
c.Handlers.Send.PushBack(logResponse)
}
const logReqMsg = `DEBUG: Request %s/%s Details:
---[ REQUEST POST-SIGN ]-----------------------------
%s
-----------------------------------------------------`
func logRequest(r *request.Request) {
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody)
if logBody {
// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
// Body as a NoOpCloser and will not be reset after read by the HTTP
// client reader.
r.Body.Seek(r.BodyStart, 0)
r.HTTPRequest.Body = ioutil.NopCloser(r.Body)
}
r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
}
const logRespMsg = `DEBUG: Response %s/%s Details:
---[ RESPONSE ]--------------------------------------
%s
-----------------------------------------------------`
func logResponse(r *request.Request) {
var msg = "no response data"
if r.HTTPResponse != nil {
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody)
msg = string(dumpedBody)
} else if r.Error != nil {
msg = r.Error.Error()
}
r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg))
}
| 1 | 8,254 | Do we want to continue on logging the `dumpedBody`, if an error was thrown? It is probably an empty string. This would make the log after this one pretty much useless. | aws-aws-sdk-go | go |
@@ -1,6 +1,7 @@
<fieldset class="annotation_fields" id="<%= unique_dom_id(f.object, "fields") %>">
<div class="form-group col-md-10">
- <%= f.label(:type, f.object.type.humanize, class: "control-label") %>
+ <% lbl = f.object.type.humanize.downcase == 'example answer' ? _('Example answer') : _('Guidance') %>
+ <%= f.label(:type, lbl, class: "control-label") %>
<div data-toggle="tooltip" title="<%= tooltip_for_annotation_text(f.object) %>">
<em class="sr-only"><%= tooltip_for_annotation_text(f.object) %></em>
<%= f.text_area(:text, class: 'question', | 1 | <fieldset class="annotation_fields" id="<%= unique_dom_id(f.object, "fields") %>">
<div class="form-group col-md-10">
<%= f.label(:type, f.object.type.humanize, class: "control-label") %>
<div data-toggle="tooltip" title="<%= tooltip_for_annotation_text(f.object) %>">
<em class="sr-only"><%= tooltip_for_annotation_text(f.object) %></em>
<%= f.text_area(:text, class: 'question',
id: "question_annotations_attributes_#{unique_dom_id(f.object)}_text") %>
</div>
</div>
<%= f.hidden_field(:id) %>
<%= f.hidden_field(:org_id) %>
<%= f.hidden_field(:type) %>
<%= f.hidden_field(:_destroy) %>
</fieldset>
| 1 | 18,439 | the text here is derived from the object type and not getting properly handled by get text | DMPRoadmap-roadmap | rb |
@@ -27,10 +27,13 @@ const (
var (
// runtimeMode tells which mode antctl is running against.
runtimeMode string
+ inPod bool
)
func init() {
- if strings.HasPrefix(os.Getenv("POD_NAME"), "antrea-agent") {
+ podName, found := os.LookupEnv("POD_NAME")
+ inPod = found && (strings.HasPrefix(podName, "antrea-agent") || strings.HasPrefix(podName, "antrea-controller"))
+ if strings.HasPrefix(podName, "antrea-agent") {
runtimeMode = ModeAgent
} else {
runtimeMode = ModeController | 1 | // Copyright 2020 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package antctl
import (
"os"
"strings"
)
const (
ModeController string = "controller"
ModeAgent string = "agent"
)
var (
// runtimeMode tells which mode antctl is running against.
runtimeMode string
)
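// init infers which mode antctl runs in from the POD_NAME environment
// variable set on the antrea Pods.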
func init() {
if strings.HasPrefix(os.Getenv("POD_NAME"), "antrea-agent") {
runtimeMode = ModeAgent
} else {
runtimeMode = ModeController
}
}
| 1 | 15,833 | This will make other pods that run antctl will also connect its localhost? Maybe only do it when it's in antrea-agent and antrea-controller | antrea-io-antrea | go |
@@ -168,9 +168,6 @@ define(["jQuery", "globalize", "scripts/taskbutton", "dom", "libraryMenu", "layo
}), menuItems.push({
name: "Xml TV",
id: "xmltv"
- }), menuItems.push({
- name: globalize.translate("ButtonOther"),
- id: "other"
}), require(["actionsheet"], function(actionsheet) {
actionsheet.show({
items: menuItems, | 1 | define(["jQuery", "globalize", "scripts/taskbutton", "dom", "libraryMenu", "layoutManager", "loading", "listViewStyle", "flexStyles", "emby-itemscontainer", "cardStyle", "material-icons", "emby-button"], function($, globalize, taskButton, dom, libraryMenu, layoutManager, loading) {
"use strict";
function getDeviceHtml(device) {
var padderClass, html = "",
cssClass = "card scalableCard",
cardBoxCssClass = "cardBox visualCardBox";
return cssClass += " backdropCard backdropCard-scalable", padderClass = "cardPadder-backdrop", layoutManager.tv && (cssClass += " card-focusscale", cardBoxCssClass += " cardBox-focustransform"), cardBoxCssClass += " card-focuscontent", html += '<div type="button" class="' + cssClass + '" data-id="' + device.Id + '">', html += '<div class="' + cardBoxCssClass + '">', html += '<div class="cardScalable visualCardBox-cardScalable">', html += '<div class="' + padderClass + '"></div>', html += '<div class="cardContent searchImage">', html += '<div class="cardImageContainer coveredImage"><i class="cardImageIcon md-icon">dvr</i></div>', html += "</div>", html += "</div>", html += '<div class="cardFooter visualCardBox-cardFooter">', html += '<button is="paper-icon-button-light" class="itemAction btnCardOptions autoSize" data-action="menu"><i class="md-icon">more_horiz</i></button>', html += '<div class="cardText">' + (device.FriendlyName || getTunerName(device.Type)) + "</div>", html += '<div class="cardText cardText-secondary">', html += device.Url || " ", html += "</div>", html += "</div>", html += "</div>", html += "</div>"
}
function renderDevices(page, devices) {
var html = devices.map(getDeviceHtml).join("");
page.querySelector(".devicesList").innerHTML = html
}
function deleteDevice(page, id) {
var message = globalize.translate("MessageConfirmDeleteTunerDevice");
require(["confirm"], function(confirm) {
confirm(message, globalize.translate("HeaderDeleteDevice")).then(function() {
loading.show(), ApiClient.ajax({
type: "DELETE",
url: ApiClient.getUrl("LiveTv/TunerHosts", {
Id: id
})
}).then(function() {
reload(page)
})
})
})
}
function reload(page) {
loading.show(), ApiClient.getNamedConfiguration("livetv").then(function(config) {
renderDevices(page, config.TunerHosts), renderProviders(page, config.ListingProviders)
}), loading.hide()
}
function submitAddDeviceForm(page) {
page.querySelector(".dlgAddDevice").close(), loading.show(), ApiClient.ajax({
type: "POST",
url: ApiClient.getUrl("LiveTv/TunerHosts"),
data: JSON.stringify({
Type: $("#selectTunerDeviceType", page).val(),
Url: $("#txtDevicePath", page).val()
}),
contentType: "application/json"
}).then(function() {
reload(page)
}, function() {
Dashboard.alert({
message: globalize.translate("ErrorAddingTunerDevice")
})
})
}
function renderProviders(page, providers) {
var html = "";
if (providers.length) {
html += '<div class="paperList">';
for (var i = 0, length = providers.length; i < length; i++) {
var provider = providers[i];
html += '<div class="listItem">', html += '<i class="listItemIcon md-icon">dvr</i>', html += '<div class="listItemBody two-line">', html += '<a is="emby-linkbutton" style="display:block;padding:0;margin:0;text-align:left;" class="clearLink" href="' + getProviderConfigurationUrl(provider.Type) + "&id=" + provider.Id + '">', html += '<h3 class="listItemBodyText">', html += getProviderName(provider.Type), html += "</h3>", html += '<div class="listItemBodyText secondary">', html += provider.Path || provider.ListingsId || "", html += "</div>", html += "</a>", html += "</div>", html += '<button type="button" is="paper-icon-button-light" class="btnOptions" data-id="' + provider.Id + '"><i class="md-icon listItemAside">more_horiz</i></button>', html += "</div>"
}
html += "</div>"
}
var elem = $(".providerList", page).html(html);
$(".btnOptions", elem).on("click", function() {
var id = this.getAttribute("data-id");
showProviderOptions(page, id, this)
})
}
function showProviderOptions(page, providerId, button) {
var items = [];
items.push({
name: globalize.translate("ButtonDelete"),
id: "delete"
}), items.push({
name: globalize.translate("MapChannels"),
id: "map"
}), require(["actionsheet"], function(actionsheet) {
actionsheet.show({
items: items,
positionTo: button
}).then(function(id) {
switch (id) {
case "delete":
deleteProvider(page, providerId);
break;
case "map":
mapChannels(page, providerId)
}
})
})
}
function mapChannels(page, providerId) {
require(["components/channelmapper/channelmapper"], function(channelmapper) {
new channelmapper({
serverId: ApiClient.serverInfo().Id,
providerId: providerId
}).show()
})
}
function deleteProvider(page, id) {
var message = globalize.translate("MessageConfirmDeleteGuideProvider");
require(["confirm"], function(confirm) {
confirm(message, globalize.translate("HeaderDeleteProvider")).then(function() {
loading.show(), ApiClient.ajax({
type: "DELETE",
url: ApiClient.getUrl("LiveTv/ListingProviders", {
Id: id
})
}).then(function() {
reload(page)
}, function() {
reload(page)
})
})
})
}
function getTunerName(providerId) {
switch (providerId = providerId.toLowerCase()) {
case "m3u":
return "M3U";
case "hdhomerun":
return "HDHomerun";
case "hauppauge":
return "Hauppauge";
case "satip":
return "DVB";
default:
return "Unknown"
}
}
function getProviderName(providerId) {
switch (providerId = providerId.toLowerCase()) {
case "schedulesdirect":
return "Schedules Direct";
case "xmltv":
return "Xml TV";
case "emby":
return "Emby Guide";
default:
return "Unknown"
}
}
function getProviderConfigurationUrl(providerId) {
switch (providerId = providerId.toLowerCase()) {
case "xmltv":
return "livetvguideprovider.html?type=xmltv";
case "schedulesdirect":
return "livetvguideprovider.html?type=schedulesdirect";
case "emby":
return "livetvguideprovider.html?type=emby"
}
}
function addProvider(button) {
var menuItems = [];
menuItems.push({
name: "Schedules Direct",
id: "SchedulesDirect"
}), menuItems.push({
name: "Xml TV",
id: "xmltv"
}), menuItems.push({
name: globalize.translate("ButtonOther"),
id: "other"
}), require(["actionsheet"], function(actionsheet) {
actionsheet.show({
items: menuItems,
positionTo: button,
callback: function(id) {
"other" == id ? Dashboard.alert({
message: globalize.translate("ForAdditionalLiveTvOptions")
}) : Dashboard.navigate(getProviderConfigurationUrl(id))
}
})
})
}
function addDevice(button) {
Dashboard.navigate("livetvtuner.html")
}
function showDeviceMenu(button, tunerDeviceId) {
var items = [];
items.push({
name: globalize.translate("ButtonDelete"),
id: "delete"
}), items.push({
name: globalize.translate("ButtonEdit"),
id: "edit"
}), require(["actionsheet"], function(actionsheet) {
actionsheet.show({
items: items,
positionTo: button
}).then(function(id) {
switch (id) {
case "delete":
deleteDevice(dom.parentWithClass(button, "page"), tunerDeviceId);
break;
case "edit":
Dashboard.navigate("livetvtuner.html?id=" + tunerDeviceId)
}
})
})
}
function onDevicesListClick(e) {
var card = dom.parentWithClass(e.target, "card");
if (card) {
var id = card.getAttribute("data-id"),
btnCardOptions = dom.parentWithClass(e.target, "btnCardOptions");
btnCardOptions ? showDeviceMenu(btnCardOptions, id) : Dashboard.navigate("livetvtuner.html?id=" + id)
}
}
$(document).on("pageinit", "#liveTvStatusPage", function() {
var page = this;
$(".btnAddDevice", page).on("click", function() {
addDevice(this)
}), $(".formAddDevice", page).on("submit", function() {
return submitAddDeviceForm(page), !1
}), $(".btnAddProvider", page).on("click", function() {
addProvider(this)
}), page.querySelector(".devicesList").addEventListener("click", onDevicesListClick)
}).on("pageshow", "#liveTvStatusPage", function() {
var page = this;
reload(page), taskButton({
mode: "on",
progressElem: page.querySelector(".refreshGuideProgress"),
taskKey: "RefreshGuide",
button: page.querySelector(".btnRefresh")
})
}).on("pagehide", "#liveTvStatusPage", function() {
var page = this;
taskButton({
mode: "off",
progressElem: page.querySelector(".refreshGuideProgress"),
taskKey: "RefreshGuide",
button: page.querySelector(".btnRefresh")
})
})
}); | 1 | 11,662 | The corresponding action for this key needs to be removed as well, along with any unused translations. | jellyfin-jellyfin-web | js |
@@ -48,15 +48,15 @@ namespace Datadog.Trace.ClrProfiler.AutoInstrumentation.AWS.SQS
var requestProxy = request.DuckCast<ISendMessageBatchRequest>();
- var scope = AwsSqsCommon.CreateScope(Tracer.Instance, Operation, out AwsSqsTags tags);
+ var scope = AwsSqsCommon.CreateScope(Tracer.InternalInstance, Operation, out AwsSqsTags tags);
tags.QueueUrl = requestProxy.QueueUrl;
- if (scope?.Span?.Context != null && requestProxy.Entries.Count > 0)
+ if (scope?.Span.Context is SpanContext spanContext && requestProxy.Entries.Count > 0)
{
for (int i = 0; i < requestProxy.Entries.Count; i++)
{
var entry = requestProxy.Entries[i].DuckCast<IContainsMessageAttributes>();
- ContextPropagation.InjectHeadersIntoMessage<TSendMessageBatchRequest>(entry, scope?.Span?.Context);
+ ContextPropagation.InjectHeadersIntoMessage<TSendMessageBatchRequest>(entry, spanContext);
}
}
| 1 | // <copyright file="SendMessageBatchAsyncIntegration.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
using System;
using System.ComponentModel;
using System.Threading;
using Datadog.Trace.ClrProfiler.CallTarget;
using Datadog.Trace.DuckTyping;
using Datadog.Trace.Tagging;
namespace Datadog.Trace.ClrProfiler.AutoInstrumentation.AWS.SQS
{
/// <summary>
/// AWSSDK.SQS SendMessageBatchAsync calltarget instrumentation
/// </summary>
[InstrumentMethod(
AssemblyName = "AWSSDK.SQS",
TypeName = "Amazon.SQS.AmazonSQSClient",
MethodName = "SendMessageBatchAsync",
ReturnTypeName = "System.Threading.Tasks.Task`1<Amazon.SQS.Model.SendMessageBatchResponse>",
ParameterTypeNames = new[] { "Amazon.SQS.Model.SendMessageBatchRequest", ClrNames.CancellationToken },
MinimumVersion = "3.0.0",
MaximumVersion = "3.*.*",
IntegrationName = AwsSqsCommon.IntegrationName)]
[Browsable(false)]
[EditorBrowsable(EditorBrowsableState.Never)]
public class SendMessageBatchAsyncIntegration
{
private const string Operation = "SendMessageBatch";
/// <summary>
/// OnMethodBegin callback
/// </summary>
/// <typeparam name="TTarget">Type of the target</typeparam>
/// <typeparam name="TSendMessageBatchRequest">Type of the request object</typeparam>
/// <param name="instance">Instance value, aka `this` of the instrumented method</param>
/// <param name="request">The request for the SQS operation</param>
/// <param name="cancellationToken">CancellationToken value</param>
/// <returns>Calltarget state value</returns>
public static CallTargetState OnMethodBegin<TTarget, TSendMessageBatchRequest>(TTarget instance, TSendMessageBatchRequest request, CancellationToken cancellationToken)
{
if (request is null)
{
return CallTargetState.GetDefault();
}
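            // Duck typing gives a version-tolerant view of the SDK request
            // object without a direct reference to AWSSDK.SQS types.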
var requestProxy = request.DuckCast<ISendMessageBatchRequest>();
var scope = AwsSqsCommon.CreateScope(Tracer.Instance, Operation, out AwsSqsTags tags);
tags.QueueUrl = requestProxy.QueueUrl;
if (scope?.Span?.Context != null && requestProxy.Entries.Count > 0)
{
for (int i = 0; i < requestProxy.Entries.Count; i++)
{
var entry = requestProxy.Entries[i].DuckCast<IContainsMessageAttributes>();
ContextPropagation.InjectHeadersIntoMessage<TSendMessageBatchRequest>(entry, scope?.Span?.Context);
}
}
return new CallTargetState(scope);
}
/// <summary>
/// OnAsyncMethodEnd callback
/// </summary>
/// <typeparam name="TTarget">Type of the target</typeparam>
/// <typeparam name="TResponse">Type of the response, in an async scenario will be T of Task of T</typeparam>
/// <param name="instance">Instance value, aka `this` of the instrumented method.</param>
/// <param name="response">Response instance</param>
/// <param name="exception">Exception instance in case the original code threw an exception.</param>
/// <param name="state">Calltarget state value</param>
/// <returns>A response value, in an async scenario will be T of Task of T</returns>
public static TResponse OnAsyncMethodEnd<TTarget, TResponse>(TTarget instance, TResponse response, Exception exception, CallTargetState state)
{
state.Scope.DisposeWithException(exception);
return response;
}
}
}
| 1 | 24,081 | Can't Span cannot be null anymore? I assume it was a useless check as there are discrepencies within integrations, but as you explicitly removed this one, I was wondering | DataDog-dd-trace-dotnet | .cs |
@@ -210,7 +210,7 @@ namespace Nethermind.JsonRpc.Modules.Trace
Block block = blockSearch.Object;
- ParityLikeTxTrace txTrace = TraceTx(block, txHash, ParityTraceTypes.Trace | ParityTraceTypes.Rewards);
+ ParityLikeTxTrace txTrace = TraceTx(block, txHash, ParityTraceTypes.Trace);
return ResultWrapper<ParityTxTraceFromStore[]>.Success(ParityTxTraceFromStore.FromTxTrace(txTrace));
}
| 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using MathNet.Numerics.Distributions;
using Nethermind.Blockchain.Find;
using Nethermind.Blockchain.Receipts;
using Nethermind.Blockchain.Tracing;
using Nethermind.Core;
using Nethermind.Core.Crypto;
using Nethermind.Core.Specs;
using Nethermind.Evm.Tracing;
using Nethermind.Evm.Tracing.ParityStyle;
using Nethermind.Int256;
using Nethermind.JsonRpc.Data;
using Nethermind.Logging;
using Nethermind.Serialization.Rlp;
namespace Nethermind.JsonRpc.Modules.Trace
{
public class TraceRpcModule : ITraceRpcModule
{
private readonly IReceiptFinder _receiptFinder;
private readonly ITracer _tracer;
private readonly IBlockFinder _blockFinder;
private readonly TxDecoder _txDecoder = new();
private readonly IJsonRpcConfig _jsonRpcConfig;
private readonly ILogManager _logManager;
private readonly ILogger _logger;
private readonly ISpecProvider _specProvider;
private readonly TimeSpan _cancellationTokenTimeout;
        public TraceRpcModule(IReceiptFinder receiptFinder, ITracer tracer, IBlockFinder blockFinder, IJsonRpcConfig jsonRpcConfig, ISpecProvider specProvider, ILogManager logManager)
{
_receiptFinder = receiptFinder ?? throw new ArgumentNullException(nameof(receiptFinder));
_tracer = tracer ?? throw new ArgumentNullException(nameof(tracer));
_blockFinder = blockFinder ?? throw new ArgumentNullException(nameof(blockFinder));
_jsonRpcConfig = jsonRpcConfig ?? throw new ArgumentNullException(nameof(jsonRpcConfig));
_specProvider = specProvider ?? throw new ArgumentNullException(nameof(specProvider));
_logManager = logManager ?? throw new ArgumentNullException(nameof(logManager));
_logger = logManager.GetClassLogger();
_cancellationTokenTimeout = TimeSpan.FromMilliseconds(_jsonRpcConfig.Timeout);
}
private static ParityTraceTypes GetParityTypes(string[] types)
{
return types.Select(s => (ParityTraceTypes) Enum.Parse(typeof(ParityTraceTypes), s, true)).Aggregate((t1, t2) => t1 | t2);
}
public ResultWrapper<ParityTxTraceFromReplay> trace_call(TransactionForRpc message, string[] traceTypes, BlockParameter blockParameter)
{
Transaction tx = message.ToTransaction();
return TraceTx(tx, traceTypes, blockParameter);
}
public ResultWrapper<ParityTxTraceFromReplay[]> trace_callMany((TransactionForRpc message, string[] traceTypes, BlockParameter numberOrTag)[] a)
{
throw new NotImplementedException();
}
public ResultWrapper<ParityTxTraceFromReplay> trace_rawTransaction(byte[] data, string[] traceTypes)
{
Transaction tx = _txDecoder.Decode(new RlpStream(data), RlpBehaviors.SkipTypedWrapping);
return TraceTx(tx, traceTypes, BlockParameter.Latest);
}
private ResultWrapper<ParityTxTraceFromReplay> TraceTx(Transaction tx, string[] traceTypes, BlockParameter blockParameter)
{
SearchResult<BlockHeader> headerSearch = _blockFinder.SearchForHeader(blockParameter);
if (headerSearch.IsError)
{
return ResultWrapper<ParityTxTraceFromReplay>.Fail(headerSearch);
}
BlockHeader header = headerSearch.Object;
if (header.IsGenesis)
{
UInt256 baseFee = header.BaseFeePerGas;
header = new BlockHeader(
header.Hash,
Keccak.OfAnEmptySequenceRlp,
Address.Zero,
header.Difficulty,
header.Number + 1,
header.GasLimit,
header.Timestamp + 1,
header.ExtraData);
header.TotalDifficulty = 2 * header.Difficulty;
header.BaseFeePerGas = baseFee;
}
Block block = new(header, new[] {tx}, Enumerable.Empty<BlockHeader>());
IReadOnlyCollection<ParityLikeTxTrace> result = TraceBlock(block, GetParityTypes(traceTypes));
return ResultWrapper<ParityTxTraceFromReplay>.Success(new ParityTxTraceFromReplay(result.SingleOrDefault()));
}
public ResultWrapper<ParityTxTraceFromReplay> trace_replayTransaction(Keccak txHash, string[] traceTypes)
{
SearchResult<Keccak> blockHashSearch = _receiptFinder.SearchForReceiptBlockHash(txHash);
if (blockHashSearch.IsError)
{
return ResultWrapper<ParityTxTraceFromReplay>.Fail(blockHashSearch);
}
SearchResult<Block> blockSearch = _blockFinder.SearchForBlock(new BlockParameter(blockHashSearch.Object));
if (blockSearch.IsError)
{
return ResultWrapper<ParityTxTraceFromReplay>.Fail(blockSearch);
}
Block block = blockSearch.Object;
ParityLikeTxTrace txTrace = TraceTx(block, txHash, GetParityTypes(traceTypes));
return ResultWrapper<ParityTxTraceFromReplay>.Success(new ParityTxTraceFromReplay(txTrace));
}
public ResultWrapper<ParityTxTraceFromReplay[]> trace_replayBlockTransactions(BlockParameter blockParameter, string[] traceTypes)
{
SearchResult<Block> blockSearch = _blockFinder.SearchForBlock(blockParameter);
if (blockSearch.IsError)
{
return ResultWrapper<ParityTxTraceFromReplay[]>.Fail(blockSearch);
}
Block block = blockSearch.Object;
IReadOnlyCollection<ParityLikeTxTrace> txTraces = TraceBlock(block, GetParityTypes(traceTypes));
// ReSharper disable once CoVariantArrayConversion
return ResultWrapper<ParityTxTraceFromReplay[]>.Success(txTraces.Select(t => new ParityTxTraceFromReplay(t, true)).ToArray());
}
public ResultWrapper<ParityTxTraceFromStore[]> trace_filter(TraceFilterForRpc traceFilterForRpc)
{
TxTraceFilter txTracerFilter = new(traceFilterForRpc.FromAddress, traceFilterForRpc.ToAddress, traceFilterForRpc.After, traceFilterForRpc.Count, _specProvider, _logManager);
List<ParityLikeTxTrace> txTraces = new();
IEnumerable<SearchResult<Block>> blocksSearch =
_blockFinder.SearchForBlocksOnMainChain(traceFilterForRpc.FromBlock ?? BlockParameter.Latest, traceFilterForRpc.ToBlock ?? BlockParameter.Latest);
foreach (SearchResult<Block> blockSearch in blocksSearch)
{
if (!txTracerFilter.ShouldContinue())
break;
if (blockSearch.IsError)
{
return ResultWrapper<ParityTxTraceFromStore[]>.Fail(blockSearch);
}
Block block = blockSearch.Object;
if (!txTracerFilter.ShouldTraceBlock(block))
continue;
IReadOnlyCollection<ParityLikeTxTrace> txTracesFromOneBlock =
TraceBlock(block, ParityTraceTypes.Trace | ParityTraceTypes.Rewards, txTracerFilter);
txTraces.AddRange(txTracesFromOneBlock);
}
return ResultWrapper<ParityTxTraceFromStore[]>.Success(txTraces.SelectMany(ParityTxTraceFromStore.FromTxTrace).ToArray());
}
public ResultWrapper<ParityTxTraceFromStore[]> trace_block(BlockParameter blockParameter)
{
SearchResult<Block> blockSearch = _blockFinder.SearchForBlock(blockParameter);
if (blockSearch.IsError)
{
return ResultWrapper<ParityTxTraceFromStore[]>.Fail(blockSearch);
}
Block block = blockSearch.Object;
IReadOnlyCollection<ParityLikeTxTrace> txTraces = TraceBlock(block, ParityTraceTypes.Trace | ParityTraceTypes.Rewards);
return ResultWrapper<ParityTxTraceFromStore[]>.Success(txTraces.SelectMany(ParityTxTraceFromStore.FromTxTrace).ToArray());
}
public ResultWrapper<ParityTxTraceFromStore[]> trace_get(Keccak txHash, int[] positions)
{
throw new NotImplementedException();
}
public ResultWrapper<ParityTxTraceFromStore[]> trace_transaction(Keccak txHash)
{
SearchResult<Keccak> blockHashSearch = _receiptFinder.SearchForReceiptBlockHash(txHash);
if (blockHashSearch.IsError)
{
return ResultWrapper<ParityTxTraceFromStore[]>.Fail(blockHashSearch);
}
SearchResult<Block> blockSearch = _blockFinder.SearchForBlock(new BlockParameter(blockHashSearch.Object));
if (blockSearch.IsError)
{
return ResultWrapper<ParityTxTraceFromStore[]>.Fail(blockSearch);
}
Block block = blockSearch.Object;
ParityLikeTxTrace txTrace = TraceTx(block, txHash, ParityTraceTypes.Trace | ParityTraceTypes.Rewards);
return ResultWrapper<ParityTxTraceFromStore[]>.Success(ParityTxTraceFromStore.FromTxTrace(txTrace));
}
private IReadOnlyCollection<ParityLikeTxTrace> TraceBlock(Block block, ParityTraceTypes traceTypes, TxTraceFilter txTraceFilter = null)
{
using CancellationTokenSource cancellationTokenSource = new(_cancellationTokenTimeout);
CancellationToken cancellationToken = cancellationTokenSource.Token;
ParityLikeBlockTracer listener = new(traceTypes, txTraceFilter, _specProvider);
_tracer.Trace(block, listener.WithCancellation(cancellationToken));
return listener.BuildResult();
}
private ParityLikeTxTrace TraceTx(Block block, Keccak txHash, ParityTraceTypes traceTypes)
{
using CancellationTokenSource cancellationTokenSource = new(_cancellationTokenTimeout);
CancellationToken cancellationToken = cancellationTokenSource.Token;
ParityLikeBlockTracer listener = new(txHash, traceTypes);
_tracer.Trace(block, listener.WithCancellation(cancellationToken));
return listener.BuildResult().SingleOrDefault();
}
}
}
| 1 | 26,145 | Hmmm, ok, this is interesting -> Lukasz definitely uses rewards traces. | NethermindEth-nethermind | .cs
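To make the concern concrete, here is a hedged sketch of how callers that depend on reward traces could keep them after this change; the boolean overload is hypothetical and not part of the module's interface.

        // Hypothetical overload: lets trace_transaction callers opt back into
        // reward traces instead of dropping them unconditionally.
        private ParityLikeTxTrace TraceTx(Block block, Keccak txHash, bool includeRewards)
        {
            ParityTraceTypes traceTypes = ParityTraceTypes.Trace;
            if (includeRewards)
            {
                traceTypes |= ParityTraceTypes.Rewards;
            }
            return TraceTx(block, txHash, traceTypes);
        }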
@@ -78,6 +78,9 @@ public class TableProperties {
public static final String SPLIT_OPEN_FILE_COST = "read.split.open-file-cost";
public static final long SPLIT_OPEN_FILE_COST_DEFAULT = 4 * 1024 * 1024; // 4MB
+ public static final String SPLIT_BY_PARTITION = "read.split.by-partition";
+ public static final boolean SPLIT_BY_PARTITION_DEFAULT = false;
+
public static final String PARQUET_VECTORIZATION_ENABLED = "read.parquet.vectorization.enabled";
public static final boolean PARQUET_VECTORIZATION_ENABLED_DEFAULT = false;
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
public class TableProperties {
private TableProperties() {
}
public static final String COMMIT_NUM_RETRIES = "commit.retry.num-retries";
public static final int COMMIT_NUM_RETRIES_DEFAULT = 4;
public static final String COMMIT_MIN_RETRY_WAIT_MS = "commit.retry.min-wait-ms";
public static final int COMMIT_MIN_RETRY_WAIT_MS_DEFAULT = 100;
public static final String COMMIT_MAX_RETRY_WAIT_MS = "commit.retry.max-wait-ms";
public static final int COMMIT_MAX_RETRY_WAIT_MS_DEFAULT = 60000; // 1 minute
public static final String COMMIT_TOTAL_RETRY_TIME_MS = "commit.retry.total-timeout-ms";
public static final int COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT = 1800000; // 30 minutes
public static final String MANIFEST_TARGET_SIZE_BYTES = "commit.manifest.target-size-bytes";
public static final long MANIFEST_TARGET_SIZE_BYTES_DEFAULT = 8388608; // 8 MB
public static final String MANIFEST_MIN_MERGE_COUNT = "commit.manifest.min-count-to-merge";
public static final int MANIFEST_MIN_MERGE_COUNT_DEFAULT = 100;
public static final String MANIFEST_MERGE_ENABLED = "commit.manifest-merge.enabled";
public static final boolean MANIFEST_MERGE_ENABLED_DEFAULT = true;
public static final String DEFAULT_FILE_FORMAT = "write.format.default";
public static final String DEFAULT_FILE_FORMAT_DEFAULT = "parquet";
public static final String PARQUET_ROW_GROUP_SIZE_BYTES = "write.parquet.row-group-size-bytes";
public static final String PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT = "134217728"; // 128 MB
public static final String PARQUET_PAGE_SIZE_BYTES = "write.parquet.page-size-bytes";
public static final String PARQUET_PAGE_SIZE_BYTES_DEFAULT = "1048576"; // 1 MB
public static final String PARQUET_DICT_SIZE_BYTES = "write.parquet.dict-size-bytes";
public static final String PARQUET_DICT_SIZE_BYTES_DEFAULT = "2097152"; // 2 MB
public static final String PARQUET_COMPRESSION = "write.parquet.compression-codec";
public static final String PARQUET_COMPRESSION_DEFAULT = "gzip";
public static final String PARQUET_COMPRESSION_LEVEL = "write.parquet.compression-level";
public static final String PARQUET_COMPRESSION_LEVEL_DEFAULT = null;
public static final String AVRO_COMPRESSION = "write.avro.compression-codec";
public static final String AVRO_COMPRESSION_DEFAULT = "gzip";
public static final String SPLIT_SIZE = "read.split.target-size";
public static final long SPLIT_SIZE_DEFAULT = 134217728; // 128 MB
public static final String METADATA_SPLIT_SIZE = "read.split.metadata-target-size";
public static final long METADATA_SPLIT_SIZE_DEFAULT = 32 * 1024 * 1024; // 32 MB
public static final String SPLIT_LOOKBACK = "read.split.planning-lookback";
public static final int SPLIT_LOOKBACK_DEFAULT = 10;
public static final String SPLIT_OPEN_FILE_COST = "read.split.open-file-cost";
public static final long SPLIT_OPEN_FILE_COST_DEFAULT = 4 * 1024 * 1024; // 4MB
public static final String PARQUET_VECTORIZATION_ENABLED = "read.parquet.vectorization.enabled";
public static final boolean PARQUET_VECTORIZATION_ENABLED_DEFAULT = false;
public static final String PARQUET_BATCH_SIZE = "read.parquet.vectorization.batch-size";
public static final int PARQUET_BATCH_SIZE_DEFAULT = 5000;
public static final String OBJECT_STORE_ENABLED = "write.object-storage.enabled";
public static final boolean OBJECT_STORE_ENABLED_DEFAULT = false;
public static final String OBJECT_STORE_PATH = "write.object-storage.path";
public static final String WRITE_LOCATION_PROVIDER_IMPL = "write.location-provider.impl";
// This only applies to files written after this property is set. Files previously written aren't
// relocated to reflect this parameter.
// If not set, defaults to a "data" folder underneath the root path of the table.
public static final String WRITE_NEW_DATA_LOCATION = "write.folder-storage.path";
// This only applies to files written after this property is set. Files previously written aren't
// relocated to reflect this parameter.
// If not set, defaults to a "metadata" folder underneath the root path of the table.
public static final String WRITE_METADATA_LOCATION = "write.metadata.path";
public static final String WRITE_PARTITION_SUMMARY_LIMIT = "write.summary.partition-limit";
public static final int WRITE_PARTITION_SUMMARY_LIMIT_DEFAULT = 0;
public static final String MANIFEST_LISTS_ENABLED = "write.manifest-lists.enabled";
public static final boolean MANIFEST_LISTS_ENABLED_DEFAULT = true;
public static final String METADATA_COMPRESSION = "write.metadata.compression-codec";
public static final String METADATA_COMPRESSION_DEFAULT = "none";
public static final String METADATA_PREVIOUS_VERSIONS_MAX = "write.metadata.previous-versions-max";
public static final int METADATA_PREVIOUS_VERSIONS_MAX_DEFAULT = 100;
// This enables to delete the oldest metadata file after commit.
public static final String METADATA_DELETE_AFTER_COMMIT_ENABLED = "write.metadata.delete-after-commit.enabled";
public static final boolean METADATA_DELETE_AFTER_COMMIT_ENABLED_DEFAULT = false;
public static final String METRICS_MODE_COLUMN_CONF_PREFIX = "write.metadata.metrics.column.";
public static final String DEFAULT_WRITE_METRICS_MODE = "write.metadata.metrics.default";
public static final String DEFAULT_WRITE_METRICS_MODE_DEFAULT = "truncate(16)";
public static final String DEFAULT_NAME_MAPPING = "schema.name-mapping.default";
public static final String WRITE_AUDIT_PUBLISH_ENABLED = "write.wap.enabled";
public static final String WRITE_AUDIT_PUBLISH_ENABLED_DEFAULT = "false";
public static final String WRITE_TARGET_FILE_SIZE_BYTES = "write.target-file-size-bytes";
public static final long WRITE_TARGET_FILE_SIZE_BYTES_DEFAULT = 536870912; // 512 MB
public static final String SPARK_WRITE_PARTITIONED_FANOUT_ENABLED = "write.spark.fanout.enabled";
public static final boolean SPARK_WRITE_PARTITIONED_FANOUT_ENABLED_DEFAULT = false;
public static final String SNAPSHOT_ID_INHERITANCE_ENABLED = "compatibility.snapshot-id-inheritance.enabled";
public static final boolean SNAPSHOT_ID_INHERITANCE_ENABLED_DEFAULT = false;
public static final String ENGINE_HIVE_ENABLED = "engine.hive.enabled";
public static final boolean ENGINE_HIVE_ENABLED_DEFAULT = false;
public static final String WRITE_DISTRIBUTION_MODE = "write.distribution-mode";
public static final String WRITE_DISTRIBUTION_MODE_NONE = "none";
public static final String WRITE_DISTRIBUTION_MODE_HASH = "hash";
public static final String WRITE_DISTRIBUTION_MODE_RANGE = "range";
public static final String WRITE_DISTRIBUTION_MODE_DEFAULT = WRITE_DISTRIBUTION_MODE_NONE;
public static final String GC_ENABLED = "gc.enabled";
public static final boolean GC_ENABLED_DEFAULT = true;
public static final String MAX_SNAPSHOT_AGE_MS = "history.expire.max-snapshot-age-ms";
public static final long MAX_SNAPSHOT_AGE_MS_DEFAULT = 5 * 24 * 60 * 60 * 1000; // 5 days
public static final String MIN_SNAPSHOTS_TO_KEEP = "history.expire.min-snapshots-to-keep";
public static final int MIN_SNAPSHOTS_TO_KEEP_DEFAULT = 1;
public static final String DELETE_ISOLATION_LEVEL = "write.delete.isolation-level";
public static final String DELETE_ISOLATION_LEVEL_DEFAULT = "serializable";
public static final String DELETE_MODE = "write.delete.mode";
public static final String DELETE_MODE_DEFAULT = "copy-on-write";
public static final String UPDATE_ISOLATION_LEVEL = "write.update.isolation-level";
public static final String UPDATE_ISOLATION_LEVEL_DEFAULT = "serializable";
public static final String UPDATE_MODE = "write.update.mode";
public static final String UPDATE_MODE_DEFAULT = "copy-on-write";
public static final String MERGE_ISOLATION_LEVEL = "write.merge.isolation-level";
public static final String MERGE_ISOLATION_LEVEL_DEFAULT = "serializable";
public static final String MERGE_MODE = "write.merge.mode";
public static final String MERGE_MODE_DEFAULT = "copy-on-write";
public static final String MERGE_CARDINALITY_CHECK_ENABLED = "write.merge.cardinality-check.enabled";
public static final boolean MERGE_CARDINALITY_CHECK_ENABLED_DEFAULT = true;
}
| 1 | 34,209 | Why do we need this flag? Can this be detected from whether the table is bucketed/partitioned, and enabled/disabled automatically? Is this for backwards compatibility? | apache-iceberg | java
@@ -188,7 +188,7 @@ bool is_name_type(const char* name)
{
if(*name == '$')
name++;
-
+
if(*name == '_')
name++;
| 1 | #include "ponyassert.h"
#include "id.h"
#include "id_internal.h"
bool check_id(pass_opt_t* opt, ast_t* id_node, const char* desc, int spec)
{
pony_assert(id_node != NULL);
pony_assert(ast_id(id_node) == TK_ID);
pony_assert(desc != NULL);
const char* name = ast_name(id_node);
pony_assert(name != NULL);
char prev = '\0';
// Ignore leading $, handled by lexer
if(*name == '$')
{
// We assume this variable has been placed by someone or something who
// knows what they are doing
return true;
}
// Ignore leading _
if(*name == '_')
{
name++;
prev = '_';
if(*name == '\0')
{
if((spec & ALLOW_DONTCARE) == 0)
{
ast_error(opt->check.errors, id_node,
"%s name cannot be \"%s\"", desc, ast_name(id_node));
return false;
}
return true;
}
if((spec & ALLOW_LEADING_UNDERSCORE) == 0)
{
ast_error(opt->check.errors, id_node,
"%s name \"%s\" cannot start with underscores", desc,
ast_name(id_node));
return false;
}
}
if((spec & START_LOWER) != 0 && (*name < 'a' || *name > 'z'))
{
if ((spec & ALLOW_LEADING_UNDERSCORE) == 0) {
ast_error(opt->check.errors, id_node,
"%s name \"%s\" must start a-z", desc, ast_name(id_node));
return false;
} else {
ast_error(opt->check.errors, id_node,
"%s name \"%s\" must start a-z or _(a-z)", desc, ast_name(id_node));
return false;
}
}
if((spec & START_UPPER) != 0 && (*name < 'A' || *name > 'Z'))
{
if ((spec & ALLOW_LEADING_UNDERSCORE) == 0) {
ast_error(opt->check.errors, id_node,
"%s name \"%s\" must start A-Z", desc, ast_name(id_node));
return false;
} else {
ast_error(opt->check.errors, id_node,
"%s name \"%s\" must start A-Z or _(A-Z)", desc, ast_name(id_node));
return false;
}
}
// Check each character looking for ticks and underscores
for(; *name != '\0' && *name != '\''; name++)
{
if(*name == '_')
{
if((spec & ALLOW_UNDERSCORE) == 0)
{
ast_error(opt->check.errors, id_node,
"%s name \"%s\" cannot contain underscores", desc, ast_name(id_node));
return false;
}
if(prev == '_')
{
ast_error(opt->check.errors, id_node,
"%s name \"%s\" cannot contain double underscores", desc,
ast_name(id_node));
return false;
}
}
prev = *name;
}
// Only ticks (or nothing) left
// Check for ending with _
if(prev == '_')
{
ast_error(opt->check.errors, id_node,
"%s name \"%s\" cannot have a trailing underscore", desc, ast_name(id_node));
return false;
}
if(*name == '\0')
return true;
// Should only be ticks left
pony_assert(*name == '\'');
if((spec & ALLOW_TICK) == 0)
{
ast_error(opt->check.errors, id_node,
"%s name \"%s\" cannot contain prime (')", desc, ast_name(id_node));
return false;
}
for(; *name != '\0'; name++)
{
if(*name != '\'')
{
      ast_error(opt->check.errors, id_node,
        "prime (') can only appear at the end of %s name \"%s\"", desc,
ast_name(id_node));
return false;
}
}
return true;
}
bool check_id_type(pass_opt_t* opt, ast_t* id_node, const char* entity_desc)
{
// _?[A-Z][A-Za-z0-9]*
return check_id(opt, id_node, entity_desc,
START_UPPER | ALLOW_LEADING_UNDERSCORE);
}
bool check_id_type_param(pass_opt_t* opt, ast_t* id_node)
{
// [A-Z][A-Za-z0-9]*
return check_id(opt, id_node, "type parameter",
START_UPPER);
}
bool check_id_package(pass_opt_t* opt, ast_t* id_node)
{
// [a-z][A-Za-z0-9_]* (and no double or trailing underscores)
return check_id(opt, id_node, "package",
START_LOWER | ALLOW_UNDERSCORE);
}
bool check_id_field(pass_opt_t* opt, ast_t* id_node)
{
// _?[a-z][A-Za-z0-9_]* (and no double or trailing underscores)
return check_id(opt, id_node, "field",
START_LOWER | ALLOW_LEADING_UNDERSCORE | ALLOW_UNDERSCORE | ALLOW_TICK);
}
bool check_id_method(pass_opt_t* opt, ast_t* id_node)
{
// _?[a-z][A-Za-z0-9_]* (and no double or trailing underscores)
return check_id(opt, id_node, "method",
START_LOWER | ALLOW_LEADING_UNDERSCORE | ALLOW_UNDERSCORE);
}
bool check_id_param(pass_opt_t* opt, ast_t* id_node)
{
// [a-z][A-Za-z0-9_]*'* (and no double or trailing underscores)
return check_id(opt, id_node, "parameter",
START_LOWER | ALLOW_UNDERSCORE | ALLOW_TICK);
}
bool check_id_local(pass_opt_t* opt, ast_t* id_node)
{
// (_|[a-z][A-Za-z0-9_]*'*) (and no double or trailing underscores)
return check_id(opt, id_node, "local variable",
START_LOWER | ALLOW_UNDERSCORE | ALLOW_TICK | ALLOW_DONTCARE);
}
bool is_name_type(const char* name)
{
if(*name == '$')
name++;
if(*name == '_')
name++;
return (*name >= 'A') && (*name <= 'Z');
}
bool is_name_private(const char* name)
{
return ((name[0] == '_') && (name[1] != '\0')) ||
(is_name_internal_test(name) && (name[1] == '_'));
}
bool is_name_ffi(const char* name)
{
return name[0] == '@';
}
bool is_name_internal_test(const char* name)
{
return name[0] == '$';
}
bool is_name_dontcare(const char* name)
{
return (name[0] == '_') && (name[1] == '\0');
}
| 1 | 13,816 | Can you revert the changes to this file? | ponylang-ponyc | c |
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// Package soc provides the single-owner chunk implementation
+// and validator.
package soc
import ( | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package soc
import (
"bytes"
"errors"
"fmt"
"github.com/ethersphere/bee/pkg/bmtpool"
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/swarm"
)
const (
IdSize = 32
SignatureSize = 65
AddressSize = crypto.AddressSize
minChunkSize = IdSize + SignatureSize + swarm.SpanSize
)
// Id is a soc identifier
type Id []byte
// Owner is a wrapper that enforces valid length address of soc owner.
type Owner struct {
address []byte
}
// NewOwner creates a new Owner.
func NewOwner(address []byte) (*Owner, error) {
if len(address) != AddressSize {
return nil, fmt.Errorf("invalid address %x", address)
}
return &Owner{
address: address,
}, nil
}
// Soc wraps a single soc.
type Soc struct {
id Id
signature []byte
signer crypto.Signer
owner *Owner
Chunk swarm.Chunk
}
// NewChunk is a convenience function to create a single-owner chunk ready to be sent
// on the network.
func NewChunk(id Id, ch swarm.Chunk, signer crypto.Signer) (swarm.Chunk, error) {
s := New(id, ch)
err := s.AddSigner(signer)
if err != nil {
return nil, err
}
return s.ToChunk()
}
// New creates a new Soc representation from arbitrary soc id and
// a content-addressed chunk.
//
// By default the span of the soc data is set to the length
// of the payload.
func New(id Id, ch swarm.Chunk) *Soc {
return &Soc{
id: id,
Chunk: ch,
}
}
// WithOwnerAddress provides the possibility of setting the ethereum
// address for the owner of an soc in the absence of a signer.
func (s *Soc) WithOwnerAddress(ownerAddress *Owner) *Soc {
s.owner = ownerAddress
return s
}
// AddSigner currently sets a single signer for the soc.
//
// This method will overwrite any value set with WithOwnerAddress with
// the address derived from the given signer.
func (s *Soc) AddSigner(signer crypto.Signer) error {
publicKey, err := signer.PublicKey()
if err != nil {
return err
}
ownerAddressBytes, err := crypto.NewEthereumAddress(*publicKey)
if err != nil {
return err
}
ownerAddress, err := NewOwner(ownerAddressBytes)
if err != nil {
return err
}
s.signer = signer
s.owner = ownerAddress
return nil
}
// OwnerAddress returns the ethereum address of the signer of the Chunk.
func (s *Soc) OwnerAddress() []byte {
return s.owner.address
}
// Address returns the soc Chunk address.
func (s *Soc) Address() (swarm.Address, error) {
return CreateAddress(s.id, s.owner)
}
// FromChunk recreates a Soc representation from swarm.Chunk data.
func FromChunk(sch swarm.Chunk) (*Soc, error) {
chunkData := sch.Data()
if len(chunkData) < minChunkSize {
return nil, errors.New("less than minimum length")
}
// add all the data fields
s := &Soc{}
cursor := 0
s.id = chunkData[cursor : cursor+IdSize]
cursor += IdSize
s.signature = chunkData[cursor : cursor+SignatureSize]
cursor += SignatureSize
spanBytes := chunkData[cursor : cursor+swarm.SpanSize]
cursor += swarm.SpanSize
ch, err := contentAddressedChunk(chunkData[cursor:], spanBytes)
if err != nil {
return nil, err
}
toSignBytes, err := toSignDigest(s.id, ch.Address().Bytes())
if err != nil {
return nil, err
}
// recover owner information
recoveredEthereumAddress, err := recoverAddress(s.signature, toSignBytes)
if err != nil {
return nil, err
}
owner, err := NewOwner(recoveredEthereumAddress)
if err != nil {
return nil, err
}
s.owner = owner
s.Chunk = ch
return s, nil
}
// ToChunk generates a signed chunk payload ready for submission to the swarm network.
//
// The method will fail if no signer has been added.
func (s *Soc) ToChunk() (swarm.Chunk, error) {
var err error
if s.signer == nil {
return nil, errors.New("signer missing")
}
// generate the data to sign
toSignBytes, err := toSignDigest(s.id, s.Chunk.Address().Bytes())
if err != nil {
return nil, err
}
// sign the chunk
signature, err := s.signer.Sign(toSignBytes)
if err != nil {
return nil, err
}
// prepare the payload
buf := bytes.NewBuffer(nil)
buf.Write(s.id)
buf.Write(signature)
buf.Write(s.Chunk.Data())
// create chunk
socAddress, err := s.Address()
if err != nil {
return nil, err
}
return swarm.NewChunk(socAddress, buf.Bytes()), nil
}
// toSignDigest creates a digest suitable for signing to represent the soc.
func toSignDigest(id Id, sum []byte) ([]byte, error) {
h := swarm.NewHasher()
_, err := h.Write(id)
if err != nil {
return nil, err
}
_, err = h.Write(sum)
if err != nil {
return nil, err
}
return h.Sum(nil), nil
}
// CreateAddress creates a new soc address from the soc id and the ethereum address of the signer
func CreateAddress(id Id, owner *Owner) (swarm.Address, error) {
h := swarm.NewHasher()
_, err := h.Write(id)
if err != nil {
return swarm.ZeroAddress, err
}
_, err = h.Write(owner.address)
if err != nil {
return swarm.ZeroAddress, err
}
sum := h.Sum(nil)
return swarm.NewAddress(sum), nil
}
// recoverAddress returns the ethereum address of the owner of a soc.
func recoverAddress(signature, digest []byte) ([]byte, error) {
recoveredPublicKey, err := crypto.Recover(signature, digest)
if err != nil {
return nil, err
}
recoveredEthereumAddress, err := crypto.NewEthereumAddress(*recoveredPublicKey)
if err != nil {
return nil, err
}
return recoveredEthereumAddress, nil
}
func contentAddressedChunk(data, spanBytes []byte) (swarm.Chunk, error) {
hasher := bmtpool.Get()
defer bmtpool.Put(hasher)
// execute hash, compare and return result
err := hasher.SetSpanBytes(spanBytes)
if err != nil {
return nil, err
}
_, err = hasher.Write(data)
if err != nil {
return nil, err
}
s := hasher.Sum(nil)
payload := append(spanBytes, data...)
address := swarm.NewAddress(s)
return swarm.NewChunk(address, payload), nil
}
| 1 | 13,700 | Suggestion to add: a soc is a chunk whose address is derived from (...) | ethersphere-bee | go |
@@ -110,6 +110,8 @@ export const API_ERROR = {
DEPRECATED_BASIC_HEADER: 'basic authentication is deprecated, please use JWT instead',
BAD_FORMAT_USER_GROUP: 'user groups is different than an array',
RESOURCE_UNAVAILABLE: 'resource unavailable',
+  USERNAME_PASSWORD_REQUIRED: 'username and password are required',
+ USERNAME_ALREADY_REGISTERED: 'username is already registered',
};
export const APP_ERROR = { | 1 | /**
* @prettier
*/
// @flow
export const DEFAULT_PORT: string = '4873';
export const DEFAULT_PROTOCOL: string = 'http';
export const DEFAULT_DOMAIN: string = 'localhost';
export const TIME_EXPIRATION_24H: string = '24h';
export const TIME_EXPIRATION_7D: string = '7d';
export const DIST_TAGS = 'dist-tags';
export const keyPem = 'verdaccio-key.pem';
export const certPem = 'verdaccio-cert.pem';
export const csrPem = 'verdaccio-csr.pem';
export const HEADERS = {
JSON: 'application/json',
CONTENT_TYPE: 'Content-type',
FORWARDED_PROTO: 'X-Forwarded-Proto',
ETAG: 'ETag',
JSON_CHARSET: 'application/json; charset=utf-8',
OCTET_STREAM: 'application/octet-stream; charset=utf-8',
TEXT_CHARSET: 'text/plain; charset=utf-8',
WWW_AUTH: 'WWW-Authenticate',
GZIP: 'gzip',
};
export const CHARACTER_ENCODING = {
UTF8: 'utf8',
};
export const HEADER_TYPE = {
CONTENT_ENCODING: 'content-encoding',
CONTENT_TYPE: 'content-type',
CONTENT_LENGTH: 'content-length',
ACCEPT_ENCODING: 'accept-encoding',
};
export const ERROR_CODE = {
token_required: 'token is required',
};
export const TOKEN_BASIC = 'Basic';
export const TOKEN_BEARER = 'Bearer';
export const DEFAULT_REGISTRY = 'https://registry.npmjs.org';
export const DEFAULT_UPLINK = 'npmjs';
export const ROLES = {
$ALL: '$all',
ALL: 'all',
$AUTH: '$authenticated',
$ANONYMOUS: '$anonymous',
DEPRECATED_ALL: '@all',
DEPRECATED_AUTH: '@authenticated',
DEPRECATED_ANONYMOUS: '@anonymous',
};
export const HTTP_STATUS = {
OK: 200,
CREATED: 201,
MULTIPLE_CHOICES: 300,
NOT_MODIFIED: 304,
BAD_REQUEST: 400,
UNAUTHORIZED: 401,
FORBIDDEN: 403,
NOT_FOUND: 404,
CONFLICT: 409,
UNSUPPORTED_MEDIA: 415,
BAD_DATA: 422,
INTERNAL_ERROR: 500,
SERVICE_UNAVAILABLE: 503,
LOOP_DETECTED: 508,
};
export const API_MESSAGE = {
PKG_CREATED: 'created new package',
PKG_CHANGED: 'package changed',
PKG_REMOVED: 'package removed',
PKG_PUBLISHED: 'package published',
TARBALL_REMOVED: 'tarball removed',
TAG_UPDATED: 'tags updated',
TAG_REMOVED: 'tag removed',
TAG_ADDED: 'package tagged',
LOGGED_OUT: 'Logged out',
};
export const API_ERROR = {
PLUGIN_ERROR: 'bug in the auth plugin system',
CONFIG_BAD_FORMAT: 'config file must be an object',
BAD_USERNAME_PASSWORD: 'bad username/password, access denied',
NO_PACKAGE: 'no such package available',
BAD_DATA: 'bad data',
NOT_ALLOWED: 'not allowed to access package',
INTERNAL_SERVER_ERROR: 'internal server error',
UNKNOWN_ERROR: 'unknown error',
NOT_PACKAGE_UPLINK: 'package does not exist on uplink',
UPLINK_OFFLINE_PUBLISH: 'one of the uplinks is down, refuse to publish',
UPLINK_OFFLINE: 'uplink is offline',
CONTENT_MISMATCH: 'content length mismatch',
NOT_FILE_UPLINK: "file doesn't exist on uplink",
MAX_USERS_REACHED: 'maximum amount of users reached',
VERSION_NOT_EXIST: "this version doesn't exist",
FILE_NOT_FOUND: 'File not found',
BAD_STATUS_CODE: 'bad status code',
PACKAGE_EXIST: 'this package is already present',
BAD_AUTH_HEADER: 'bad authorization header',
WEB_DISABLED: 'Web interface is disabled in the config file',
DEPRECATED_BASIC_HEADER: 'basic authentication is deprecated, please use JWT instead',
BAD_FORMAT_USER_GROUP: 'user groups is different than an array',
RESOURCE_UNAVAILABLE: 'resource unavailable',
};
export const APP_ERROR = {
CONFIG_NOT_VALID: 'CONFIG: it does not look like a valid config file',
};
export const DEFAULT_NO_README = 'ERROR: No README data found!';
export const MODULE_NOT_FOUND = 'MODULE_NOT_FOUND';
export const WEB_TITLE = 'Verdaccio';
export const PACKAGE_ACCESS = {
SCOPE: '@*/*',
ALL: '**',
};
export const UPDATE_BANNER = {
CHANGELOG_URL: 'https://github.com/verdaccio/verdaccio/releases/tag/',
};
export const STORAGE = {
PACKAGE_FILE_NAME: 'package.json',
FILE_EXIST_ERROR: 'EEXISTS',
NO_SUCH_FILE_ERROR: 'ENOENT',
DEFAULT_REVISION: '0-0000000000000000',
};
| 1 | 19,492 | It doesn't make sense. If we are going to have a profile page where the user can change the password while already logged in, there is no need to register a new username, only something like: new password, confirm new password. And "USERNAME_ALREADY_REGISTERED" only makes sense if we have a register page. Are we going to have one? We need to talk about it. | verdaccio-verdaccio | js |
@@ -747,14 +747,14 @@ namespace Nethermind.State
_needsStateRootUpdate = false;
}
- public void CommitTree()
+ public void CommitTree(long blockNumber)
{
if (_needsStateRootUpdate)
{
RecalculateStateRoot();
}
- _tree.Commit();
+ _tree.Commit(blockNumber);
}
}
} | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Collections.Generic;
using System.Runtime.CompilerServices;
using Nethermind.Core;
using Nethermind.Core.Crypto;
using Nethermind.Core.Extensions;
using Nethermind.Core.Resettables;
using Nethermind.Core.Specs;
using Nethermind.Db;
using Nethermind.Dirichlet.Numerics;
using Nethermind.Logging;
using Nethermind.Trie;
using Metrics = Nethermind.Db.Metrics;
[assembly: InternalsVisibleTo("Nethermind.State.Test")]
[assembly: InternalsVisibleTo("Nethermind.Benchmark")]
[assembly: InternalsVisibleTo("Nethermind.Blockchain.Test")]
[assembly: InternalsVisibleTo("Nethermind.Synchronization.Test")]
namespace Nethermind.State
{
public class StateProvider : IStateProvider
{
private const int StartCapacity = Resettable.StartCapacity;
private ResettableDictionary<Address, Stack<int>> _intraBlockCache = new ResettableDictionary<Address, Stack<int>>();
private ResettableHashSet<Address> _committedThisRound = new ResettableHashSet<Address>();
private readonly List<Change> _keptInCache = new List<Change>();
private readonly ILogger _logger;
private readonly IDb _codeDb;
private readonly ILogManager _logManager;
private int _capacity = StartCapacity;
private Change[] _changes = new Change[StartCapacity];
private int _currentPosition = -1;
public StateProvider(StateTree stateTree, IDb codeDb, ILogManager logManager)
{
_logManager = logManager ?? throw new ArgumentNullException(nameof(logManager));
_logger = logManager.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager));
_codeDb = codeDb ?? throw new ArgumentNullException(nameof(codeDb));
_tree = stateTree ?? throw new ArgumentNullException(nameof(stateTree));
}
public StateProvider(ISnapshotableDb stateDb, IDb codeDb, ILogManager logManager)
: this(new StateTree(stateDb), codeDb, logManager)
{
}
public void Accept(ITreeVisitor visitor, Keccak stateRoot)
{
if (visitor == null) throw new ArgumentNullException(nameof(visitor));
if (stateRoot == null) throw new ArgumentNullException(nameof(stateRoot));
_tree.Accept(visitor, stateRoot, true);
}
public string DumpState()
{
TreeDumper dumper = new TreeDumper();
_tree.Accept(dumper, _tree.RootHash, true);
return dumper.ToString();
}
public TrieStats CollectStats()
{
TrieStatsCollector collector = new TrieStatsCollector(_codeDb, _logManager);
_tree.Accept(collector, _tree.RootHash, true);
return collector.Stats;
}
private bool _needsStateRootUpdate;
public void RecalculateStateRoot()
{
_tree.UpdateRootHash();
_needsStateRootUpdate = false;
}
public Keccak StateRoot
{
get
{
if (_needsStateRootUpdate)
{
throw new InvalidOperationException();
}
return _tree.RootHash;
}
set => _tree.RootHash = value;
}
private readonly StateTree _tree;
public bool AccountExists(Address address)
{
if (_intraBlockCache.ContainsKey(address))
{
return _changes[_intraBlockCache[address].Peek()].ChangeType != ChangeType.Delete;
}
return GetAndAddToCache(address) != null;
}
public bool IsEmptyAccount(Address address)
{
return GetThroughCache(address).IsEmpty;
}
public Account GetAccount(Address address)
{
return GetThroughCache(address);
}
public bool IsDeadAccount(Address address)
{
Account account = GetThroughCache(address);
return account?.IsEmpty ?? true;
}
public UInt256 GetNonce(Address address)
{
Account account = GetThroughCache(address);
return account?.Nonce ?? UInt256.Zero;
}
public Keccak GetStorageRoot(Address address)
{
Account account = GetThroughCache(address);
return account.StorageRoot;
}
public UInt256 GetBalance(Address address)
{
Account account = GetThroughCache(address);
return account?.Balance ?? UInt256.Zero;
}
public void UpdateCodeHash(Address address, Keccak codeHash, IReleaseSpec releaseSpec)
{
_needsStateRootUpdate = true;
Account account = GetThroughCache(address);
if (account.CodeHash != codeHash)
{
if (_logger.IsTrace) _logger.Trace($" Update {address} C {account.CodeHash} -> {codeHash}");
Account changedAccount = account.WithChangedCodeHash(codeHash);
PushUpdate(address, changedAccount);
}
else if (releaseSpec.IsEip158Enabled)
{
if (_logger.IsTrace) _logger.Trace($" Touch {address} (code hash)");
Account touched = GetThroughCache(address);
if (touched.IsEmpty)
{
PushTouch(address, touched, releaseSpec, touched.Balance.IsZero);
}
}
}
private void SetNewBalance(Address address, in UInt256 balanceChange, IReleaseSpec releaseSpec, bool isSubtracting)
{
_needsStateRootUpdate = true;
Account GetThroughCacheCheckExists()
{
Account result = GetThroughCache(address);
if (result == null)
{
if (_logger.IsError) _logger.Error("Updating balance of a non-existing account");
throw new InvalidOperationException("Updating balance of a non-existing account");
}
return result;
}
bool isZero = balanceChange.IsZero;
if (isZero)
{
if (releaseSpec.IsEip158Enabled)
{
Account touched = GetThroughCacheCheckExists();
if (_logger.IsTrace) _logger.Trace($" Touch {address} (balance)");
if (touched.IsEmpty)
{
PushTouch(address, touched, releaseSpec, isZero);
}
}
return;
}
Account account = GetThroughCacheCheckExists();
if (isSubtracting && account.Balance < balanceChange)
{
throw new InsufficientBalanceException();
}
UInt256 newBalance = isSubtracting ? account.Balance - balanceChange : account.Balance + balanceChange;
Account changedAccount = account.WithChangedBalance(newBalance);
if (_logger.IsTrace) _logger.Trace($" Update {address} B {account.Balance} -> {newBalance} ({(isSubtracting ? "-" : "+")}{balanceChange})");
PushUpdate(address, changedAccount);
}
public void SubtractFromBalance(Address address, in UInt256 balanceChange, IReleaseSpec releaseSpec)
{
_needsStateRootUpdate = true;
SetNewBalance(address, balanceChange, releaseSpec, true);
}
public void AddToBalance(Address address, in UInt256 balanceChange, IReleaseSpec releaseSpec)
{
_needsStateRootUpdate = true;
SetNewBalance(address, balanceChange, releaseSpec, false);
}
/// <summary>
/// This is a coupling point between storage provider and state provider.
/// This is pointing at the architectural change likely required where Storage and State Provider are represented by a single world state class.
/// </summary>
/// <param name="address"></param>
/// <param name="storageRoot"></param>
public void UpdateStorageRoot(Address address, Keccak storageRoot)
{
_needsStateRootUpdate = true;
Account account = GetThroughCache(address);
if (account.StorageRoot != storageRoot)
{
if (_logger.IsTrace) _logger.Trace($" Update {address} S {account.StorageRoot} -> {storageRoot}");
Account changedAccount = account.WithChangedStorageRoot(storageRoot);
PushUpdate(address, changedAccount);
}
}
public void IncrementNonce(Address address)
{
_needsStateRootUpdate = true;
Account account = GetThroughCache(address);
Account changedAccount = account.WithChangedNonce(account.Nonce + 1);
if (_logger.IsTrace) _logger.Trace($" Update {address} N {account.Nonce} -> {changedAccount.Nonce}");
PushUpdate(address, changedAccount);
}
public void DecrementNonce(Address address)
{
_needsStateRootUpdate = true;
Account account = GetThroughCache(address);
Account changedAccount = account.WithChangedNonce(account.Nonce - 1);
if (_logger.IsTrace) _logger.Trace($" Update {address} N {account.Nonce} -> {changedAccount.Nonce}");
PushUpdate(address, changedAccount);
}
public Keccak UpdateCode(byte[] code)
{
_needsStateRootUpdate = true;
if (code.Length == 0)
{
return Keccak.OfAnEmptyString;
}
Keccak codeHash = Keccak.Compute(code);
_codeDb[codeHash.Bytes] = code;
return codeHash;
}
public Keccak GetCodeHash(Address address)
{
Account account = GetThroughCache(address);
return account?.CodeHash ?? Keccak.OfAnEmptyString;
}
public byte[] GetCode(Keccak codeHash)
{
return codeHash == Keccak.OfAnEmptyString ? Array.Empty<byte>() : _codeDb[codeHash.Bytes];
}
public byte[] GetCode(Address address)
{
Account account = GetThroughCache(address);
if (account == null)
{
return Array.Empty<byte>();
}
return GetCode(account.CodeHash);
}
public void DeleteAccount(Address address)
{
_needsStateRootUpdate = true;
PushDelete(address);
}
public int TakeSnapshot()
{
if (_logger.IsTrace) _logger.Trace($"State snapshot {_currentPosition}");
return _currentPosition;
}
public void Restore(int snapshot)
{
if (snapshot > _currentPosition)
{
throw new InvalidOperationException($"{nameof(StateProvider)} tried to restore snapshot {snapshot} beyond current position {_currentPosition}");
}
if (_logger.IsTrace) _logger.Trace($"Restoring state snapshot {snapshot}");
if (snapshot == _currentPosition)
{
return;
}
for (int i = 0; i < _currentPosition - snapshot; i++)
{
Change change = _changes[_currentPosition - i];
if (_intraBlockCache[change.Address].Count == 1)
{
if (change.ChangeType == ChangeType.JustCache)
{
int actualPosition = _intraBlockCache[change.Address].Pop();
if (actualPosition != _currentPosition - i)
{
throw new InvalidOperationException($"Expected actual position {actualPosition} to be equal to {_currentPosition} - {i}");
}
_keptInCache.Add(change);
_changes[actualPosition] = null;
continue;
}
}
_changes[_currentPosition - i] = null; // TODO: temp, ???
int forChecking = _intraBlockCache[change.Address].Pop();
if (forChecking != _currentPosition - i)
{
throw new InvalidOperationException($"Expected checked value {forChecking} to be equal to {_currentPosition} - {i}");
}
if (_intraBlockCache[change.Address].Count == 0)
{
_intraBlockCache.Remove(change.Address);
}
}
_currentPosition = snapshot;
foreach (Change kept in _keptInCache)
{
_currentPosition++;
_changes[_currentPosition] = kept;
_intraBlockCache[kept.Address].Push(_currentPosition);
}
_keptInCache.Clear();
}
public void CreateAccount(Address address, in UInt256 balance)
{
_needsStateRootUpdate = true;
if (_logger.IsTrace) _logger.Trace($"Creating account: {address} with balance {balance}");
Account account = balance.IsZero ? Account.TotallyEmpty : new Account(balance);
PushNew(address, account);
}
public void Commit(IReleaseSpec releaseSpec)
{
Commit(releaseSpec, null);
}
private struct ChangeTrace
{
public ChangeTrace(Account before, Account after)
{
After = after;
Before = before;
}
public ChangeTrace(Account after)
{
After = after;
Before = null;
}
public Account Before { get; }
public Account After { get; }
}
public void Commit(IReleaseSpec releaseSpec, IStateTracer stateTracer)
{
if (_currentPosition == -1)
{
if (_logger.IsTrace) _logger.Trace(" no state changes to commit");
return;
}
if (_logger.IsTrace) _logger.Trace($"Committing state changes (at {_currentPosition})");
if (_changes[_currentPosition] == null)
{
                throw new InvalidOperationException($"Change at current position {_currentPosition} was null when committing {nameof(StateProvider)}");
}
if (_changes[_currentPosition + 1] != null)
{
                throw new InvalidOperationException($"Change after current position ({_currentPosition} + 1) was not null when committing {nameof(StateProvider)}");
}
bool isTracing = stateTracer != null;
Dictionary<Address, ChangeTrace> trace = null;
if (isTracing)
{
trace = new Dictionary<Address, ChangeTrace>();
}
for (int i = 0; i <= _currentPosition; i++)
{
Change change = _changes[_currentPosition - i];
if (!isTracing && change.ChangeType == ChangeType.JustCache)
{
continue;
}
if (_committedThisRound.Contains(change.Address))
{
if (isTracing && change.ChangeType == ChangeType.JustCache)
{
trace[change.Address] = new ChangeTrace(change.Account, trace[change.Address].After);
}
continue;
}
// because it was not committed yet it means that the just cache is the only state (so it was read only)
if (isTracing && change.ChangeType == ChangeType.JustCache)
{
_readsForTracing.Add(change.Address);
continue;
}
int forAssertion = _intraBlockCache[change.Address].Pop();
if (forAssertion != _currentPosition - i)
{
throw new InvalidOperationException($"Expected checked value {forAssertion} to be equal to {_currentPosition} - {i}");
}
_committedThisRound.Add(change.Address);
switch (change.ChangeType)
{
case ChangeType.JustCache:
{
break;
}
case ChangeType.Touch:
case ChangeType.Update:
{
if (releaseSpec.IsEip158Enabled && change.Account.IsEmpty)
{
if (_logger.IsTrace) _logger.Trace($" Commit remove empty {change.Address} B = {change.Account.Balance} N = {change.Account.Nonce}");
SetState(change.Address, null);
if (isTracing)
{
trace[change.Address] = new ChangeTrace(null);
}
}
else
{
if (_logger.IsTrace) _logger.Trace($" Commit update {change.Address} B = {change.Account.Balance} N = {change.Account.Nonce} C = {change.Account.CodeHash}");
SetState(change.Address, change.Account);
if (isTracing)
{
trace[change.Address] = new ChangeTrace(change.Account);
}
}
break;
}
case ChangeType.New:
{
if (!releaseSpec.IsEip158Enabled || !change.Account.IsEmpty)
{
if (_logger.IsTrace) _logger.Trace($" Commit create {change.Address} B = {change.Account.Balance} N = {change.Account.Nonce}");
SetState(change.Address, change.Account);
if (isTracing)
{
trace[change.Address] = new ChangeTrace(change.Account);
}
}
break;
}
case ChangeType.Delete:
{
if (_logger.IsTrace) _logger.Trace($" Commit remove {change.Address}");
bool wasItCreatedNow = false;
while (_intraBlockCache[change.Address].Count > 0)
{
int previousOne = _intraBlockCache[change.Address].Pop();
wasItCreatedNow |= _changes[previousOne].ChangeType == ChangeType.New;
if (wasItCreatedNow)
{
break;
}
}
if (!wasItCreatedNow)
{
SetState(change.Address, null);
if (isTracing)
{
trace[change.Address] = new ChangeTrace(null);
}
}
break;
}
default:
throw new ArgumentOutOfRangeException();
}
}
if (isTracing)
{
foreach (Address nullRead in _readsForTracing)
{
                    // this may be enough, let us write tests
stateTracer.ReportAccountRead(nullRead);
}
}
Resettable<Change>.Reset(ref _changes, ref _capacity, ref _currentPosition, StartCapacity);
_committedThisRound.Reset();
_readsForTracing.Clear();
_intraBlockCache.Reset();
if (isTracing)
{
ReportChanges(stateTracer, trace);
}
}
private void ReportChanges(IStateTracer stateTracer, Dictionary<Address, ChangeTrace> trace)
{
foreach ((Address address, ChangeTrace change) in trace)
{
bool someChangeReported = false;
Account before = change.Before;
Account after = change.After;
UInt256? beforeBalance = before?.Balance;
UInt256? afterBalance = after?.Balance;
UInt256? beforeNonce = before?.Nonce;
UInt256? afterNonce = after?.Nonce;
Keccak beforeCodeHash = before?.CodeHash;
Keccak afterCodeHash = after?.CodeHash;
if (beforeCodeHash != afterCodeHash)
{
byte[] beforeCode = beforeCodeHash == null
? null
: beforeCodeHash == Keccak.OfAnEmptyString
? Array.Empty<byte>()
: _codeDb.Get(beforeCodeHash);
byte[] afterCode = afterCodeHash == null
? null
: afterCodeHash == Keccak.OfAnEmptyString
? Array.Empty<byte>()
: _codeDb.Get(afterCodeHash);
if (!((beforeCode?.Length ?? 0) == 0 && (afterCode?.Length ?? 0) == 0))
{
stateTracer.ReportCodeChange(address, beforeCode, afterCode);
}
someChangeReported = true;
}
if (afterBalance != beforeBalance)
{
stateTracer.ReportBalanceChange(address, beforeBalance, afterBalance);
someChangeReported = true;
}
if (afterNonce != beforeNonce)
{
stateTracer.ReportNonceChange(address, beforeNonce, afterNonce);
someChangeReported = true;
}
if (!someChangeReported)
{
stateTracer.ReportAccountRead(address);
}
}
}
private Account GetState(Address address)
{
Metrics.StateTreeReads++;
Account account = _tree.Get(address);
return account;
}
private void SetState(Address address, Account account)
{
_needsStateRootUpdate = true;
Metrics.StateTreeWrites++;
_tree.Set(address, account);
}
private HashSet<Address> _readsForTracing = new HashSet<Address>();
private Account GetAndAddToCache(Address address)
{
Account account = GetState(address);
if (account != null)
{
PushJustCache(address, account);
}
else
{
// just for tracing - potential perf hit, maybe a better solution?
_readsForTracing.Add(address);
}
return account;
}
private Account GetThroughCache(Address address)
{
if (_intraBlockCache.ContainsKey(address))
{
return _changes[_intraBlockCache[address].Peek()].Account;
}
Account account = GetAndAddToCache(address);
return account;
}
private void PushJustCache(Address address, Account account)
{
Push(ChangeType.JustCache, address, account);
}
private void PushUpdate(Address address, Account account)
{
Push(ChangeType.Update, address, account);
}
private void PushTouch(Address address, Account account, IReleaseSpec releaseSpec, bool isZero)
{
if (isZero && releaseSpec.IsEip158IgnoredAccount(address)) return;
Push(ChangeType.Touch, address, account);
}
private void PushDelete(Address address)
{
Push(ChangeType.Delete, address, null);
}
private void Push(ChangeType changeType, Address address, Account touchedAccount)
{
SetupCache(address);
if (changeType == ChangeType.Touch
&& _changes[_intraBlockCache[address].Peek()].ChangeType == ChangeType.Touch)
{
return;
}
IncrementChangePosition();
_intraBlockCache[address].Push(_currentPosition);
_changes[_currentPosition] = new Change(changeType, address, touchedAccount);
}
private void PushNew(Address address, Account account)
{
SetupCache(address);
IncrementChangePosition();
_intraBlockCache[address].Push(_currentPosition);
_changes[_currentPosition] = new Change(ChangeType.New, address, account);
}
private void IncrementChangePosition()
{
Resettable<Change>.IncrementPosition(ref _changes, ref _capacity, ref _currentPosition);
}
private void SetupCache(Address address)
{
if (!_intraBlockCache.ContainsKey(address))
{
_intraBlockCache[address] = new Stack<int>();
}
}
private enum ChangeType
{
JustCache,
Touch,
Update,
New,
Delete
}
private class Change
{
public Change(ChangeType type, Address address, Account account)
{
ChangeType = type;
Address = address;
Account = account;
}
public ChangeType ChangeType { get; }
public Address Address { get; }
public Account Account { get; }
}
public void Reset()
{
if (_logger.IsTrace) _logger.Trace("Clearing state provider caches");
_intraBlockCache.Reset();
_committedThisRound.Reset();
_readsForTracing.Clear();
_currentPosition = -1;
Array.Clear(_changes, 0, _changes.Length);
_needsStateRootUpdate = false;
}
public void CommitTree()
{
if (_needsStateRootUpdate)
{
RecalculateStateRoot();
}
_tree.Commit();
}
}
} | 1 | 24,272 | Not a great fan of this change - maybe worth splitting into CommitTree() and CloseBlock(long blockNumber)? | NethermindEth-nethermind | .cs
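A rough sketch of the split the comment proposes; the method names come from the comment itself, and keeping a parameterless _tree.Commit() alongside the block-number overload is an assumption, not something the diff guarantees.

        // Sketch: CommitTree stays concerned only with the state root, while
        // the block-number bookkeeping moves into a per-block lifecycle method.
        public void CommitTree()
        {
            if (_needsStateRootUpdate)
            {
                RecalculateStateRoot();
            }
            _tree.Commit();
        }

        // Hypothetical per-block hook that forwards the block number to the tree.
        public void CloseBlock(long blockNumber)
        {
            _tree.Commit(blockNumber);
        }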
@@ -65,7 +65,9 @@ namespace Microsoft.AspNetCore.Connections
public CancellationToken ConnectionClosed { get; set; }
- public virtual void Abort()
+ public void Abort() => Abort(abortReason: null);
+
+ public virtual void Abort(ConnectionAbortedException abortReason)
{
ThreadPool.QueueUserWorkItem(cts => ((CancellationTokenSource)cts).Cancel(), _connectionClosedTokenSource);
} | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.IO.Pipelines;
using System.Security.Claims;
using System.Threading;
using Microsoft.AspNetCore.Connections.Features;
using Microsoft.AspNetCore.Http.Features;
namespace Microsoft.AspNetCore.Connections
{
public class DefaultConnectionContext : ConnectionContext,
IDisposable,
IConnectionIdFeature,
IConnectionItemsFeature,
IConnectionTransportFeature,
IConnectionUserFeature,
IConnectionLifetimeFeature
{
private CancellationTokenSource _connectionClosedTokenSource = new CancellationTokenSource();
public DefaultConnectionContext() :
this(Guid.NewGuid().ToString())
{
ConnectionClosed = _connectionClosedTokenSource.Token;
}
/// <summary>
/// Creates the DefaultConnectionContext without Pipes to avoid upfront allocations.
/// The caller is expected to set the <see cref="Transport"/> and <see cref="Application"/> pipes manually.
/// </summary>
/// <param name="id"></param>
public DefaultConnectionContext(string id)
{
ConnectionId = id;
Features = new FeatureCollection();
Features.Set<IConnectionUserFeature>(this);
Features.Set<IConnectionItemsFeature>(this);
Features.Set<IConnectionIdFeature>(this);
Features.Set<IConnectionTransportFeature>(this);
Features.Set<IConnectionLifetimeFeature>(this);
}
public DefaultConnectionContext(string id, IDuplexPipe transport, IDuplexPipe application)
: this(id)
{
Transport = transport;
Application = application;
}
public override string ConnectionId { get; set; }
public override IFeatureCollection Features { get; }
public ClaimsPrincipal User { get; set; }
public override IDictionary<object, object> Items { get; set; } = new ConnectionItems();
public IDuplexPipe Application { get; set; }
public override IDuplexPipe Transport { get; set; }
public CancellationToken ConnectionClosed { get; set; }
public virtual void Abort()
{
ThreadPool.QueueUserWorkItem(cts => ((CancellationTokenSource)cts).Cancel(), _connectionClosedTokenSource);
}
public void Dispose()
{
_connectionClosedTokenSource.Dispose();
}
}
}
| 1 | 15,815 | Ugh, if we're going to make a breaking change, I'd like this to be moved to ConnectionContext. | aspnet-KestrelHttpServer | .cs |
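For illustration, a hedged sketch of what hoisting the abort surface onto the base ConnectionContext could look like; whether the base class actually gains these members is the open design question, not something the diff shows.

    public abstract class ConnectionContext
    {
        // existing abstract members (ConnectionId, Features, Transport, ...) elided

        // Proposed: derived contexts override only the reason-taking overload,
        // while the parameterless form forwards a null reason, mirroring the diff.
        public virtual void Abort(ConnectionAbortedException abortReason)
        {
        }

        public virtual void Abort() => Abort(abortReason: null);
    }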
@@ -1286,6 +1286,12 @@ func NewFs(path string) (Fs, error) {
if err != nil {
return nil, err
}
+ // TODO: Fix import for rc
+ // f, err := fsInfo.NewFs(configName, fsPath, config)
+ // if err != nil && f.reload != nil {
+ // rc.AddOptionReload(f.name, &f.opt, f.reload)
+ // }
+ // return f, err
return fsInfo.NewFs(configName, fsPath, config)
}
| 1 | // Package fs is a generic file system interface for rclone object storage systems
package fs
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/pacer"
)
// EntryType can be associated with remote paths to identify their type
type EntryType int
// Constants
const (
// ModTimeNotSupported is a very large precision value to show
// mod time isn't supported on this Fs
ModTimeNotSupported = 100 * 365 * 24 * time.Hour
// MaxLevel is a sentinel representing an infinite depth for listings
MaxLevel = math.MaxInt32
// EntryDirectory should be used to classify remote paths in directories
EntryDirectory EntryType = iota // 0
// EntryObject should be used to classify remote paths in objects
EntryObject // 1
)
// Globals
var (
// Filesystem registry
Registry []*RegInfo
// ErrorNotFoundInConfigFile is returned by NewFs if not found in config file
ErrorNotFoundInConfigFile = errors.New("didn't find section in config file")
ErrorCantPurge = errors.New("can't purge directory")
ErrorCantCopy = errors.New("can't copy object - incompatible remotes")
ErrorCantMove = errors.New("can't move object - incompatible remotes")
ErrorCantDirMove = errors.New("can't move directory - incompatible remotes")
ErrorCantUploadEmptyFiles = errors.New("can't upload empty files to this remote")
ErrorDirExists = errors.New("can't copy directory - destination already exists")
ErrorCantSetModTime = errors.New("can't set modified time")
ErrorCantSetModTimeWithoutDelete = errors.New("can't set modified time without deleting existing object")
ErrorDirNotFound = errors.New("directory not found")
ErrorObjectNotFound = errors.New("object not found")
ErrorLevelNotSupported = errors.New("level value not supported")
ErrorListAborted = errors.New("list aborted")
ErrorListBucketRequired = errors.New("bucket or container name is needed in remote")
ErrorIsFile = errors.New("is a file not a directory")
ErrorNotAFile = errors.New("is not a regular file")
ErrorNotDeleting = errors.New("not deleting files as there were IO errors")
ErrorNotDeletingDirs = errors.New("not deleting directories as there were IO errors")
ErrorOverlapping = errors.New("can't sync or move files on overlapping remotes")
ErrorDirectoryNotEmpty = errors.New("directory not empty")
ErrorImmutableModified = errors.New("immutable file modified")
ErrorPermissionDenied = errors.New("permission denied")
ErrorCantShareDirectories = errors.New("this backend can't share directories with link")
ErrorNotImplemented = errors.New("optional feature not implemented")
)
// RegInfo provides information about a filesystem
type RegInfo struct {
// Name of this fs
Name string
// Description of this fs - defaults to Name
Description string
// Prefix for command line flags for this fs - defaults to Name if not set
Prefix string
// Create a new file system. If root refers to an existing
	// object, then it should return a Fs which points to
// the parent of that object and ErrorIsFile.
NewFs func(name string, root string, config configmap.Mapper) (Fs, error) `json:"-"`
// Function to call to help with config
Config func(name string, config configmap.Mapper) `json:"-"`
// Options for the Fs configuration
Options Options
}
// FileName returns the on disk file name for this backend
func (ri *RegInfo) FileName() string {
return strings.Replace(ri.Name, " ", "", -1)
}
// Options is a slice of configuration Option for a backend
type Options []Option
// Set the default values for the options
func (os Options) setValues() {
for i := range os {
o := &os[i]
if o.Default == nil {
o.Default = ""
}
}
}
// Get the Option corresponding to name or return nil if not found
func (os Options) Get(name string) *Option {
for i := range os {
opt := &os[i]
if opt.Name == name {
return opt
}
}
return nil
}
// OptionVisibility controls whether the options are visible in the
// configurator or the command line.
type OptionVisibility byte
// Constants for Option.Hide
const (
OptionHideCommandLine OptionVisibility = 1 << iota
OptionHideConfigurator
OptionHideBoth = OptionHideCommandLine | OptionHideConfigurator
)
// Option describes an option for the config wizard
//
// This also describes command line options and environment variables
type Option struct {
Name string // name of the option in snake_case
Help string // Help, the first line only is used for the command line help
Provider string // Set to filter on provider
Default interface{} // default value, nil => ""
Value interface{} // value to be set by flags
Examples OptionExamples `json:",omitempty"` // config examples
ShortOpt string // the short option for this if required
Hide OptionVisibility // set this to hide the config from the configurator or the command line
Required bool // this option is required
IsPassword bool // set if the option is a password
NoPrefix bool // set if the option for this should not use the backend prefix
Advanced bool // set if this is an advanced config option
}
// BaseOption is an alias for Option used internally
type BaseOption Option
// MarshalJSON turns an Option into JSON
//
// It adds some generated fields for ease of use
// - DefaultStr - a string rendering of Default
// - ValueStr - a string rendering of Value
// - Type - the type of the option
func (o *Option) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
BaseOption
DefaultStr string
ValueStr string
Type string
}{
BaseOption: BaseOption(*o),
DefaultStr: fmt.Sprint(o.Default),
ValueStr: o.String(),
Type: o.Type(),
})
}
// GetValue gets the current value, which is the default if not set
func (o *Option) GetValue() interface{} {
val := o.Value
if val == nil {
val = o.Default
if val == nil {
val = ""
}
}
return val
}
// String turns Option into a string
func (o *Option) String() string {
return fmt.Sprint(o.GetValue())
}
// Set a Option from a string
func (o *Option) Set(s string) (err error) {
newValue, err := configstruct.StringToInterface(o.GetValue(), s)
if err != nil {
return err
}
o.Value = newValue
return nil
}
// Type of the value
func (o *Option) Type() string {
return reflect.TypeOf(o.GetValue()).Name()
}
// FlagName for the option
func (o *Option) FlagName(prefix string) string {
name := strings.Replace(o.Name, "_", "-", -1) // convert snake_case to kebab-case
if !o.NoPrefix {
name = prefix + "-" + name
}
return name
}
// EnvVarName for the option
func (o *Option) EnvVarName(prefix string) string {
return OptionToEnv(prefix + "-" + o.Name)
}
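// For example (illustrative): an Option named "chunk_size" on a backend
// whose Prefix is "s3" yields FlagName("s3") == "s3-chunk-size" (i.e.
// --s3-chunk-size on the command line) and, via OptionToEnv, an
// environment variable along the lines of RCLONE_S3_CHUNK_SIZE.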
// OptionExamples is a slice of examples
type OptionExamples []OptionExample
// Len is part of sort.Interface.
func (os OptionExamples) Len() int { return len(os) }
// Swap is part of sort.Interface.
func (os OptionExamples) Swap(i, j int) { os[i], os[j] = os[j], os[i] }
// Less is part of sort.Interface.
func (os OptionExamples) Less(i, j int) bool { return os[i].Help < os[j].Help }
// Sort sorts an OptionExamples
func (os OptionExamples) Sort() { sort.Sort(os) }
// OptionExample describes an example for an Option
type OptionExample struct {
Value string
Help string
Provider string
}
// Register a filesystem
//
// Fs modules should use this in an init() function
func Register(info *RegInfo) {
info.Options.setValues()
if info.Prefix == "" {
info.Prefix = info.Name
}
Registry = append(Registry, info)
}
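// Illustrative only: a backend package outside fs would typically
// register itself from an init() function like this. The backend name,
// option, and constructor below are hypothetical.
//
//	func init() {
//		fs.Register(&fs.RegInfo{
//			Name:        "mybackend",
//			Description: "Example backend",
//			NewFs:       newMyBackendFs, // hypothetical constructor
//			Options: fs.Options{{
//				Name: "endpoint",
//				Help: "Endpoint URL to connect to.",
//			}},
//		})
//	}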
// Fs is the interface a cloud storage system must provide
type Fs interface {
Info
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
List(ctx context.Context, dir string) (entries DirEntries, err error)
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
NewObject(ctx context.Context, remote string) (Object, error)
// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
Put(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
Mkdir(ctx context.Context, dir string) error
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
Rmdir(ctx context.Context, dir string) error
}
// Info provides a read only interface to information about a filesystem.
type Info interface {
// Name of the remote (as passed into NewFs)
Name() string
// Root of the remote (as passed into NewFs)
Root() string
// String returns a description of the FS
String() string
// Precision of the ModTimes in this Fs
Precision() time.Duration
// Returns the supported hash types of the filesystem
Hashes() hash.Set
// Features returns the optional features of this Fs
Features() *Features
}
// Object is a filesystem like object provided by an Fs
type Object interface {
ObjectInfo
// SetModTime sets the metadata on the object to set the modification date
SetModTime(ctx context.Context, t time.Time) error
// Open opens the file for read. Call Close() on the returned io.ReadCloser
Open(ctx context.Context, options ...OpenOption) (io.ReadCloser, error)
// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
Update(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) error
// Removes this object
Remove(ctx context.Context) error
}
// ObjectInfo provides read only information about an object.
type ObjectInfo interface {
DirEntry
// Fs returns read only access to the Fs that this object is part of
Fs() Info
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
Hash(ctx context.Context, ty hash.Type) (string, error)
// Storable says whether this object can be stored
Storable() bool
}
// DirEntry provides read only information about the common subset of
// a Dir or Object. These are returned from directory listings - type
// assert them into the correct type.
type DirEntry interface {
// String returns a description of the Object
String() string
// Remote returns the remote path
Remote() string
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
ModTime(context.Context) time.Time
// Size returns the size of the file
Size() int64
}
// Directory is a filesystem like directory provided by an Fs
type Directory interface {
DirEntry
// Items returns the count of items in this directory or this
// directory and subdirectories if known, -1 for unknown
Items() int64
// ID returns the internal ID of this directory if known, or
// "" otherwise
ID() string
}
// MimeTyper is an optional interface for Object
type MimeTyper interface {
// MimeType returns the content type of the Object if
// known, or "" if not
MimeType(ctx context.Context) string
}
// IDer is an optional interface for Object
type IDer interface {
// ID returns the ID of the Object if known, or "" if not
ID() string
}
// ObjectUnWrapper is an optional interface for Object
type ObjectUnWrapper interface {
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
UnWrap() Object
}
// SetTierer is an optional interface for Object
type SetTierer interface {
// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
SetTier(tier string) error
}
// GetTierer is an optional interface for Object
type GetTierer interface {
// GetTier returns storage tier or class of the Object
GetTier() string
}
// FullObjectInfo contains all the read-only optional interfaces
//
// Use for checking that wrapping ObjectInfos implement everything
type FullObjectInfo interface {
ObjectInfo
MimeTyper
IDer
ObjectUnWrapper
GetTierer
}
// FullObject contains all the optional interfaces for Object
//
// Use for checking that wrapping Objects implement everything
type FullObject interface {
Object
MimeTyper
IDer
ObjectUnWrapper
GetTierer
SetTierer
}
// ObjectOptionalInterfaces returns the names of supported and
// unsupported optional interfaces for an Object
func ObjectOptionalInterfaces(o Object) (supported, unsupported []string) {
store := func(ok bool, name string) {
if ok {
supported = append(supported, name)
} else {
unsupported = append(unsupported, name)
}
}
_, ok := o.(MimeTyper)
store(ok, "MimeType")
_, ok = o.(IDer)
store(ok, "ID")
_, ok = o.(ObjectUnWrapper)
store(ok, "UnWrap")
_, ok = o.(SetTierer)
store(ok, "SetTier")
_, ok = o.(GetTierer)
store(ok, "GetTier")
return supported, unsupported
}
// ListRCallback defines a callback function for ListR to use
//
// It is called for each tranche of entries read from the listing and
// if it returns an error, the listing stops.
type ListRCallback func(entries DirEntries) error
// ListRFn defines the call used to recursively list a directory
type ListRFn func(ctx context.Context, dir string, callback ListRCallback) error
// NewUsageValue makes a valid value
func NewUsageValue(value int64) *int64 {
p := new(int64)
*p = value
return p
}
// Usage is returned by the About call
//
// If a value is nil then it isn't supported by that backend
type Usage struct {
Total *int64 `json:"total,omitempty"` // quota of bytes that can be used
Used *int64 `json:"used,omitempty"` // bytes in use
Trashed *int64 `json:"trashed,omitempty"` // bytes in trash
Other *int64 `json:"other,omitempty"` // other usage eg gmail in drive
Free *int64 `json:"free,omitempty"` // bytes which can be uploaded before reaching the quota
Objects *int64 `json:"objects,omitempty"` // objects in the storage system
}
// WriterAtCloser wraps io.WriterAt and io.Closer
type WriterAtCloser interface {
io.WriterAt
io.Closer
}
// Features describe the optional features of the Fs
type Features struct {
// Feature flags, whether Fs
CaseInsensitive bool // has case insensitive files
DuplicateFiles bool // allows duplicate files
ReadMimeType bool // can read the mime type of objects
WriteMimeType bool // can set the mime type of objects
CanHaveEmptyDirectories bool // can have empty directories
BucketBased bool // is bucket based (like s3, swift etc)
BucketBasedRootOK bool // is bucket based and can use from root
SetTier bool // allows set tier functionality on objects
GetTier bool // allows to retrieve storage tier of objects
ServerSideAcrossConfigs bool // can server side copy between different remotes of the same type
IsLocal bool // is the local backend
// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
Purge func(ctx context.Context) error
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
Copy func(ctx context.Context, src Object, remote string) (Object, error)
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
Move func(ctx context.Context, src Object, remote string) (Object, error)
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
DirMove func(ctx context.Context, src Fs, srcRemote, dstRemote string) error
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
ChangeNotify func(context.Context, func(string, EntryType), <-chan time.Duration)
// UnWrap returns the Fs that this Fs is wrapping
UnWrap func() Fs
// WrapFs returns the Fs that is wrapping this Fs
WrapFs func() Fs
// SetWrapper sets the Fs that is wrapping this Fs
SetWrapper func(f Fs)
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
DirCacheFlush func()
// PublicLink generates a public link to the remote path (usually readable by anyone)
PublicLink func(ctx context.Context, remote string) (string, error)
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
PutUnchecked func(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
PutStream func(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
MergeDirs func(ctx context.Context, dirs []Directory) error
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
CleanUp func(ctx context.Context) error
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
	// of listing recursively than doing a directory traversal.
ListR ListRFn
// About gets quota information from the Fs
About func(ctx context.Context) (*Usage, error)
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
OpenWriterAt func(ctx context.Context, remote string, size int64) (WriterAtCloser, error)
// UserInfo returns info about the connected user
UserInfo func(ctx context.Context) (map[string]string, error)
// Disconnect the current user
Disconnect func(ctx context.Context) error
}
// Disable nil's out the named feature. If it isn't found then it
// will log a message.
func (ft *Features) Disable(name string) *Features {
v := reflect.ValueOf(ft).Elem()
vType := v.Type()
for i := 0; i < v.NumField(); i++ {
vName := vType.Field(i).Name
field := v.Field(i)
if strings.EqualFold(name, vName) {
if !field.CanSet() {
Errorf(nil, "Can't set Feature %q", name)
} else {
zero := reflect.Zero(field.Type())
field.Set(zero)
Debugf(nil, "Reset feature %q", name)
}
}
}
return ft
}
// List returns a slice of all the possible feature names
func (ft *Features) List() (out []string) {
v := reflect.ValueOf(ft).Elem()
vType := v.Type()
for i := 0; i < v.NumField(); i++ {
out = append(out, vType.Field(i).Name)
}
return out
}
// Enabled returns a map of features with keys showing whether they
// are enabled or not
func (ft *Features) Enabled() (features map[string]bool) {
v := reflect.ValueOf(ft).Elem()
vType := v.Type()
features = make(map[string]bool, v.NumField())
for i := 0; i < v.NumField(); i++ {
vName := vType.Field(i).Name
field := v.Field(i)
if field.Kind() == reflect.Func {
// Can't compare functions
features[vName] = !field.IsNil()
} else {
zero := reflect.Zero(field.Type())
features[vName] = field.Interface() != zero.Interface()
}
}
return features
}
// DisableList nil's out the comma separated list of named features.
// If it isn't found then it will log a message.
func (ft *Features) DisableList(list []string) *Features {
for _, feature := range list {
ft.Disable(strings.TrimSpace(feature))
}
return ft
}
// Fill fills in the function pointers in the Features struct from the
// optional interfaces. It returns the original updated Features
// struct passed in.
func (ft *Features) Fill(f Fs) *Features {
if do, ok := f.(Purger); ok {
ft.Purge = do.Purge
}
if do, ok := f.(Copier); ok {
ft.Copy = do.Copy
}
if do, ok := f.(Mover); ok {
ft.Move = do.Move
}
if do, ok := f.(DirMover); ok {
ft.DirMove = do.DirMove
}
if do, ok := f.(ChangeNotifier); ok {
ft.ChangeNotify = do.ChangeNotify
}
if do, ok := f.(UnWrapper); ok {
ft.UnWrap = do.UnWrap
}
if do, ok := f.(Wrapper); ok {
ft.WrapFs = do.WrapFs
ft.SetWrapper = do.SetWrapper
}
if do, ok := f.(DirCacheFlusher); ok {
ft.DirCacheFlush = do.DirCacheFlush
}
if do, ok := f.(PublicLinker); ok {
ft.PublicLink = do.PublicLink
}
if do, ok := f.(PutUncheckeder); ok {
ft.PutUnchecked = do.PutUnchecked
}
if do, ok := f.(PutStreamer); ok {
ft.PutStream = do.PutStream
}
if do, ok := f.(MergeDirser); ok {
ft.MergeDirs = do.MergeDirs
}
if do, ok := f.(CleanUpper); ok {
ft.CleanUp = do.CleanUp
}
if do, ok := f.(ListRer); ok {
ft.ListR = do.ListR
}
if do, ok := f.(Abouter); ok {
ft.About = do.About
}
if do, ok := f.(OpenWriterAter); ok {
ft.OpenWriterAt = do.OpenWriterAt
}
if do, ok := f.(UserInfoer); ok {
ft.UserInfo = do.UserInfo
}
if do, ok := f.(Disconnecter); ok {
ft.Disconnect = do.Disconnect
}
return ft.DisableList(Config.DisableFeatures)
}
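// Typical backend use (sketch): declare the feature flags and let Fill
// pick up whichever optional interfaces the concrete Fs implements.
//
//	f := &MyFs{} // hypothetical backend type
//	f.features = (&fs.Features{
//		CaseInsensitive:         true,
//		CanHaveEmptyDirectories: true,
//	}).Fill(f)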
// Mask the Features with the Fs passed in
//
// Only optional features which are implemented in both the original
// Fs AND the one passed in will be advertised. Any features which
// aren't in both will be set to false/nil, except for UnWrap/Wrap which
// will be left untouched.
func (ft *Features) Mask(f Fs) *Features {
mask := f.Features()
ft.CaseInsensitive = ft.CaseInsensitive && mask.CaseInsensitive
ft.DuplicateFiles = ft.DuplicateFiles && mask.DuplicateFiles
ft.ReadMimeType = ft.ReadMimeType && mask.ReadMimeType
ft.WriteMimeType = ft.WriteMimeType && mask.WriteMimeType
ft.CanHaveEmptyDirectories = ft.CanHaveEmptyDirectories && mask.CanHaveEmptyDirectories
ft.BucketBased = ft.BucketBased && mask.BucketBased
ft.BucketBasedRootOK = ft.BucketBasedRootOK && mask.BucketBasedRootOK
ft.SetTier = ft.SetTier && mask.SetTier
ft.GetTier = ft.GetTier && mask.GetTier
if mask.Purge == nil {
ft.Purge = nil
}
if mask.Copy == nil {
ft.Copy = nil
}
if mask.Move == nil {
ft.Move = nil
}
if mask.DirMove == nil {
ft.DirMove = nil
}
if mask.ChangeNotify == nil {
ft.ChangeNotify = nil
}
// if mask.UnWrap == nil {
// ft.UnWrap = nil
// }
// if mask.Wrapper == nil {
// ft.Wrapper = nil
// }
if mask.DirCacheFlush == nil {
ft.DirCacheFlush = nil
}
if mask.PublicLink == nil {
ft.PublicLink = nil
}
if mask.PutUnchecked == nil {
ft.PutUnchecked = nil
}
if mask.PutStream == nil {
ft.PutStream = nil
}
if mask.MergeDirs == nil {
ft.MergeDirs = nil
}
if mask.CleanUp == nil {
ft.CleanUp = nil
}
if mask.ListR == nil {
ft.ListR = nil
}
if mask.About == nil {
ft.About = nil
}
if mask.OpenWriterAt == nil {
ft.OpenWriterAt = nil
}
if mask.UserInfo == nil {
ft.UserInfo = nil
}
if mask.Disconnect == nil {
ft.Disconnect = nil
}
return ft.DisableList(Config.DisableFeatures)
}
// Wrap makes a Copy of the features passed in, overriding the UnWrap/Wrap
// method only if available in f.
func (ft *Features) Wrap(f Fs) *Features {
ftCopy := new(Features)
*ftCopy = *ft
if do, ok := f.(UnWrapper); ok {
ftCopy.UnWrap = do.UnWrap
}
if do, ok := f.(Wrapper); ok {
ftCopy.WrapFs = do.WrapFs
ftCopy.SetWrapper = do.SetWrapper
}
return ftCopy
}
// WrapsFs adds extra information between `f` which wraps `w`
func (ft *Features) WrapsFs(f Fs, w Fs) *Features {
wFeatures := w.Features()
if wFeatures.WrapFs != nil && wFeatures.SetWrapper != nil {
wFeatures.SetWrapper(f)
}
return ft
}
// Purger is an optional interface for Fs
type Purger interface {
// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
Purge(ctx context.Context) error
}
// Copier is an optional interface for Fs
type Copier interface {
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
Copy(ctx context.Context, src Object, remote string) (Object, error)
}
// Mover is an optional interface for Fs
type Mover interface {
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
Move(ctx context.Context, src Object, remote string) (Object, error)
}
// DirMover is an optional interface for Fs
type DirMover interface {
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
DirMove(ctx context.Context, src Fs, srcRemote, dstRemote string) error
}
// ChangeNotifier is an optional interface for Fs
type ChangeNotifier interface {
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
ChangeNotify(context.Context, func(string, EntryType), <-chan time.Duration)
}
// UnWrapper is an optional interface for Fs
type UnWrapper interface {
// UnWrap returns the Fs that this Fs is wrapping
UnWrap() Fs
}
// Wrapper is an optional interface for Fs
type Wrapper interface {
// Wrap returns the Fs that is wrapping this Fs
WrapFs() Fs
// SetWrapper sets the Fs that is wrapping this Fs
SetWrapper(f Fs)
}
// DirCacheFlusher is an optional interface for Fs
type DirCacheFlusher interface {
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
DirCacheFlush()
}
// PutUncheckeder is an optional interface for Fs
type PutUncheckeder interface {
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
PutUnchecked(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)
}
// PutStreamer is an optional interface for Fs
type PutStreamer interface {
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
PutStream(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)
}
// PublicLinker is an optional interface for Fs
type PublicLinker interface {
// PublicLink generates a public link to the remote path (usually readable by anyone)
PublicLink(ctx context.Context, remote string) (string, error)
}
// MergeDirser is an optional interface for Fs
type MergeDirser interface {
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
MergeDirs(ctx context.Context, dirs []Directory) error
}
// CleanUpper is an optional interface for Fs
type CleanUpper interface {
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
CleanUp(ctx context.Context) error
}
// ListRer is an optional interface for Fs
type ListRer interface {
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
	// of listing recursively than doing a directory traversal.
ListR(ctx context.Context, dir string, callback ListRCallback) error
}
// RangeSeeker is the interface that wraps the RangeSeek method.
//
// Some of the returns from Object.Open() may optionally implement
// this method for efficiency purposes.
type RangeSeeker interface {
// RangeSeek behaves like a call to Seek(offset int64, whence
// int) with the output wrapped in an io.LimitedReader
// limiting the total length to limit.
//
// RangeSeek with a limit of < 0 is equivalent to a regular Seek.
RangeSeek(ctx context.Context, offset int64, whence int, length int64) (int64, error)
}
// Abouter is an optional interface for Fs
type Abouter interface {
// About gets quota information from the Fs
About(ctx context.Context) (*Usage, error)
}
// OpenWriterAter is an optional interface for Fs
type OpenWriterAter interface {
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
OpenWriterAt(ctx context.Context, remote string, size int64) (WriterAtCloser, error)
}
// UserInfoer is an optional interface for Fs
type UserInfoer interface {
// UserInfo returns info about the connected user
UserInfo(ctx context.Context) (map[string]string, error)
}
// Disconnecter is an optional interface for Fs
type Disconnecter interface {
// Disconnect the current user
Disconnect(ctx context.Context) error
}
// ObjectsChan is a channel of Objects
type ObjectsChan chan Object
// Objects is a slice of Object values
type Objects []Object
// ObjectPair is a pair of Objects used to describe a potential copy
// operation.
type ObjectPair struct {
Src, Dst Object
}
// UnWrapFs unwraps f as much as possible and returns the base Fs
func UnWrapFs(f Fs) Fs {
for {
unwrap := f.Features().UnWrap
if unwrap == nil {
break // not a wrapped Fs, use current
}
next := unwrap()
if next == nil {
break // no base Fs found, use current
}
f = next
}
return f
}
// UnWrapObject unwraps o as much as possible and returns the base object
func UnWrapObject(o Object) Object {
for {
u, ok := o.(ObjectUnWrapper)
if !ok {
break // not a wrapped object, use current
}
next := u.UnWrap()
if next == nil {
break // no base object found, use current
}
o = next
}
return o
}
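// Example (sketch): peel off any wrapping layers (e.g. crypt) before
// type-asserting the optional interfaces of the underlying object.
//
//	base := fs.UnWrapObject(o)
//	if ider, ok := base.(fs.IDer); ok {
//		fmt.Println("backend ID:", ider.ID())
//	}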
// Find looks for a RegInfo object for the name passed in. The name
// can be either the Name or the Prefix.
//
// Services are looked up in the config file
func Find(name string) (*RegInfo, error) {
for _, item := range Registry {
if item.Name == name || item.Prefix == name || item.FileName() == name {
return item, nil
}
}
return nil, errors.Errorf("didn't find backend called %q", name)
}
// MustFind looks for an Info object for the type name passed in
//
// Services are looked up in the config file
//
// Exits with a fatal error if not found
func MustFind(name string) *RegInfo {
fs, err := Find(name)
if err != nil {
log.Fatalf("Failed to find remote: %v", err)
}
return fs
}
// ParseRemote deconstructs a path into configName, fsPath, looking up
// the fsName in the config file (returning ErrorNotFoundInConfigFile if not found)
func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, err error) {
configName, fsPath, err = fspath.Parse(path)
if err != nil {
return nil, "", "", err
}
var fsName string
var ok bool
if configName != "" {
if strings.HasPrefix(configName, ":") {
fsName = configName[1:]
} else {
m := ConfigMap(nil, configName)
fsName, ok = m.Get("type")
if !ok {
return nil, "", "", ErrorNotFoundInConfigFile
}
}
} else {
fsName = "local"
configName = "local"
}
fsInfo, err = Find(fsName)
return fsInfo, configName, fsPath, err
}
// A configmap.Getter to read from the environment RCLONE_CONFIG_backend_option_name
type configEnvVars string
// Get a config item from the environment variables if possible
func (configName configEnvVars) Get(key string) (value string, ok bool) {
return os.LookupEnv(ConfigToEnv(string(configName), key))
}
// A configmap.Getter to read from the environment RCLONE_option_name
type optionEnvVars struct {
fsInfo *RegInfo
}
// Get a config item from the option environment variables if possible
func (oev optionEnvVars) Get(key string) (value string, ok bool) {
opt := oev.fsInfo.Options.Get(key)
if opt == nil {
return "", false
}
// For options with NoPrefix set, check without prefix too
if opt.NoPrefix {
value, ok = os.LookupEnv(OptionToEnv(key))
if ok {
return value, ok
}
}
return os.LookupEnv(OptionToEnv(oev.fsInfo.Prefix + "-" + key))
}
// A configmap.Getter to read either the default value or the set
// value from the RegInfo.Options
type regInfoValues struct {
fsInfo *RegInfo
useDefault bool
}
// override the values in configMap with either the flag values or
// the default values
func (r *regInfoValues) Get(key string) (value string, ok bool) {
opt := r.fsInfo.Options.Get(key)
if opt != nil && (r.useDefault || opt.Value != nil) {
return opt.String(), true
}
return "", false
}
// A configmap.Setter to read from the config file
type setConfigFile string
// Set a config item into the config file
func (section setConfigFile) Set(key, value string) {
Debugf(nil, "Saving config %q = %q in section %q of the config file", key, value, section)
err := ConfigFileSet(string(section), key, value)
if err != nil {
Errorf(nil, "Failed saving config %q = %q in section %q of the config file: %v", key, value, section, err)
}
}
// A configmap.Getter to read from the config file
type getConfigFile string
// Get a config item from the config file
func (section getConfigFile) Get(key string) (value string, ok bool) {
value, ok = ConfigFileGet(string(section), key)
// Ignore empty lines in the config file
if value == "" {
ok = false
}
return value, ok
}
// ConfigMap creates a configmap.Map from the *RegInfo and the
// configName passed in.
//
// If fsInfo is nil then the returned configmap.Map should only be
// used for reading non backend specific parameters, such as "type".
func ConfigMap(fsInfo *RegInfo, configName string) (config *configmap.Map) {
// Create the config
config = configmap.New()
// Read the config, more specific to least specific
// flag values
if fsInfo != nil {
config.AddGetter(®InfoValues{fsInfo, false})
}
// remote specific environment vars
config.AddGetter(configEnvVars(configName))
// backend specific environment vars
if fsInfo != nil {
config.AddGetter(optionEnvVars{fsInfo: fsInfo})
}
// config file
config.AddGetter(getConfigFile(configName))
// default values
if fsInfo != nil {
config.AddGetter(®InfoValues{fsInfo, true})
}
// Set Config
config.AddSetter(setConfigFile(configName))
return config
}
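// Sketch of the resulting lookup precedence for one option ("myremote"
// and "endpoint" are illustrative names):
//
//	m := fs.ConfigMap(fsInfo, "myremote")
//	endpoint, ok := m.Get("endpoint")
//	// consulted in order: flag value, RCLONE_CONFIG_MYREMOTE_* variable,
//	// backend option environment variable, config file entry, and
//	// finally the option's default value.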
// ConfigFs makes the config for calling NewFs with.
//
// It parses the path which is of the form remote:path
//
// Remotes are looked up in the config file. If the remote isn't
// found then ErrorNotFoundInConfigFile will be returned.
func ConfigFs(path string) (fsInfo *RegInfo, configName, fsPath string, config *configmap.Map, err error) {
// Parse the remote path
fsInfo, configName, fsPath, err = ParseRemote(path)
if err != nil {
return
}
config = ConfigMap(fsInfo, configName)
return
}
// NewFs makes a new Fs object from the path
//
// The path is of the form remote:path
//
// Remotes are looked up in the config file. If the remote isn't
// found then ErrorNotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(path string) (Fs, error) {
fsInfo, configName, fsPath, config, err := ConfigFs(path)
if err != nil {
return nil, err
}
return fsInfo.NewFs(configName, fsPath, config)
}
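// Example (sketch) of creating and using an Fs from a path string:
//
//	f, err := fs.NewFs("myremote:some/dir") // "myremote" is illustrative
//	if err != nil {
//		return err
//	}
//	entries, err := f.List(context.Background(), "")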
// TemporaryLocalFs creates a local FS in the OS's temporary directory.
//
// No cleanup is performed; the caller must call Purge on the Fs themselves.
func TemporaryLocalFs() (Fs, error) {
path, err := ioutil.TempDir("", "rclone-spool")
if err == nil {
err = os.Remove(path)
}
if err != nil {
return nil, err
}
path = filepath.ToSlash(path)
return NewFs(path)
}
// CheckClose is a utility function used to check the return from
// Close in a defer statement.
func CheckClose(c io.Closer, err *error) {
cerr := c.Close()
if *err == nil {
*err = cerr
}
}
// FileExists returns true if a file remote exists.
// If remote is a directory, FileExists returns false.
func FileExists(ctx context.Context, fs Fs, remote string) (bool, error) {
_, err := fs.NewObject(ctx, remote)
if err != nil {
if err == ErrorObjectNotFound || err == ErrorNotAFile || err == ErrorPermissionDenied {
return false, nil
}
return false, err
}
return true, nil
}
// GetModifyWindow calculates the maximum modify window between the given Fses
// and the Config.ModifyWindow parameter.
func GetModifyWindow(fss ...Info) time.Duration {
window := Config.ModifyWindow
for _, f := range fss {
if f != nil {
precision := f.Precision()
if precision == ModTimeNotSupported {
return ModTimeNotSupported
}
if precision > window {
window = precision
}
}
}
return window
}
// Pacer is a simple wrapper around a pacer.Pacer with logging.
type Pacer struct {
*pacer.Pacer
}
type logCalculator struct {
pacer.Calculator
}
// NewPacer creates a Pacer for the given Calculator.
func NewPacer(c pacer.Calculator) *Pacer {
p := &Pacer{
Pacer: pacer.New(
pacer.InvokerOption(pacerInvoker),
pacer.MaxConnectionsOption(Config.Checkers+Config.Transfers),
pacer.RetriesOption(Config.LowLevelRetries),
pacer.CalculatorOption(c),
),
}
p.SetCalculator(c)
return p
}
func (d *logCalculator) Calculate(state pacer.State) time.Duration {
oldSleepTime := state.SleepTime
newSleepTime := d.Calculator.Calculate(state)
if state.ConsecutiveRetries > 0 {
if newSleepTime != oldSleepTime {
Debugf("pacer", "Rate limited, increasing sleep to %v", newSleepTime)
}
} else {
if newSleepTime != oldSleepTime {
Debugf("pacer", "Reducing sleep to %v", newSleepTime)
}
}
return newSleepTime
}
// SetCalculator sets the pacing algorithm. Don't modify the Calculator object
// afterwards; use the ModifyCalculator method when needed.
//
// It will choose the default algorithm if nil is passed in.
func (p *Pacer) SetCalculator(c pacer.Calculator) {
switch c.(type) {
case *logCalculator:
Logf("pacer", "Invalid Calculator in fs.Pacer.SetCalculator")
case nil:
c = &logCalculator{pacer.NewDefault()}
default:
c = &logCalculator{c}
}
p.Pacer.SetCalculator(c)
}
// ModifyCalculator calls the given function with the currently configured
// Calculator and the Pacer lock held.
func (p *Pacer) ModifyCalculator(f func(pacer.Calculator)) {
	// Delegate to the embedded pacer; calling p.ModifyCalculator here
	// would recurse forever.
	p.Pacer.ModifyCalculator(func(c pacer.Calculator) {
switch _c := c.(type) {
case *logCalculator:
f(_c.Calculator)
default:
Logf("pacer", "Invalid Calculator in fs.Pacer: %t", c)
f(c)
}
})
}
func pacerInvoker(try, retries int, f pacer.Paced) (retry bool, err error) {
retry, err = f()
if retry {
Debugf("pacer", "low level retry %d/%d (error %v)", try, retries, err)
err = fserrors.RetryError(err)
}
return
}
| 1 | 10,912 | I was planning to add it as a common option, but there is a cyclic import for the rc & fs libs. Thoughts on avoiding it or should I remove this for now? | rclone-rclone | go |
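One conventional way to break such a cycle, sketched here as an assumption rather than rclone's actual fix: let the fs package declare a hook variable that the rc package fills in at init time, so fs never has to import rc.

// In package fs (no rc import; name and signature are hypothetical):
var AddOptionReload func(name string, opt interface{}, reload func() error)

// NewFs could then call the hook only when it has been wired up:
//   if AddOptionReload != nil { AddOptionReload(configName, &opt, reload) }

// In package rc (rc already imports fs, so this direction is safe):
func init() {
	fs.AddOptionReload = func(name string, opt interface{}, reload func() error) {
		// register the option block and its reload callback with rc
	}
}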
@@ -65,6 +65,10 @@ namespace SynchronizeVersions
"src/Datadog.Trace/Datadog.Trace.csproj",
NugetVersionReplace);
+ SynchronizeVersion(
+ "/deploy/Nuget/Datadog.Trace.nuspec",
+ NugetVersionReplace);
+
Console.WriteLine($"Completed synchronizing versions to {VersionString()}");
}
| 1 | using System;
using System.IO;
using System.Text;
using System.Text.RegularExpressions;
using Datadog.Trace.TestHelpers;
namespace SynchronizeVersions
{
public class Program
{
private static int major = 1;
private static int minor = 7;
private static int patch = 0;
public static void Main(string[] args)
{
Console.WriteLine($"Updating version instances to {VersionString()}");
SynchronizeVersion(
"integrations.json",
FullAssemblyNameReplace);
SynchronizeVersion(
"docker/package.sh",
text => Regex.Replace(text, $"VERSION={VersionPattern()}", $"VERSION={VersionString()}"));
SynchronizeVersion(
"reproductions/AutomapperTest/Dockerfile",
text => Regex.Replace(text, $"ARG TRACER_VERSION={VersionPattern()}", $"ARG TRACER_VERSION={VersionString()}"));
SynchronizeVersion(
"src/Datadog.Trace.ClrProfiler.Managed.Loader/Datadog.Trace.ClrProfiler.Managed.Loader.csproj",
NugetVersionReplace);
SynchronizeVersion(
"src/Datadog.Trace.ClrProfiler.Managed.Loader/Startup.cs",
FullAssemblyNameReplace);
SynchronizeVersion(
"src/Datadog.Trace.ClrProfiler.Managed/Datadog.Trace.ClrProfiler.Managed.csproj",
NugetVersionReplace);
SynchronizeVersion(
"src/Datadog.Trace.ClrProfiler.Native/CMakeLists.txt",
text => FullVersionReplace(text, "."));
SynchronizeVersion(
"src/Datadog.Trace.ClrProfiler.Native/Resource.rc",
text =>
{
text = FullVersionReplace(text, ",");
text = FullVersionReplace(text, ".");
return text;
});
SynchronizeVersion(
"src/Datadog.Trace.ClrProfiler.Native/version.h",
text => FullVersionReplace(text, "."));
SynchronizeVersion(
"src/Datadog.Trace.OpenTracing/Datadog.Trace.OpenTracing.csproj",
NugetVersionReplace);
SynchronizeVersion(
"src/Datadog.Trace/Datadog.Trace.csproj",
NugetVersionReplace);
Console.WriteLine($"Completed synchronizing versions to {VersionString()}");
}
private static string FullVersionReplace(string text, string split)
{
return Regex.Replace(text, VersionPattern(split), VersionString(split));
}
private static string FullAssemblyNameReplace(string text)
{
return Regex.Replace(text, AssemblyString(VersionPattern()), AssemblyString(VersionString()));
}
private static string NugetVersionReplace(string text)
{
return Regex.Replace(text, $"<Version>{VersionPattern()}</Version>", $"<Version>{VersionString()}</Version>");
}
private static void SynchronizeVersion(string path, Func<string, string> transform)
{
var solutionDirectory = EnvironmentHelper.GetSolutionDirectory();
var fullPath = Path.Combine(solutionDirectory, path);
Console.WriteLine($"Updating version instances for {path}");
if (!File.Exists(fullPath))
{
throw new Exception($"File not found to version: {path}");
}
var fileContent = File.ReadAllText(fullPath);
var newFileContent = transform(fileContent);
File.WriteAllText(fullPath, newFileContent, new UTF8Encoding(encoderShouldEmitUTF8Identifier: false));
}
private static string VersionString(string split = ".")
{
return $"{major}{split}{minor}{split}{patch}";
}
private static string VersionPattern(string split = ".")
{
if (split == ".")
{
split = @"\.";
}
return $@"\d+{split}\d+{split}\d+";
}
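        // For example (illustrative): VersionPattern(".") returns the regex
        // @"\d+\.\d+\.\d+" (matching strings like "1.7.0"), while
        // VersionPattern(",") returns @"\d+,\d+,\d+" for the comma-separated
        // form used in Resource.rc.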
private static string AssemblyString(string versionText)
{
return $"Datadog.Trace.ClrProfiler.Managed, Version={versionText}.0, Culture=neutral, PublicKeyToken=def86d061d0d2eeb";
}
}
}
| 1 | 15,714 | I think this entry can be removed now, right? | DataDog-dd-trace-dotnet | .cs |
@@ -230,6 +230,18 @@ class Key(object):
else:
self.encrypted = None
+ def handle_storage_class_header(self, resp):
+ provider = self.bucket.connection.provider
+ if provider.storage_class_header:
+ self._storage_class = resp.getheader(
+ provider.storage_class_header, None)
+ if (self._storage_class is None and
+ provider.get_provider_name() == 'aws'):
+ # S3 docs for HEAD object requests say S3 will return this
+ # header for all objects except Standard storage class objects.
+ self._storage_class = 'STANDARD'
+
+
def handle_version_headers(self, resp, force=False):
provider = self.bucket.connection.provider
# If the Key object already has a version_id attribute value, it | 1 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Nexenta Systems Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import email.utils
import errno
import hashlib
import mimetypes
import os
import re
import base64
import binascii
import math
from hashlib import md5
import boto.utils
from boto.compat import BytesIO, six, urllib, encodebytes
from boto.exception import BotoClientError
from boto.exception import StorageDataError
from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.s3.keyfile import KeyFile
from boto.s3.user import User
from boto import UserAgent
from boto.utils import compute_md5, compute_hash
from boto.utils import find_matching_headers
from boto.utils import merge_headers_by_name
class Key(object):
"""
Represents a key (object) in an S3 bucket.
:ivar bucket: The parent :class:`boto.s3.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in S3.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | REDUCED_REDUNDANCY | GLACIER
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar version_id: The version ID of this object, if it is a versioned
object.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
"""
DefaultContentType = 'application/octet-stream'
RestoreBody = """<?xml version="1.0" encoding="UTF-8"?>
<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Days>%s</Days>
</RestoreRequest>"""
BufferSize = boto.config.getint('Boto', 'key_buffer_size', 8192)
# The object metadata fields a user can set, other than custom metadata
# fields (i.e., those beginning with a provider-specific prefix like
# x-amz-meta).
base_user_settable_fields = set(["cache-control", "content-disposition",
"content-encoding", "content-language",
"content-md5", "content-type",
"x-robots-tag", "expires"])
_underscore_base_user_settable_fields = set()
for f in base_user_settable_fields:
_underscore_base_user_settable_fields.add(f.replace('-', '_'))
# Metadata fields, whether user-settable or not, other than custom
# metadata fields (i.e., those beginning with a provider specific prefix
# like x-amz-meta).
base_fields = (base_user_settable_fields |
set(["last-modified", "content-length", "date", "etag"]))
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
self.metadata = {}
self.cache_control = None
self.content_type = self.DefaultContentType
self.content_encoding = None
self.content_disposition = None
self.content_language = None
self.filename = None
self.etag = None
self.is_latest = False
self.last_modified = None
self.owner = None
self._storage_class = None
self.path = None
self.resp = None
self.mode = None
self.size = None
self.version_id = None
self.source_version_id = None
self.delete_marker = False
self.encrypted = None
# If the object is being restored, this attribute will be set to True.
# If the object is restored, it will be set to False. Otherwise this
# value will be None. If the restore is completed (ongoing_restore =
# False), the expiry_date will be populated with the expiry date of the
# restored object.
self.ongoing_restore = None
self.expiry_date = None
self.local_hashes = {}
def __repr__(self):
if self.bucket:
name = u'<Key: %s,%s>' % (self.bucket.name, self.name)
else:
name = u'<Key: None,%s>' % self.name
# Encode to bytes for Python 2 to prevent display decoding issues
if not isinstance(name, str):
name = name.encode('utf-8')
return name
def __iter__(self):
return self
@property
def provider(self):
provider = None
if self.bucket and self.bucket.connection:
provider = self.bucket.connection.provider
return provider
def _get_key(self):
return self.name
def _set_key(self, value):
self.name = value
    key = property(_get_key, _set_key)
def _get_md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
return binascii.b2a_hex(self.local_hashes['md5'])
def _set_md5(self, value):
if value:
self.local_hashes['md5'] = binascii.a2b_hex(value)
elif 'md5' in self.local_hashes:
self.local_hashes.pop('md5', None)
    md5 = property(_get_md5, _set_md5)
def _get_base64md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
md5 = self.local_hashes['md5']
if not isinstance(md5, bytes):
md5 = md5.encode('utf-8')
return binascii.b2a_base64(md5).decode('utf-8').rstrip('\n')
def _set_base64md5(self, value):
if value:
if not isinstance(value, six.string_types):
value = value.decode('utf-8')
self.local_hashes['md5'] = binascii.a2b_base64(value)
elif 'md5' in self.local_hashes:
del self.local_hashes['md5']
    base64md5 = property(_get_base64md5, _set_base64md5)
def _get_storage_class(self):
if self._storage_class is None and self.bucket:
# Attempt to fetch storage class
list_items = list(self.bucket.list(self.name.encode('utf-8')))
if len(list_items) and getattr(list_items[0], '_storage_class',
None):
self._storage_class = list_items[0]._storage_class
else:
# Key is not yet saved? Just use default...
self._storage_class = 'STANDARD'
return self._storage_class
def _set_storage_class(self, value):
self._storage_class = value
storage_class = property(_get_storage_class, _set_storage_class)
def get_md5_from_hexdigest(self, md5_hexdigest):
"""
A utility function to create the 2-tuple (md5hexdigest, base64md5)
from just having a precalculated md5_hexdigest.
"""
digest = binascii.unhexlify(md5_hexdigest)
base64md5 = encodebytes(digest)
if base64md5[-1] == '\n':
base64md5 = base64md5[0:-1]
return (md5_hexdigest, base64md5)
def handle_encryption_headers(self, resp):
provider = self.bucket.connection.provider
if provider.server_side_encryption_header:
self.encrypted = resp.getheader(
provider.server_side_encryption_header, None)
else:
self.encrypted = None
def handle_version_headers(self, resp, force=False):
provider = self.bucket.connection.provider
# If the Key object already has a version_id attribute value, it
# means that it represents an explicit version and the user is
# doing a get_contents_*(version_id=<foo>) to retrieve another
# version of the Key. In that case, we don't really want to
# overwrite the version_id in this Key object. Comprende?
if self.version_id is None or force:
self.version_id = resp.getheader(provider.version_id, None)
self.source_version_id = resp.getheader(provider.copy_source_version_id,
None)
if resp.getheader(provider.delete_marker, 'false') == 'true':
self.delete_marker = True
else:
self.delete_marker = False
def handle_restore_headers(self, response):
provider = self.bucket.connection.provider
header = response.getheader(provider.restore_header)
if header is None:
return
parts = header.split(',', 1)
for part in parts:
key, val = [i.strip() for i in part.split('=')]
val = val.replace('"', '')
if key == 'ongoing-request':
self.ongoing_restore = True if val.lower() == 'true' else False
elif key == 'expiry-date':
self.expiry_date = val
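    # Example of the header parsed by handle_restore_headers (format per
    # the S3 docs; values illustrative):
    #   x-amz-restore: ongoing-request="false", expiry-date="Fri, 23 Dec 2012 00:00:00 GMT"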
def handle_addl_headers(self, headers):
"""
Used by Key subclasses to do additional, provider-specific
processing of response headers. No-op for this base class.
"""
pass
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string
(ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
if self.resp is None:
self.mode = 'r'
provider = self.bucket.connection.provider
self.resp = self.bucket.connection.make_request(
'GET', self.bucket.name, self.name, headers,
query_args=query_args,
override_num_retries=override_num_retries)
if self.resp.status < 199 or self.resp.status > 299:
body = self.resp.read()
raise provider.storage_response_error(self.resp.status,
self.resp.reason, body)
response_headers = self.resp.msg
self.metadata = boto.utils.get_aws_metadata(response_headers,
provider)
for name, value in response_headers.items():
# To get correct size for Range GETs, use Content-Range
# header if one was returned. If not, use Content-Length
# header.
if (name.lower() == 'content-length' and
'Content-Range' not in response_headers):
self.size = int(value)
elif name.lower() == 'content-range':
end_range = re.sub('.*/(.*)', '\\1', value)
self.size = int(end_range)
elif name.lower() in Key.base_fields:
self.__dict__[name.lower().replace('-', '_')] = value
self.handle_version_headers(self.resp)
self.handle_encryption_headers(self.resp)
self.handle_restore_headers(self.resp)
self.handle_addl_headers(self.resp.getheaders())
def open_write(self, headers=None, override_num_retries=None):
"""
Open this key for writing.
Not yet implemented
:type headers: dict
:param headers: Headers to pass in the write request
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying PUT.
"""
raise BotoClientError('Not Implemented')
def open(self, mode='r', headers=None, query_args=None,
override_num_retries=None):
if mode == 'r':
self.mode = 'r'
self.open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries)
elif mode == 'w':
self.mode = 'w'
self.open_write(headers=headers,
override_num_retries=override_num_retries)
else:
raise BotoClientError('Invalid mode: %s' % mode)
closed = False
def close(self, fast=False):
"""
Close this key.
:type fast: bool
:param fast: True if you want the connection to be closed without first
reading the content. This should only be used in cases where subsequent
calls don't need to return the content from the open HTTP connection.
Note: As explained at
http://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.getresponse,
callers must read the whole response before sending a new request to the
server. Calling Key.close(fast=True) and making a subsequent request to
the server will work because boto will get an httplib exception and
close/reopen the connection.
"""
if self.resp and not fast:
self.resp.read()
self.resp = None
self.mode = None
self.closed = True
def next(self):
"""
By providing a next method, the key object supports use as an iterator.
For example, you can now say:
for bytes in key:
write bytes to a file or whatever
All of the HTTP connection stuff is handled for you.
"""
self.open_read()
data = self.resp.read(self.BufferSize)
if not data:
self.close()
raise StopIteration
return data
# Python 3 iterator support
__next__ = next
def read(self, size=0):
self.open_read()
if size == 0:
data = self.resp.read()
else:
data = self.resp.read(size)
if not data:
self.close()
return data
def change_storage_class(self, new_storage_class, dst_bucket=None,
validate_dst_bucket=True):
"""
Change the storage class of an existing key.
Depending on whether a different destination bucket is supplied
or not, this will either move the item within the bucket, preserving
all metadata and ACL info bucket changing the storage class or it
will copy the item to the provided destination bucket, also
preserving metadata and ACL info.
:type new_storage_class: string
:param new_storage_class: The new storage class for the Key.
Possible values are:
* STANDARD
* REDUCED_REDUNDANCY
:type dst_bucket: string
:param dst_bucket: The name of a destination bucket. If not
provided the current bucket of the key will be used.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
"""
bucket_name = dst_bucket or self.bucket.name
if new_storage_class == 'STANDARD':
return self.copy(bucket_name, self.name,
reduced_redundancy=False, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
elif new_storage_class == 'REDUCED_REDUNDANCY':
return self.copy(bucket_name, self.name,
reduced_redundancy=True, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
else:
raise BotoClientError('Invalid storage class: %s' %
new_storage_class)
def copy(self, dst_bucket, dst_key, metadata=None,
reduced_redundancy=False, preserve_acl=False,
encrypt_key=False, validate_dst_bucket=True):
"""
Copy this Key to another bucket.
:type dst_bucket: string
:param dst_bucket: The name of the destination bucket
:type dst_key: string
:param dst_key: The name of the destination key
:type metadata: dict
:param metadata: Metadata to be associated with new key. If
metadata is supplied, it will replace the metadata of the
source key being copied. If no metadata is supplied, the
source key's metadata will be copied to the new key.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will force the
storage class of the new Key to be REDUCED_REDUNDANCY
regardless of the storage class of the key being copied.
            The Reduced Redundancy Storage (RRS) feature of S3
provides lower redundancy at lower storage cost.
:type preserve_acl: bool
:param preserve_acl: If True, the ACL from the source key will
be copied to the destination key. If False, the
destination key will have the default ACL. Note that
preserving the ACL in the new key object will require two
additional API calls to S3, one to retrieve the current
ACL and one to set that ACL on the new object. If you
don't care about the ACL, a value of False will be
significantly more efficient.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
"""
dst_bucket = self.bucket.connection.lookup(dst_bucket,
validate_dst_bucket)
if reduced_redundancy:
storage_class = 'REDUCED_REDUNDANCY'
else:
storage_class = self.storage_class
return dst_bucket.copy_key(dst_key, self.bucket.name,
self.name, metadata,
storage_class=storage_class,
preserve_acl=preserve_acl,
encrypt_key=encrypt_key,
src_version_id=self.version_id)
def startElement(self, name, attrs, connection):
if name == 'Owner':
self.owner = User(self)
return self.owner
else:
return None
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
else:
setattr(self, name, value)
def exists(self, headers=None):
"""
Returns True if the key exists
:rtype: bool
:return: Whether the key exists on S3
"""
return bool(self.bucket.lookup(self.name, headers=headers))
def delete(self, headers=None):
"""
Delete this key from S3
"""
return self.bucket.delete_key(self.name, version_id=self.version_id,
headers=headers)
def get_metadata(self, name):
return self.metadata.get(name)
def set_metadata(self, name, value):
# Ensure that metadata that is vital to signing is in the correct
# case. Applies to ``Content-Type`` & ``Content-MD5``.
if name.lower() == 'content-type':
self.metadata['Content-Type'] = value
elif name.lower() == 'content-md5':
self.metadata['Content-MD5'] = value
else:
self.metadata[name] = value
if name.lower() in Key.base_user_settable_fields:
self.__dict__[name.lower().replace('-', '_')] = value
def update_metadata(self, d):
self.metadata.update(d)
# convenience methods for setting/getting ACL
def set_acl(self, acl_str, headers=None):
if self.bucket is not None:
self.bucket.set_acl(acl_str, self.name, headers=headers)
def get_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers)
def get_xml_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers)
def set_xml_acl(self, acl_str, headers=None):
if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)
def set_canned_acl(self, acl_str, headers=None):
return self.bucket.set_canned_acl(acl_str, self.name, headers)
def get_redirect(self):
"""Return the redirect location configured for this key.
If no redirect is configured (via set_redirect), then None
will be returned.
"""
response = self.bucket.connection.make_request(
'HEAD', self.bucket.name, self.name)
if response.status == 200:
return response.getheader('x-amz-website-redirect-location')
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def set_redirect(self, redirect_location, headers=None):
"""Configure this key to redirect to another location.
When the bucket associated with this key is accessed from the website
endpoint, a 301 redirect will be issued to the specified
`redirect_location`.
:type redirect_location: string
:param redirect_location: The location to redirect.
"""
if headers is None:
headers = {}
else:
headers = headers.copy()
headers['x-amz-website-redirect-location'] = redirect_location
response = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers)
if response.status == 200:
return True
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def make_public(self, headers=None):
return self.bucket.set_canned_acl('public-read', self.name, headers)
def generate_url(self, expires_in, method='GET', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None,
policy=None, reduced_redundancy=False, encrypt_key=False):
"""
Generate a URL to access this key.
:type expires_in: int
:param expires_in: How long the url is valid for, in seconds.
:type method: string
:param method: The method to use for retrieving the file
(default is GET).
:type headers: dict
:param headers: Any headers to pass along in the request.
:type query_auth: bool
:param query_auth: If True, signs the request in the URL.
:type force_http: bool
:param force_http: If True, http will be used instead of https.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type expires_in_absolute: bool
:param expires_in_absolute:
:type version_id: string
:param version_id: The version_id of the object to GET. If specified
this overrides any value in the key.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:rtype: string
:return: The URL to access the key
"""
provider = self.bucket.connection.provider
version_id = version_id or self.version_id
if headers is None:
headers = {}
else:
headers = headers.copy()
# add headers accordingly (usually PUT case)
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
return self.bucket.connection.generate_url(expires_in, method,
self.bucket.name, self.name,
headers, query_auth,
force_http,
response_headers,
expires_in_absolute,
version_id)
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None):
"""
Upload a file to a key into a bucket on S3.
:type fp: file
:param fp: The file pointer to upload. The file pointer must
point at the offset from which you wish to upload.
            i.e. if uploading the full file, it should point at the
            start of the file. Normally when a file is opened for
            reading, the fp will point at the first byte. See the
            size parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type query_args: string
:param query_args: (optional) Arguments to pass in the query string.
:type chunked_transfer: boolean
:param chunked_transfer: (optional) If true, we use chunked
Transfer-Encoding.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
"""
self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
def _send_file_internal(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None,
hash_algs=None):
provider = self.bucket.connection.provider
try:
spos = fp.tell()
except IOError:
spos = None
self.read_from_stream = False
# If hash_algs is unset and the MD5 hasn't already been computed,
# default to an MD5 hash_alg to hash the data on-the-fly.
if hash_algs is None and not self.md5:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
def sender(http_conn, method, path, data, headers):
# This function is called repeatedly for temporary retries
# so we must be sure the file pointer is pointing at the
# start of the data.
if spos is not None and spos != fp.tell():
fp.seek(spos)
elif spos is None and self.read_from_stream:
# if seek is not supported, and we've read from this
# stream already, then we need to abort retries to
# avoid setting bad data.
raise provider.storage_data_error(
'Cannot retry failed request. fp does not support seeking.')
# If the caller explicitly specified host header, tell putrequest
# not to add a second host header. Similarly for accept-encoding.
skips = {}
if boto.utils.find_matching_headers('host', headers):
skips['skip_host'] = 1
if boto.utils.find_matching_headers('accept-encoding', headers):
skips['skip_accept_encoding'] = 1
http_conn.putrequest(method, path, **skips)
for key in headers:
http_conn.putheader(key, headers[key])
http_conn.endheaders()
save_debug = self.bucket.connection.debug
self.bucket.connection.debug = 0
# If the debuglevel < 4 we don't want to show connection
# payload, so turn off HTTP connection-level debug output (to
# be restored below).
# Use the getattr approach to allow this to work in AppEngine.
if getattr(http_conn, 'debuglevel', 0) < 4:
http_conn.set_debuglevel(0)
data_len = 0
if cb:
if size:
cb_size = size
elif self.size:
cb_size = self.size
else:
cb_size = 0
if chunked_transfer and cb_size == 0:
# For chunked Transfer, we call the cb for every 1MB
# of data transferred, except when we know size.
                cb_count = (1024 * 1024) // self.BufferSize
elif num_cb > 1:
cb_count = int(
math.ceil(cb_size / self.BufferSize / (num_cb - 1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
bytes_togo = size
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
if spos is None:
# read at least something from a non-seekable fp.
self.read_from_stream = True
while chunk:
chunk_len = len(chunk)
data_len += chunk_len
if chunked_transfer:
http_conn.send('%x;\r\n' % chunk_len)
http_conn.send(chunk)
http_conn.send('\r\n')
else:
http_conn.send(chunk)
for alg in digesters:
digesters[alg].update(chunk)
if bytes_togo:
bytes_togo -= chunk_len
if bytes_togo <= 0:
break
if cb:
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
self.size = data_len
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if chunked_transfer:
http_conn.send('0\r\n')
# http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
http_conn.send('\r\n')
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
http_conn.set_debuglevel(save_debug)
self.bucket.connection.debug = save_debug
response = http_conn.getresponse()
body = response.read()
if not self.should_retry(response, chunked_transfer):
raise provider.storage_response_error(
response.status, response.reason, body)
return response
if not headers:
headers = {}
else:
headers = headers.copy()
# Overwrite user-supplied user-agent.
for header in find_matching_headers('User-Agent', headers):
del headers[header]
headers['User-Agent'] = UserAgent
# If storage_class is None, then a user has not explicitly requested
# a storage class, so we can assume STANDARD here
if self._storage_class not in [None, 'STANDARD']:
headers[provider.storage_class_header] = self.storage_class
if find_matching_headers('Content-Encoding', headers):
self.content_encoding = merge_headers_by_name(
'Content-Encoding', headers)
if find_matching_headers('Content-Language', headers):
self.content_language = merge_headers_by_name(
'Content-Language', headers)
content_type_headers = find_matching_headers('Content-Type', headers)
if content_type_headers:
# Some use cases need to suppress sending of the Content-Type
# header and depend on the receiving server to set the content
# type. This can be achieved by setting headers['Content-Type']
# to None when calling this method.
if (len(content_type_headers) == 1 and
headers[content_type_headers[0]] is None):
# Delete null Content-Type value to skip sending that header.
del headers[content_type_headers[0]]
else:
self.content_type = merge_headers_by_name(
'Content-Type', headers)
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
if self.content_type is None:
self.content_type = self.DefaultContentType
headers['Content-Type'] = self.content_type
else:
headers['Content-Type'] = self.content_type
if self.base64md5:
headers['Content-MD5'] = self.base64md5
if chunked_transfer:
headers['Transfer-Encoding'] = 'chunked'
#if not self.base64md5:
# headers['Trailer'] = "Content-MD5"
else:
headers['Content-Length'] = str(self.size)
# This is terrible. We need a SHA256 of the body for SigV4, but to do
# the chunked ``sender`` behavior above, the ``fp`` isn't available to
        # the auth mechanism (because closures). Detect if it's SigV4 & embellish
# while we can before the auth calculations occur.
if 'hmac-v4-s3' in self.bucket.connection._required_auth_capability():
kwargs = {'fp': fp, 'hash_algorithm': hashlib.sha256}
if size is not None:
kwargs['size'] = size
headers['_sha256'] = compute_hash(**kwargs)[0]
headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
resp = self.bucket.connection.make_request(
'PUT',
self.bucket.name,
self.name,
headers,
sender=sender,
query_args=query_args
)
self.handle_version_headers(resp, force=True)
self.handle_addl_headers(resp.getheaders())
def should_retry(self, response, chunked_transfer=False):
provider = self.bucket.connection.provider
if not chunked_transfer:
if response.status in [500, 503]:
# 500 & 503 can be plain retries.
return True
if response.getheader('location'):
# If there's a redirect, plain retry.
return True
if 200 <= response.status <= 299:
self.etag = response.getheader('etag')
md5 = self.md5
if isinstance(md5, bytes):
md5 = md5.decode('utf-8')
# If you use customer-provided encryption keys, the ETag value that
# Amazon S3 returns in the response will not be the MD5 of the
# object.
server_side_encryption_customer_algorithm = response.getheader(
'x-amz-server-side-encryption-customer-algorithm', None)
if server_side_encryption_customer_algorithm is None:
if self.etag != '"%s"' % md5:
raise provider.storage_data_error(
'ETag from S3 did not match computed MD5. '
'%s vs. %s' % (self.etag, self.md5))
return True
if response.status == 400:
# The 400 must be trapped so the retry handler can check to
# see if it was a timeout.
# If ``RequestTimeout`` is present, we'll retry. Otherwise, bomb
# out.
body = response.read()
err = provider.storage_response_error(
response.status,
response.reason,
body
)
if err.error_code in ['RequestTimeout']:
raise PleaseRetryException(
"Saw %s, retrying" % err.error_code,
response=response
)
return False
def compute_md5(self, fp, size=None):
"""
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file
pointer will be reset to the same position before the
method returns.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being split
            in place into different parts. Fewer bytes may be available.
"""
hex_digest, b64_digest, data_size = compute_md5(fp, size=size)
# Returned values are MD5 hash, base64 encoded MD5 hash, and data size.
# The internal implementation of compute_md5() needs to return the
# data size but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code) so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = data_size
return (hex_digest, b64_digest)
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
reduced_redundancy=False, query_args=None,
size=None):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
        The stream object is not seekable and the total size is not known.
        This has the implication that we can't specify the
        Content-Length and Content-MD5 headers. So for huge
        uploads, the delay of calculating the MD5 is avoided, at the
        cost of being unable to verify the integrity of the uploaded
        data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
"""
provider = self.bucket.connection.provider
if not provider.supports_chunked_transfer():
raise BotoClientError('%s does not support chunked transfer'
% provider.get_provider_name())
# Name of the Object should be specified explicitly for Streams.
        if not self.name:
raise BotoClientError('Cannot determine the destination '
'object name for the given stream')
if headers is None:
headers = {}
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if self.bucket is not None:
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True, size=size)
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False, query_args=None,
encrypt_key=False, size=None, rewind=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file pointed to by 'fp' as the
contents. The data is read from 'fp' from its current position until
'size' bytes have been read or EOF.
:type fp: file
:param fp: the file whose contents to upload
:type headers: dict
:param headers: Additional HTTP headers that will be sent with
the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will
first check to see if an object exists in the bucket with
the same key. If it does, it won't overwrite it. The
default value is True which will overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
:type rewind: bool
:param rewind: (optional) If True, the file pointer (fp) will
be rewound to the start before any bytes are read from
it. The default behaviour is False which reads from the
current position of the file pointer (fp).
:rtype: int
:return: The number of bytes written to the key.
"""
provider = self.bucket.connection.provider
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
if rewind:
# caller requests reading from beginning of fp.
fp.seek(0, os.SEEK_SET)
else:
# The following seek/tell/seek logic is intended
# to detect applications using the older interface to
# set_contents_from_file(), which automatically rewound the
# file each time the Key was reused. This changed with commit
# 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
# split into multiple parts and uploaded in parallel, and at
# the time of that commit this check was added because otherwise
# older programs would get a success status and upload an empty
            # object. Unfortunately, it's very inefficient for fp's implemented
# by KeyFile (used, for example, by gsutil when copying between
# providers). So, we skip the check for the KeyFile case.
# TODO: At some point consider removing this seek/tell/seek
# logic, after enough time has passed that it's unlikely any
# programs remain that assume the older auto-rewind interface.
if not isinstance(fp, KeyFile):
spos = fp.tell()
fp.seek(0, os.SEEK_END)
if fp.tell() == spos:
fp.seek(0, os.SEEK_SET)
if fp.tell() != spos:
# Raise an exception as this is likely a programming
# error whereby there is data before the fp but nothing
# after it.
fp.seek(spos)
raise AttributeError('fp is at EOF. Use rewind option '
'or seek() to data start.')
# seek back to the correct position.
fp.seek(spos)
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
        # TODO - What if provider doesn't support reduced redundancy?
# What if different providers provide different classes?
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket is not None:
if not md5 and provider.supports_chunked_transfer():
# defer md5 calculation to on the fly and
# we don't know anything about size yet.
chunked_transfer = True
self.size = None
else:
chunked_transfer = False
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
size = key.size - fp.tell()
self.size = size
# At present both GCS and S3 use MD5 for the etag for
# non-multipart-uploaded objects. If the etag is 32 hex
# chars use it as an MD5, to avoid having to read the file
# twice while transferring.
if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
etag = key.etag.strip('"')
md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
if not md5:
# compute_md5() and also set self.size to actual
# size of the bytes read computing the md5.
md5 = self.compute_md5(fp, size)
# adjust size if required
size = self.size
elif size:
self.size = size
else:
                    # If md5 is provided, we still need the size, so
                    # calculate it based on the bytes to the end of content.
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
# return number of bytes written.
return self.size
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto S3
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file
if it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
            redundancy at lower storage cost.
        :type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object
will be encrypted on the server-side by S3 and will be
stored in an encrypted form while at rest in S3.
:rtype: int
:return: The number of bytes written to the key.
"""
with open(filename, 'rb') as fp:
return self.set_contents_from_file(fp, headers, replace, cb,
num_cb, policy, md5,
reduced_redundancy,
encrypt_key=encrypt_key)
def set_contents_from_string(self, string_data, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
            cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
"""
if not isinstance(string_data, bytes):
string_data = string_data.encode("utf-8")
fp = BytesIO(string_data)
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key)
fp.close()
return r
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None):
"""
Retrieves a file from an S3 Key
:type fp: file
:param fp: File pointer to put the data into
        :type headers: dict
        :param headers: Headers to send when retrieving the file
:type cb: function
:param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
integer parameters, the first representing the number of
            bytes that have been successfully transmitted from S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
torrent=torrent, version_id=version_id,
override_num_retries=override_num_retries,
response_headers=response_headers,
hash_algs=None,
query_args=None)
def _get_file_internal(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, hash_algs=None, query_args=None):
if headers is None:
headers = {}
save_debug = self.bucket.connection.debug
if self.bucket.connection.debug == 1:
self.bucket.connection.debug = 0
query_args = query_args or []
if torrent:
query_args.append('torrent')
if hash_algs is None and not torrent:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
# If a version_id is passed in, use that. If not, check to see
# if the Key object has an explicit version_id and, if so, use that.
# Otherwise, don't pass a version_id query param.
if version_id is None:
version_id = self.version_id
if version_id:
query_args.append('versionId=%s' % version_id)
if response_headers:
for key in response_headers:
query_args.append('%s=%s' % (
key, urllib.parse.quote(response_headers[key])))
query_args = '&'.join(query_args)
self.open('r', headers, query_args=query_args,
override_num_retries=override_num_retries)
data_len = 0
if cb:
if self.size is None:
cb_size = 0
else:
cb_size = self.size
if self.size is None and num_cb != -1:
# If size is not available due to chunked transfer for example,
# we'll call the cb for every 1MB of data transferred.
                cb_count = (1024 * 1024) // self.BufferSize
elif num_cb > 1:
cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
try:
for bytes in self:
fp.write(bytes)
data_len += len(bytes)
for alg in digesters:
digesters[alg].update(bytes)
if cb:
if cb_size > 0 and data_len >= cb_size:
break
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
except IOError as e:
if e.errno == errno.ENOSPC:
raise StorageDataError('Out of space for destination file '
'%s' % fp.name)
raise
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if self.size is None and not torrent and "Range" not in headers:
self.size = data_len
self.close()
self.bucket.connection.debug = save_debug
def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
"""
        Get a torrent file (see get_file)
:type fp: file
:param fp: The file pointer of where to put the torrent
:type headers: dict
:param headers: Headers to be passed
:type cb: function
:param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
integer parameters, the first representing the number of
            bytes that have been successfully transmitted from S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
"""
return self.get_file(fp, headers, cb, num_cb, torrent=True)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Write the contents of the object to the file pointed
to by 'fp'.
        :type fp: file-like object
:param fp:
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
integer parameters, the first representing the number of
            bytes that have been successfully transmitted from S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
        :param torrent: If True, retrieves the contents of the
            object's torrent file instead of the object itself.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
def get_contents_to_filename(self, filename, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Store contents of the object to a file named by 'filename'.
See get_contents_to_file method for details about the
parameters.
:type filename: string
:param filename: The filename of where to put the file contents
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
integer parameters, the first representing the number of
            bytes that have been successfully transmitted from S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
        :param torrent: If True, retrieves the contents of the
            object's torrent file instead of the object itself.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
try:
with open(filename, 'wb') as fp:
self.get_contents_to_file(fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id,
res_download_handler=res_download_handler,
response_headers=response_headers)
except Exception:
os.remove(filename)
raise
# if last_modified date was sent from s3, try to set file's timestamp
if self.last_modified is not None:
try:
modified_tuple = email.utils.parsedate_tz(self.last_modified)
modified_stamp = int(email.utils.mktime_tz(modified_tuple))
os.utime(fp.name, (modified_stamp, modified_stamp))
except Exception:
pass
def get_contents_as_string(self, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
response_headers=None, encoding=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Return the contents of the object as a string.
See get_contents_to_file method for details about the
parameters.
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
integer parameters, the first representing the number of
            bytes that have been successfully transmitted from S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
:type encoding: str
:param encoding: The text encoding to use, such as ``utf-8``
or ``iso-8859-1``. If set, then a string will be returned.
Defaults to ``None`` and returns bytes.
:rtype: bytes or str
:returns: The contents of the file as bytes or a string
"""
fp = BytesIO()
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
value = fp.getvalue()
if encoding is not None:
value = value.decode(encoding)
return value
def add_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email grant
to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type email_address: string
:param email_address: The email address associated with the AWS
            account you are granting the permission to.
:type recursive: boolean
        :param recursive: A boolean value that controls whether the
command will apply the grant to all keys within the bucket
or not. The default value is False. By passing a True
value, the call will iterate through all keys in the
bucket and apply the same grant to each key. CAUTION: If
you have a lot of keys, this could take a long time!
"""
policy = self.get_acl(headers=headers)
policy.acl.add_email_grant(permission, email_address)
self.set_acl(policy, headers=headers)
def add_user_grant(self, permission, user_id, headers=None,
display_name=None):
"""
Convenience method that provides a quick way to add a canonical
user grant to a key. This method retrieves the current ACL,
creates a new grant based on the parameters passed in, adds that
grant to the ACL and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type user_id: string
:param user_id: The canonical user id associated with the AWS
            account you are granting the permission to.
:type display_name: string
        :param display_name: An optional string containing the user's
Display Name. Only required on Walrus.
"""
policy = self.get_acl(headers=headers)
policy.acl.add_user_grant(permission, user_id,
display_name=display_name)
self.set_acl(policy, headers=headers)
def _normalize_metadata(self, metadata):
        if isinstance(metadata, set):
norm_metadata = set()
for k in metadata:
norm_metadata.add(k.lower())
else:
norm_metadata = {}
for k in metadata:
norm_metadata[k.lower()] = metadata[k]
return norm_metadata
def _get_remote_metadata(self, headers=None):
"""
Extracts metadata from existing URI into a dict, so we can
overwrite/delete from it to form the new set of metadata to apply to a
key.
"""
metadata = {}
for underscore_name in self._underscore_base_user_settable_fields:
if hasattr(self, underscore_name):
value = getattr(self, underscore_name)
if value:
# Generate HTTP field name corresponding to "_" named field.
field_name = underscore_name.replace('_', '-')
metadata[field_name.lower()] = value
# self.metadata contains custom metadata, which are all user-settable.
prefix = self.provider.metadata_prefix
for underscore_name in self.metadata:
field_name = underscore_name.replace('_', '-')
metadata['%s%s' % (prefix, field_name.lower())] = (
self.metadata[underscore_name])
return metadata
def set_remote_metadata(self, metadata_plus, metadata_minus, preserve_acl,
headers=None):
metadata_plus = self._normalize_metadata(metadata_plus)
metadata_minus = self._normalize_metadata(metadata_minus)
metadata = self._get_remote_metadata()
metadata.update(metadata_plus)
for h in metadata_minus:
if h in metadata:
del metadata[h]
src_bucket = self.bucket
# Boto prepends the meta prefix when adding headers, so strip prefix in
# metadata before sending back in to copy_key() call.
rewritten_metadata = {}
for h in metadata:
if (h.startswith('x-goog-meta-') or h.startswith('x-amz-meta-')):
rewritten_h = (h.replace('x-goog-meta-', '')
.replace('x-amz-meta-', ''))
else:
rewritten_h = h
rewritten_metadata[rewritten_h] = metadata[h]
metadata = rewritten_metadata
src_bucket.copy_key(self.name, self.bucket.name, self.name,
metadata=metadata, preserve_acl=preserve_acl,
headers=headers)
def restore(self, days, headers=None):
"""Restore an object from an archive.
:type days: int
:param days: The lifetime of the restored object (must
be at least 1 day). If the object is already restored
then this parameter can be used to readjust the lifetime
of the restored object. In this case, the days
param is with respect to the initial time of the request.
If the object has not been restored, this param is with
respect to the completion time of the request.
"""
response = self.bucket.connection.make_request(
'POST', self.bucket.name, self.name,
data=self.RestoreBody % days,
headers=headers, query_args='restore')
if response.status not in (200, 202):
provider = self.bucket.connection.provider
raise provider.storage_response_error(response.status,
response.reason,
response.read())
| 1 | 11,964 | I don't understand why you want to populate the storage class in this case, if the S3 docs say they won't populate the header in this case? | boto-boto | py |
@@ -47,6 +47,10 @@ public interface CapabilityType {
String HAS_TOUCHSCREEN = "hasTouchScreen";
String OVERLAPPING_CHECK_DISABLED = "overlappingCheckDisabled";
String STRICT_FILE_INTERACTABILITY = "strictFileInteractability";
+ String TIMEOUTS = "timeouts";
+ String IMPLICIT_TIMEOUT = "implicit";
+ String PAGE_LOAD_TIMEOUT = "pageLoad";
+ String SCRIPT_TIMEOUT = "script";
String LOGGING_PREFS = "loggingPrefs";
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote;
/**
* Commonly seen remote webdriver capabilities.
*/
public interface CapabilityType {
String BROWSER_NAME = "browserName";
@Deprecated String PLATFORM = "platform";
String PLATFORM_NAME = "platformName";
String SUPPORTS_JAVASCRIPT = "javascriptEnabled";
String TAKES_SCREENSHOT = "takesScreenshot";
String VERSION = "version";
String BROWSER_VERSION = "browserVersion";
String SUPPORTS_ALERTS = "handlesAlerts";
String SUPPORTS_SQL_DATABASE = "databaseEnabled";
String SUPPORTS_LOCATION_CONTEXT = "locationContextEnabled";
String SUPPORTS_APPLICATION_CACHE = "applicationCacheEnabled";
String SUPPORTS_NETWORK_CONNECTION = "networkConnectionEnabled";
String PROXY = "proxy";
String SUPPORTS_WEB_STORAGE = "webStorageEnabled";
String ROTATABLE = "rotatable";
String APPLICATION_NAME = "applicationName";
// Enable this capability to accept all SSL certs by defaults.
String ACCEPT_SSL_CERTS = "acceptSslCerts";
String ACCEPT_INSECURE_CERTS = "acceptInsecureCerts";
String HAS_NATIVE_EVENTS = "nativeEvents";
String UNEXPECTED_ALERT_BEHAVIOUR = "unexpectedAlertBehaviour";
String UNHANDLED_PROMPT_BEHAVIOUR = "unhandledPromptBehavior";
String ELEMENT_SCROLL_BEHAVIOR = "elementScrollBehavior";
String HAS_TOUCHSCREEN = "hasTouchScreen";
String OVERLAPPING_CHECK_DISABLED = "overlappingCheckDisabled";
String STRICT_FILE_INTERACTABILITY = "strictFileInteractability";
String LOGGING_PREFS = "loggingPrefs";
String ENABLE_PROFILING_CAPABILITY = "webdriver.logging.profiler.enabled";
String PAGE_LOAD_STRATEGY = "pageLoadStrategy";
interface ForSeleniumServer {
String AVOIDING_PROXY = "avoidProxy";
String ONLY_PROXYING_SELENIUM_TRAFFIC = "onlyProxySeleniumTraffic";
String PROXYING_EVERYTHING = "proxyEverything";
String PROXY_PAC = "proxy_pac";
String ENSURING_CLEAN_SESSION = "ensureCleanSession";
}
}
| 1 | 19,072 | These are really meant to be the keys in the capabilities, not the keys of values within the capabilities | SeleniumHQ-selenium | java |
@@ -14,8 +14,11 @@ import (
//
// The request will not be signed, and will not use your AWS credentials.
//
-// A "NotFound" error code will be returned if the bucket does not exist in
-// the AWS partition the regionHint belongs to.
+// A "NotFound" error code will be returned if the bucket does not exist in the
+// AWS partition the regionHint belongs to. If the regionHint parameter is an
+// AWS partition the regionHint belongs to. If the regionHint parameter is an
+// empty string GetBucketRegion will fall back to the ConfigProvider's region
+// config. If no region is specified, hint or ConfigProvider, an error will
+// be returned.
//
// For example to get the region of a bucket which exists in "eu-central-1"
// you could provide a region hint of "us-west-2". | 1 | package s3manager
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
)
// GetBucketRegion will attempt to get the region for a bucket using the
// regionHint to determine which AWS partition to perform the query on.
//
// The request will not be signed, and will not use your AWS credentials.
//
// A "NotFound" error code will be returned if the bucket does not exist in
// the AWS partition the regionHint belongs to.
//
// For example to get the region of a bucket which exists in "eu-central-1"
// you could provide a region hint of "us-west-2".
//
// sess := session.Must(session.NewSession())
//
// bucket := "my-bucket"
// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
// if err != nil {
// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
//          fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
// }
// return err
// }
// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
//
func GetBucketRegion(ctx aws.Context, c client.ConfigProvider, bucket, regionHint string, opts ...request.Option) (string, error) {
svc := s3.New(c, &aws.Config{
Region: aws.String(regionHint),
})
return GetBucketRegionWithClient(ctx, svc, bucket, opts...)
}
const bucketRegionHeader = "X-Amz-Bucket-Region"
// GetBucketRegionWithClient is the same as GetBucketRegion with the exception
// that it takes a S3 service client instead of a Session. The regionHint is
// derived from the region the S3 service client was created in.
//
// See GetBucketRegion for more information.
func GetBucketRegionWithClient(ctx aws.Context, svc s3iface.S3API, bucket string, opts ...request.Option) (string, error) {
req, _ := svc.HeadBucketRequest(&s3.HeadBucketInput{
Bucket: aws.String(bucket),
})
req.Config.S3ForcePathStyle = aws.Bool(true)
req.Config.Credentials = credentials.AnonymousCredentials
req.SetContext(ctx)
// Disable HTTP redirects to prevent an invalid 301 from eating the response
	// because Go's HTTP client will fail, and drop the response if a 301 is
// received without a location header. S3 will return a 301 without the
// location header for HeadObject API calls.
req.DisableFollowRedirects = true
var bucketRegion string
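	// The Send handler below captures the X-Amz-Bucket-Region header from the
	// raw response and, when the header is present, rewrites the response to
	// 200 OK so req.Send treats the probe as successful even though S3 answers
	// the cross-region HeadBucket with a non-2xx status.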
req.Handlers.Send.PushBack(func(r *request.Request) {
bucketRegion = r.HTTPResponse.Header.Get(bucketRegionHeader)
if len(bucketRegion) == 0 {
return
}
r.HTTPResponse.StatusCode = 200
r.HTTPResponse.Status = "OK"
r.Error = nil
})
req.ApplyOptions(opts...)
if err := req.Send(); err != nil {
return "", err
}
bucketRegion = s3.NormalizeBucketLocation(bucketRegion)
return bucketRegion, nil
}
| 1 | 9,116 | Should the last sentence be `If no region was found` rather than `specified`? | aws-aws-sdk-go | go |
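
For illustration, a minimal sketch of the fallback the new doc text describes; the bucket name is hypothetical, and the session stands in for the ConfigProvider whose region is used when the hint is empty:

	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	// Empty regionHint: GetBucketRegion falls back to the session's region config.
	region, err := s3manager.GetBucketRegion(context.Background(), sess, "my-bucket", "")
	if err != nil {
		return err
	}
	fmt.Println("bucket is in", region)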
@@ -217,9 +217,16 @@ public class CoxPlugin extends Plugin
{
for (Player player : client.getPlayers())
{
- if (player.getName().equals(tpMatcher.group(1)))
+ final String rawPlayerName = player.getName();
+
+ if (rawPlayerName != null)
{
- victims.add(new Victim(player, Victim.Type.TELEPORT));
+ final String fixedPlayerName = Text.sanitize(rawPlayerName);
+
+ if (fixedPlayerName.equals(tpMatcher.group(1)))
+ {
+ victims.add(new Victim(player, Victim.Type.TELEPORT));
+ }
}
}
} | 1 | /*
* Copyright (c) 2019, xzact <https://github.com/xzact>
* Copyright (c) 2019, ganom <https://github.com/Ganom>
* Copyright (c) 2019, gazivodag <https://github.com/gazivodag>
* Copyright (c) 2019, lyzrds <https://discord.gg/5eb9Fe>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.client.plugins.coxhelper;
import com.google.inject.Provides;
import java.awt.Color;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.inject.Inject;
import javax.inject.Singleton;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import net.runelite.api.Actor;
import net.runelite.api.AnimationID;
import net.runelite.api.ChatMessageType;
import net.runelite.api.Client;
import net.runelite.api.GraphicID;
import net.runelite.api.GraphicsObject;
import net.runelite.api.NPC;
import net.runelite.api.NpcID;
import net.runelite.api.Player;
import net.runelite.api.Projectile;
import net.runelite.api.ProjectileID;
import net.runelite.api.Varbits;
import net.runelite.api.coords.WorldPoint;
import net.runelite.api.events.ChatMessage;
import net.runelite.api.events.ConfigChanged;
import net.runelite.api.events.GameTick;
import net.runelite.api.events.NpcDespawned;
import net.runelite.api.events.NpcSpawned;
import net.runelite.api.events.ProjectileSpawned;
import net.runelite.api.events.SpotAnimationChanged;
import net.runelite.client.chat.ChatMessageManager;
import net.runelite.client.config.ConfigManager;
import net.runelite.client.eventbus.EventBus;
import net.runelite.client.plugins.Plugin;
import net.runelite.client.plugins.PluginDescriptor;
import net.runelite.client.plugins.PluginType;
import net.runelite.client.ui.overlay.OverlayManager;
import net.runelite.api.util.Text;
@PluginDescriptor(
name = "CoX Helper",
description = "All-in-one plugin for Chambers of Xeric",
tags = {"CoX", "chamber", "xeric", "helper"},
type = PluginType.PVM,
enabledByDefault = false
)
@Slf4j
@Singleton
@Getter(AccessLevel.PACKAGE)
public class CoxPlugin extends Plugin
{
private static final int ANIMATION_ID_G1 = 430;
private static final Pattern TP_REGEX = Pattern.compile("You have been paired with <col=ff0000>(.*)</col>! The magical power will enact soon...");
@Inject
@Getter(AccessLevel.NONE)
private Client client;
@Inject
@Getter(AccessLevel.NONE)
private ChatMessageManager chatMessageManager;
@Inject
@Getter(AccessLevel.NONE)
private CoxOverlay coxOverlay;
@Inject
@Getter(AccessLevel.NONE)
private CoxInfoBox coxInfoBox;
@Inject
@Getter(AccessLevel.NONE)
private CoxConfig config;
@Inject
@Getter(AccessLevel.NONE)
private OverlayManager overlayManager;
@Inject
@Getter(AccessLevel.NONE)
private EventBus eventBus;
private boolean handCripple;
private boolean runOlm;
private int vanguards;
private boolean tektonActive;
private NPC hand;
private NPC Olm_NPC;
private List<WorldPoint> Olm_Heal = new ArrayList<>();
private List<WorldPoint> Olm_TP = new ArrayList<>();
private Set<Victim> victims = new HashSet<>();
private Actor acidTarget;
private int crippleTimer = 45;
private int teleportTicks = 10;
private int tektonAttackTicks;
private int OlmPhase = 0;
private int Olm_TicksUntilAction = -1;
	private int Olm_ActionCycle = -1; // 4:0 = auto, 3:0 = null, 2:0 = auto, 1:0 = spec, then actionCycle resets to 4
	private int Olm_NextSpec = -1; // 1 = crystals, 2 = lightning, 3 = portals, 4 = heal hand if p4
private Map<NPC, NPCContainer> npcContainer = new HashMap<>();
@Setter(AccessLevel.PACKAGE)
private PrayAgainst prayAgainstOlm;
private long lastPrayTime;
private int sleepcount = 0;
private boolean muttadile;
private boolean tekton;
private boolean tektonTickCounter;
private boolean guardians;
private boolean guardinTickCounter;
private boolean vangHighlight;
private boolean vangHealth;
private boolean configPrayAgainstOlm;
private boolean timers;
private boolean tpOverlay;
private boolean olmTick;
private int prayAgainstSize;
private Color muttaColor;
private Color guardColor;
private Color tektonColor;
private Color burnColor;
private Color acidColor;
private Color tpColor;
private CoxConfig.FontStyle fontStyle;
private int textSize;
private boolean shadows;
@Provides
CoxConfig getConfig(ConfigManager configManager)
{
return configManager.getConfig(CoxConfig.class);
}
@Override
protected void startUp()
{
updateConfig();
addSubscriptions();
overlayManager.add(coxOverlay);
overlayManager.add(coxInfoBox);
handCripple = false;
hand = null;
Olm_TP.clear();
prayAgainstOlm = null;
victims.clear();
crippleTimer = 45;
teleportTicks = 10;
vanguards = 0;
}
@Override
protected void shutDown()
{
eventBus.unregister(this);
overlayManager.remove(coxOverlay);
overlayManager.remove(coxInfoBox);
}
private void addSubscriptions()
{
eventBus.subscribe(ConfigChanged.class, this, this::onConfigChanged);
eventBus.subscribe(ChatMessage.class, this, this::onChatMessage);
eventBus.subscribe(ProjectileSpawned.class, this, this::onProjectileSpawned);
eventBus.subscribe(SpotAnimationChanged.class, this, this::onSpotAnimationChanged);
eventBus.subscribe(NpcSpawned.class, this, this::onNpcSpawned);
eventBus.subscribe(NpcDespawned.class, this, this::onNpcDespawned);
eventBus.subscribe(GameTick.class, this, this::onGameTick);
}
private void onConfigChanged(ConfigChanged event)
{
if (event.getGroup().equals("Cox"))
{
updateConfig();
}
}
private void onChatMessage(ChatMessage event)
{
if (!inRaid())
{
return;
}
if (event.getType() == ChatMessageType.GAMEMESSAGE)
{
final Matcher tpMatcher = TP_REGEX.matcher(event.getMessage());
if (tpMatcher.matches())
{
for (Player player : client.getPlayers())
{
if (player.getName().equals(tpMatcher.group(1)))
{
victims.add(new Victim(player, Victim.Type.TELEPORT));
}
}
}
switch (Text.standardize(event.getMessageNode().getValue()))
{
case "the great olm rises with the power of acid.":
case "the great olm rises with the power of crystal.":
case "the great olm rises with the power of flame.":
case "the great olm is giving its all. this is its final stand.":
if (!runOlm)
{
Olm_ActionCycle = -1;
Olm_TicksUntilAction = 4;
}
else
{
Olm_ActionCycle = -1;
Olm_TicksUntilAction = 3;
}
OlmPhase = 0;
runOlm = true;
crippleTimer = 45;
Olm_NextSpec = -1;
break;
case "the great olm fires a sphere of aggression your way. your prayers have been sapped.":
case "the great olm fires a sphere of aggression your way.":
prayAgainstOlm = PrayAgainst.MELEE;
lastPrayTime = System.currentTimeMillis();
break;
case "the great olm fires a sphere of magical power your way. your prayers have been sapped.":
case "the great olm fires a sphere of magical power your way.":
prayAgainstOlm = PrayAgainst.MAGIC;
lastPrayTime = System.currentTimeMillis();
break;
case "the great olm fires a sphere of accuracy and dexterity your way. your prayers have been sapped.":
case "the great olm fires a sphere of accuracy and dexterity your way.":
prayAgainstOlm = PrayAgainst.RANGED;
lastPrayTime = System.currentTimeMillis();
break;
case "the great olm's left claw clenches to protect itself temporarily.":
handCripple = true;
}
}
}
private void onProjectileSpawned(ProjectileSpawned event)
{
if (!inRaid())
{
return;
}
final Projectile projectile = event.getProjectile();
switch (projectile.getId())
{
case ProjectileID.OLM_MAGE_ATTACK:
prayAgainstOlm = PrayAgainst.MAGIC;
lastPrayTime = System.currentTimeMillis();
break;
case ProjectileID.OLM_RANGE_ATTACK:
prayAgainstOlm = PrayAgainst.RANGED;
lastPrayTime = System.currentTimeMillis();
break;
case ProjectileID.OLM_ACID_TRAIL:
acidTarget = projectile.getInteracting();
break;
}
}
private void onSpotAnimationChanged(SpotAnimationChanged event)
{
if (!inRaid())
{
return;
}
if (!(event.getActor() instanceof Player))
{
return;
}
final Player player = (Player) event.getActor();
if (player.getSpotAnimation() == GraphicID.OLM_BURN)
{
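			// Count existing victim entries for this player so each burn target is only tracked once.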
int add = 0;
for (Victim victim : victims)
{
if (victim.getPlayer().getName().equals(player.getName()))
{
add++;
}
}
if (add == 0)
{
victims.add(new Victim(player, Victim.Type.BURN));
}
}
}
private void onNpcSpawned(NpcSpawned event)
{
if (!inRaid())
{
return;
}
final NPC npc = event.getNpc();
switch (npc.getId())
{
case NpcID.TEKTON:
case NpcID.TEKTON_7541:
case NpcID.TEKTON_7542:
case NpcID.TEKTON_7545:
case NpcID.TEKTON_ENRAGED:
case NpcID.TEKTON_ENRAGED_7544:
npcContainer.put(npc, new NPCContainer(npc));
tektonAttackTicks = 27;
break;
case NpcID.MUTTADILE:
case NpcID.MUTTADILE_7562:
case NpcID.MUTTADILE_7563:
case NpcID.GUARDIAN:
case NpcID.GUARDIAN_7570:
npcContainer.put(npc, new NPCContainer(npc));
break;
case NpcID.VANGUARD:
case NpcID.VANGUARD_7526:
case NpcID.VANGUARD_7527:
case NpcID.VANGUARD_7528:
case NpcID.VANGUARD_7529:
vanguards++;
npcContainer.put(npc, new NPCContainer(npc));
break;
case NpcID.GREAT_OLM_LEFT_CLAW:
case NpcID.GREAT_OLM_LEFT_CLAW_7555:
hand = npc;
break;
case NpcID.GREAT_OLM:
Olm_NPC = npc;
}
}
private void onNpcDespawned(NpcDespawned event)
{
if (!inRaid())
{
return;
}
final NPC npc = event.getNpc();
switch (npc.getId())
{
case NpcID.TEKTON:
case NpcID.TEKTON_7541:
case NpcID.TEKTON_7542:
case NpcID.TEKTON_7545:
case NpcID.TEKTON_ENRAGED:
case NpcID.TEKTON_ENRAGED_7544:
case NpcID.MUTTADILE:
case NpcID.MUTTADILE_7562:
case NpcID.MUTTADILE_7563:
case NpcID.GUARDIAN:
case NpcID.GUARDIAN_7570:
case NpcID.GUARDIAN_7571:
case NpcID.GUARDIAN_7572:
if (npcContainer.remove(event.getNpc()) != null && !npcContainer.isEmpty())
{
npcContainer.remove(event.getNpc());
}
break;
case NpcID.VANGUARD:
case NpcID.VANGUARD_7526:
case NpcID.VANGUARD_7527:
case NpcID.VANGUARD_7528:
case NpcID.VANGUARD_7529:
if (npcContainer.remove(event.getNpc()) != null && !npcContainer.isEmpty())
{
npcContainer.remove(event.getNpc());
}
vanguards--;
break;
case NpcID.GREAT_OLM_RIGHT_CLAW_7553:
case NpcID.GREAT_OLM_RIGHT_CLAW:
handCripple = false;
break;
}
}
private void onGameTick(GameTick event)
{
if (!inRaid())
{
OlmPhase = 0;
sleepcount = 0;
Olm_Heal.clear();
npcContainer.clear();
victims.clear();
Olm_NPC = null;
hand = null;
prayAgainstOlm = null;
runOlm = false;
return;
}
handleNpcs();
handleVictims();
if (handCripple)
{
crippleTimer--;
if (crippleTimer <= 0)
{
handCripple = false;
crippleTimer = 45;
}
}
if (runOlm)
{
handleOlm();
}
}
private void handleVictims()
{
if (victims.size() > 0)
{
victims.forEach(Victim::updateTicks);
victims.removeIf(victim -> victim.getTicks() <= 0);
}
}
private void handleNpcs()
{
for (NPCContainer npcs : getNpcContainer().values())
{
switch (npcs.getNpc().getId())
{
case NpcID.TEKTON:
case NpcID.TEKTON_7541:
case NpcID.TEKTON_7542:
case NpcID.TEKTON_7545:
case NpcID.TEKTON_ENRAGED:
case NpcID.TEKTON_ENRAGED_7544:
npcs.setTicksUntilAttack(npcs.getTicksUntilAttack() - 1);
npcs.setAttackStyle(NPCContainer.Attackstyle.MELEE);
switch (npcs.getNpc().getAnimation())
{
case AnimationID.TEKTON_AUTO1:
case AnimationID.TEKTON_AUTO2:
case AnimationID.TEKTON_AUTO3:
case AnimationID.TEKTON_ENRAGE_AUTO1:
case AnimationID.TEKTON_ENRAGE_AUTO2:
case AnimationID.TEKTON_ENRAGE_AUTO3:
tektonActive = true;
if (npcs.getTicksUntilAttack() < 1)
{
npcs.setTicksUntilAttack(4);
}
break;
case AnimationID.TEKTON_FAST_AUTO1:
case AnimationID.TEKTON_FAST_AUTO2:
tektonActive = true;
if (npcs.getTicksUntilAttack() < 1)
{
npcs.setTicksUntilAttack(3);
}
break;
case AnimationID.TEKTON_ANVIL:
tektonActive = false;
tektonAttackTicks = 47;
if (npcs.getTicksUntilAttack() < 1)
{
npcs.setTicksUntilAttack(15);
}
}
break;
case NpcID.GUARDIAN:
case NpcID.GUARDIAN_7570:
case NpcID.GUARDIAN_7571:
case NpcID.GUARDIAN_7572:
npcs.setTicksUntilAttack(npcs.getTicksUntilAttack() - 1);
npcs.setAttackStyle(NPCContainer.Attackstyle.MELEE);
if (npcs.getNpc().getAnimation() == ANIMATION_ID_G1 &&
npcs.getTicksUntilAttack() < 1)
{
npcs.setTicksUntilAttack(5);
}
break;
case NpcID.VANGUARD_7529:
if (npcs.getAttackStyle() == NPCContainer.Attackstyle.UNKNOWN)
{
npcs.setAttackStyle(NPCContainer.Attackstyle.MAGE);
}
break;
case NpcID.VANGUARD_7528:
if (npcs.getAttackStyle() == NPCContainer.Attackstyle.UNKNOWN)
{
npcs.setAttackStyle(NPCContainer.Attackstyle.RANGE);
}
break;
case NpcID.VANGUARD_7527:
if (npcs.getAttackStyle() == NPCContainer.Attackstyle.UNKNOWN)
{
npcs.setAttackStyle(NPCContainer.Attackstyle.MELEE);
}
break;
}
}
if (tektonActive && tektonAttackTicks > 0)
{
tektonAttackTicks--;
}
}
private void handleOlm()
{
Olm_Heal.clear();
Olm_TP.clear();
client.clearHintArrow();
sleepcount--;
if (Olm_TicksUntilAction == 1)
{
if (Olm_ActionCycle == 1)
{
Olm_ActionCycle = 4;
Olm_TicksUntilAction = 4;
if (Olm_NextSpec == 1)
{
if (OlmPhase == 1)
{
					Olm_NextSpec = 4; // 4 = heal, 3 = crystals, 2 = lightning, 1 = swap
}
else
{
Olm_NextSpec = 3;
}
}
else
{
Olm_NextSpec--;
}
}
else
{
if (Olm_ActionCycle != -1)
{
Olm_ActionCycle--;
}
Olm_TicksUntilAction = 4;
}
}
else
{
Olm_TicksUntilAction--;
}
for (GraphicsObject o : client.getGraphicsObjects())
{
if (sleepcount <= 0)
{
if (o.getId() == 1338)
{
Olm_TicksUntilAction = 1;
Olm_NextSpec = 2;
Olm_ActionCycle = 4; //spec=1 null=3
sleepcount = 5;
}
if (o.getId() == 1356)
{
Olm_TicksUntilAction = 4;
Olm_NextSpec = 1;
Olm_ActionCycle = 4; //spec=1 null=3
sleepcount = 50;
}
}
if (o.getId() == GraphicID.OLM_TELEPORT)
{
Olm_TP.add(WorldPoint.fromLocal(client, o.getLocation()));
}
if (o.getId() == GraphicID.OLM_HEAL)
{
Olm_Heal.add(WorldPoint.fromLocal(client, o.getLocation()));
}
if (!Olm_TP.isEmpty())
{
teleportTicks--;
if (teleportTicks <= 0)
{
client.clearHintArrow();
teleportTicks = 10;
}
}
}
}
boolean inRaid()
{
return client.getVar(Varbits.IN_RAID) == 1;
}
private void updateConfig()
{
this.muttadile = config.muttadile();
this.tekton = config.tekton();
this.tektonTickCounter = config.tektonTickCounter();
this.guardians = config.guardians();
this.guardinTickCounter = config.guardinTickCounter();
this.vangHighlight = config.vangHighlight();
this.vangHealth = config.vangHealth();
this.configPrayAgainstOlm = config.prayAgainstOlm();
this.timers = config.timers();
this.tpOverlay = config.tpOverlay();
this.olmTick = config.olmTick();
this.muttaColor = config.muttaColor();
this.guardColor = config.guardColor();
this.tektonColor = config.tektonColor();
this.burnColor = config.burnColor();
this.acidColor = config.acidColor();
this.tpColor = config.tpColor();
this.fontStyle = config.fontStyle();
this.textSize = config.textSize();
this.shadows = config.shadows();
this.prayAgainstSize = config.prayAgainstOlmSize();
}
}
 | 1 | 15,811 | Text.sanitize just removes image tags from names, for instance the hardcore ironman symbol when someone talks. A better option would be Text.standardize or Text.toJagexName. | open-osrs-runelite | java |
@@ -81,3 +81,13 @@ func (a *Application) IsOutOfSync() bool {
func IsApplicationConfigFile(filename string) bool {
return filename == DefaultApplicationConfigFilename || strings.HasSuffix(filename, applicationConfigFileExtention)
}
+
+func ToApplicationKind(kind string) (ApplicationKind, bool) {
+ upper := strings.ToUpper(string(kind))
+ k := strings.TrimSuffix(upper, "APP")
+ appKind, ok := ApplicationKind_value[k]
+ if !ok {
+ return -1, false
+ }
+ return ApplicationKind(appKind), true
+} | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"path/filepath"
"strings"
)
const (
DefaultApplicationConfigFilename = ".pipe.yaml"
applicationConfigFileExtention = ".pipecd.yaml"
)
// GetApplicationConfigFilePath returns the path to application configuration file.
func (p ApplicationGitPath) GetApplicationConfigFilePath() string {
filename := DefaultApplicationConfigFilename
if n := p.ConfigFilename; n != "" {
filename = n
}
return filepath.Join(p.Path, filename)
}
// HasChanged checks whether the content of sync state has been changed.
// This ignores the timestamp value.
func (s ApplicationSyncState) HasChanged(next ApplicationSyncState) bool {
if s.Status != next.Status {
return true
}
if s.ShortReason != next.ShortReason {
return true
}
if s.Reason != next.Reason {
return true
}
return false
}
func MakeApplicationURL(baseURL, applicationID string) string {
return fmt.Sprintf("%s/applications/%s", strings.TrimSuffix(baseURL, "/"), applicationID)
}
// ContainLabels checks if it has all the given labels.
func (a *Application) ContainLabels(labels map[string]string) bool {
if len(a.Labels) < len(labels) {
return false
}
for k, v := range labels {
value, ok := a.Labels[k]
if !ok {
return false
}
if value != v {
return false
}
}
return true
}
func (a *Application) IsOutOfSync() bool {
if a.SyncState == nil {
return false
}
return a.SyncState.Status == ApplicationSyncStatus_OUT_OF_SYNC
}
func IsApplicationConfigFile(filename string) bool {
return filename == DefaultApplicationConfigFilename || strings.HasSuffix(filename, applicationConfigFileExtention)
}
 | 1 | 23,462 | How about `ApplicationKindFromConfigKind`? And I think this function would fit better in the config package. The reason is that the `config` package can import and refer to things from the model package, but not vice versa. | pipe-cd-pipe | go |
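
For illustration, a hedged sketch of how the new helper behaves; it assumes the generated ApplicationKind_value map contains an entry such as "KUBERNETES", so a config kind like "KubernetesApp" is uppercased, stripped of its "APP" suffix, and looked up:

	kind, ok := model.ToApplicationKind("KubernetesApp")
	if !ok {
		return fmt.Errorf("unknown application kind")
	}
	fmt.Println(kind) // KUBERNETES, assuming the map entry above exists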
@@ -101,9 +101,14 @@ const (
// Options sets options for constructing a *blob.Bucket backed by Azure Block Blob.
type Options struct {
// Credential represents the authorizer for SignedURL.
- // Required to use SignedURL.
+ // Required to use SignedURL. If you're using MSI for authentication, this will
+ // attempt to be loaded lazily the first time you call SignedURL
Credential azblob.StorageAccountCredential
+ // CredentialExpiration is when the current MSI-obtained signing credential
+ // expires so that we can refresh it on demand
+ CredentialExpiration time.Time
+
// SASToken can be provided along with anonymous credentials to use
// delegated privileges.
// See https://docs.microsoft.com/en-us/azure/storage/common/storage-dotnet-shared-access-signature-part-1#shared-access-signature-parameters. | 1 | // Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package azureblob provides a blob implementation that uses Azure Storage’s
// BlockBlob. Use OpenBucket to construct a *blob.Bucket.
//
// NOTE: SignedURLs for PUT created with this package are not fully portable;
// they will not work unless the PUT request includes a "x-ms-blob-type" header
// set to "BlockBlob".
// See https://stackoverflow.com/questions/37824136/put-on-sas-blob-url-without-specifying-x-ms-blob-type-header.
//
// URLs
//
// For blob.OpenBucket, azureblob registers for the scheme "azblob".
// The default URL opener will use credentials from the environment variables
// AZURE_STORAGE_ACCOUNT, AZURE_STORAGE_KEY, and AZURE_STORAGE_SAS_TOKEN.
// AZURE_STORAGE_ACCOUNT is required, along with one of the other two.
// AZURE_STORAGE_DOMAIN can optionally be used to provide an Azure Environment
// blob storage domain to use. If no AZURE_STORAGE_DOMAIN is provided, the
// default Azure public domain "blob.core.windows.net" will be used. Check
// the Azure Developer Guide for your particular cloud environment to see
// the proper blob storage domain name to provide.
// To customize the URL opener, or for more details on the URL format,
// see URLOpener.
// See https://gocloud.dev/concepts/urls/ for background information.
//
// Escaping
//
// Go CDK supports all UTF-8 strings; to make this work with services lacking
// full UTF-8 support, strings must be escaped (during writes) and unescaped
// (during reads). The following escapes are performed for azureblob:
// - Blob keys: ASCII characters 0-31, 92 ("\"), and 127 are escaped to
// "__0x<hex>__". Additionally, the "/" in "../" and a trailing "/" in a
// key (e.g., "foo/") are escaped in the same way.
// - Metadata keys: Per https://docs.microsoft.com/en-us/azure/storage/blobs/storage-properties-metadata,
// Azure only allows C# identifiers as metadata keys. Therefore, characters
// other than "[a-z][A-Z][0-9]_" are escaped using "__0x<hex>__". In addition,
// characters "[0-9]" are escaped when they start the string.
// URL encoding would not work since "%" is not valid.
// - Metadata values: Escaped using URL encoding.
//
// As
//
// azureblob exposes the following types for As:
// - Bucket: *azblob.ContainerURL
// - Error: azblob.StorageError
// - ListObject: azblob.BlobItemInternal for objects, azblob.BlobPrefix for "directories"
// - ListOptions.BeforeList: *azblob.ListBlobsSegmentOptions
// - Reader: azblob.DownloadResponse
// - Reader.BeforeRead: *azblob.BlockBlobURL, *azblob.BlobAccessConditions
// - Attributes: azblob.BlobGetPropertiesResponse
// - CopyOptions.BeforeCopy: azblob.Metadata, *azblob.ModifiedAccessConditions, *azblob.BlobAccessConditions
// - WriterOptions.BeforeWrite: *azblob.UploadStreamToBlockBlobOptions
// - SignedURLOptions.BeforeSign: *azblob.BlobSASSignatureValues
package azureblob
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/google/uuid"
"github.com/google/wire"
"gocloud.dev/blob"
"gocloud.dev/blob/driver"
"gocloud.dev/gcerrors"
"gocloud.dev/internal/escape"
"gocloud.dev/internal/gcerr"
"gocloud.dev/internal/useragent"
)
const (
tokenRefreshTolerance = 300
)
// Options sets options for constructing a *blob.Bucket backed by Azure Block Blob.
type Options struct {
// Credential represents the authorizer for SignedURL.
// Required to use SignedURL.
Credential azblob.StorageAccountCredential
// SASToken can be provided along with anonymous credentials to use
// delegated privileges.
// See https://docs.microsoft.com/en-us/azure/storage/common/storage-dotnet-shared-access-signature-part-1#shared-access-signature-parameters.
SASToken SASToken
// StorageDomain can be provided to specify an Azure Cloud Environment
// domain to target for the blob storage account (i.e. public, government, china).
// The default value is "blob.core.windows.net". Possible values will look similar
// to this but are different for each cloud (i.e. "blob.core.govcloudapi.net" for USGovernment).
// Check the Azure developer guide for the cloud environment where your bucket resides.
StorageDomain StorageDomain
// Protocol can be provided to specify protocol to access Azure Blob Storage.
// Protocols that can be specified are "http" for local emulator and "https" for general.
// If blank is specified, "https" will be used.
Protocol Protocol
}
const (
defaultMaxDownloadRetryRequests = 3 // download retry policy (Azure default is zero)
defaultPageSize = 1000 // default page size for ListPaged (Azure default is 5000)
defaultUploadBuffers = 5 // configure the number of rotating buffers that are used when uploading (for degree of parallelism)
defaultUploadBlockSize = 8 * 1024 * 1024 // configure the upload buffer size
)
func init() {
blob.DefaultURLMux().RegisterBucket(Scheme, new(lazyCredsOpener))
}
// Set holds Wire providers for this package.
var Set = wire.NewSet(
NewPipeline,
wire.Struct(new(Options), "Credential", "SASToken"),
wire.Struct(new(URLOpener), "AccountName", "Pipeline", "Options"),
)
// lazyCredsOpener obtains credentials from the environment on the first call
// to OpenBucketURL.
type lazyCredsOpener struct {
init sync.Once
opener *URLOpener
err error
}
func (o *lazyCredsOpener) OpenBucketURL(ctx context.Context, u *url.URL) (*blob.Bucket, error) {
o.init.Do(func() {
// Use default credential info from the environment.
// Ignore errors, as we'll get errors from OpenBucket later.
accountName, _ := DefaultAccountName()
accountKey, _ := DefaultAccountKey()
sasToken, _ := DefaultSASToken()
storageDomain, _ := DefaultStorageDomain()
protocol, _ := DefaultProtocol()
isMSIEnvironment := adal.MSIAvailable(ctx, adal.CreateSender())
if accountKey != "" {
o.opener, o.err = openerFromEnv(accountName, accountKey, sasToken, storageDomain, protocol)
} else if isMSIEnvironment {
o.opener, o.err = openerFromMSI(accountName, storageDomain, protocol)
} else {
o.opener, o.err = openerFromAnon(accountName, storageDomain, protocol)
}
})
if o.err != nil {
return nil, fmt.Errorf("open bucket %v: %v", u, o.err)
}
return o.opener.OpenBucketURL(ctx, u)
}
// Scheme is the URL scheme azureblob registers its URLOpener under on
// blob.DefaultMux.
const Scheme = "azblob"
// URLOpener opens Azure URLs like "azblob://mybucket".
//
// The URL host is used as the bucket name.
//
// The following query options are supported:
// - domain: The domain name used to access the Azure Blob storage (e.g. blob.core.windows.net)
type URLOpener struct {
// AccountName must be specified.
AccountName AccountName
// Pipeline must be set to a non-nil value.
Pipeline pipeline.Pipeline
// Options specifies the options to pass to OpenBucket.
Options Options
}
func openerFromEnv(accountName AccountName, accountKey AccountKey, sasToken SASToken, storageDomain StorageDomain, protocol Protocol) (*URLOpener, error) {
// azblob.Credential is an interface; we will use either a SharedKeyCredential
// or anonymous credentials. If the former, we will also fill in
// Options.Credential so that SignedURL will work.
var credential azblob.Credential
var storageAccountCredential azblob.StorageAccountCredential
if accountKey != "" {
sharedKeyCred, err := NewCredential(accountName, accountKey)
if err != nil {
return nil, fmt.Errorf("invalid credentials %s/%s: %v", accountName, accountKey, err)
}
credential = sharedKeyCred
storageAccountCredential = sharedKeyCred
} else {
credential = azblob.NewAnonymousCredential()
}
return &URLOpener{
AccountName: accountName,
Pipeline: NewPipeline(credential, azblob.PipelineOptions{}),
Options: Options{
Credential: storageAccountCredential,
SASToken: sasToken,
StorageDomain: storageDomain,
Protocol: protocol,
},
}, nil
}
// openerFromAnon creates an anonymous credential backend URLOpener
func openerFromAnon(accountName AccountName, storageDomain StorageDomain, protocol Protocol) (*URLOpener, error) {
return &URLOpener{
AccountName: accountName,
Pipeline: NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}),
Options: Options{
StorageDomain: storageDomain,
Protocol: protocol,
},
}, nil
}
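// defaultTokenRefreshFunction builds the refresh callback handed to
// azblob.NewTokenCredential: it refreshes the MSI service principal token,
// installs the new access token on the credential, and returns how long to
// wait before the next refresh (expiry minus tokenRefreshTolerance seconds).
// Returning 0, which is also the error path here, stops further refreshes.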
var defaultTokenRefreshFunction = func(spToken *adal.ServicePrincipalToken) func(credential azblob.TokenCredential) time.Duration {
return func(credential azblob.TokenCredential) time.Duration {
err := spToken.Refresh()
if err != nil {
return 0
}
expiresIn, err := strconv.ParseInt(string(spToken.Token().ExpiresIn), 10, 64)
if err != nil {
return 0
}
credential.SetToken(spToken.Token().AccessToken)
return time.Duration(expiresIn-tokenRefreshTolerance) * time.Second
}
}
// openerFromMSI acquires an MSI token and returns TokenCredential backed URLOpener
func openerFromMSI(accountName AccountName, storageDomain StorageDomain, protocol Protocol) (*URLOpener, error) {
spToken, err := getMSIServicePrincipalToken(azure.PublicCloud.ResourceIdentifiers.Storage)
if err != nil {
return nil, fmt.Errorf("failure acquiring token from MSI endpoint %w", err)
}
err = spToken.Refresh()
if err != nil {
return nil, fmt.Errorf("failure refreshing token from MSI endpoint %w", err)
}
credential := azblob.NewTokenCredential(spToken.Token().AccessToken, defaultTokenRefreshFunction(spToken))
return &URLOpener{
AccountName: accountName,
Pipeline: NewPipeline(credential, azblob.PipelineOptions{}),
Options: Options{
StorageDomain: storageDomain,
Protocol: protocol,
},
}, nil
}
// getMSIServicePrincipalToken retrieves Azure API Service Principal token.
func getMSIServicePrincipalToken(resource string) (*adal.ServicePrincipalToken, error) {
msiEndpoint, err := adal.GetMSIEndpoint()
if err != nil {
return nil, fmt.Errorf("failed to get the managed service identity endpoint: %v", err)
}
token, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, resource)
if err != nil {
return nil, fmt.Errorf("failed to create the managed service identity token: %v", err)
}
return token, nil
}
// OpenBucketURL opens a blob.Bucket based on u.
func (o *URLOpener) OpenBucketURL(ctx context.Context, u *url.URL) (*blob.Bucket, error) {
opts := new(Options)
*opts = o.Options
err := setOptionsFromURLParams(u.Query(), opts)
if err != nil {
return nil, err
}
return OpenBucket(ctx, o.Pipeline, o.AccountName, u.Host, opts)
}
func setOptionsFromURLParams(q url.Values, o *Options) error {
for param, values := range q {
if len(values) > 1 {
return fmt.Errorf("multiple values of %v not allowed", param)
}
value := values[0]
switch param {
case "domain":
o.StorageDomain = StorageDomain(value)
default:
return fmt.Errorf("unknown query parameter %q", param)
}
}
return nil
}
// DefaultIdentity is a Wire provider set that provides an Azure storage
// account name, key, and SharedKeyCredential from environment variables.
var DefaultIdentity = wire.NewSet(
DefaultAccountName,
DefaultAccountKey,
NewCredential,
wire.Bind(new(azblob.Credential), new(*azblob.SharedKeyCredential)),
wire.Value(azblob.PipelineOptions{}),
)
// SASTokenIdentity is a Wire provider set that provides an Azure storage
// account name, SASToken, and anonymous credential from environment variables.
var SASTokenIdentity = wire.NewSet(
DefaultAccountName,
DefaultSASToken,
azblob.NewAnonymousCredential,
wire.Value(azblob.PipelineOptions{}),
)
// AccountName is an Azure storage account name.
type AccountName string
// AccountKey is an Azure storage account key (primary or secondary).
type AccountKey string
// SASToken is an Azure shared access signature.
// https://docs.microsoft.com/en-us/azure/storage/common/storage-dotnet-shared-access-signature-part-1
type SASToken string
// StorageDomain is an Azure Cloud Environment domain name to target
// (i.e. blob.core.windows.net, blob.core.govcloudapi.net, blob.core.chinacloudapi.cn).
// It is read from the AZURE_STORAGE_DOMAIN environment variable.
type StorageDomain string
// Protocol is an protocol to access Azure Blob Storage.
// It must be "http" or "https".
// It is read from the AZURE_STORAGE_PROTOCOL environment variable.
type Protocol string
// DefaultAccountName loads the Azure storage account name from the
// AZURE_STORAGE_ACCOUNT environment variable.
func DefaultAccountName() (AccountName, error) {
s := os.Getenv("AZURE_STORAGE_ACCOUNT")
if s == "" {
return "", errors.New("azureblob: environment variable AZURE_STORAGE_ACCOUNT not set")
}
return AccountName(s), nil
}
// DefaultAccountKey loads the Azure storage account key (primary or secondary)
// from the AZURE_STORAGE_KEY environment variable.
func DefaultAccountKey() (AccountKey, error) {
s := os.Getenv("AZURE_STORAGE_KEY")
if s == "" {
return "", errors.New("azureblob: environment variable AZURE_STORAGE_KEY not set")
}
return AccountKey(s), nil
}
// DefaultSASToken loads a Azure SAS token from the AZURE_STORAGE_SAS_TOKEN
// environment variable.
func DefaultSASToken() (SASToken, error) {
s := os.Getenv("AZURE_STORAGE_SAS_TOKEN")
if s == "" {
return "", errors.New("azureblob: environment variable AZURE_STORAGE_SAS_TOKEN not set")
}
return SASToken(s), nil
}
// DefaultStorageDomain loads the desired Azure Cloud to target from
// the AZURE_STORAGE_DOMAIN environment variable.
func DefaultStorageDomain() (StorageDomain, error) {
s := os.Getenv("AZURE_STORAGE_DOMAIN")
return StorageDomain(s), nil
}
// DefaultProtocol loads the protocol to access Azure Blob Storage from the
// AZURE_STORAGE_PROTOCOL environment variable.
func DefaultProtocol() (Protocol, error) {
s := os.Getenv("AZURE_STORAGE_PROTOCOL")
return Protocol(s), nil
}
// NewCredential creates a SharedKeyCredential.
func NewCredential(accountName AccountName, accountKey AccountKey) (*azblob.SharedKeyCredential, error) {
return azblob.NewSharedKeyCredential(string(accountName), string(accountKey))
}
// NewPipeline creates a Pipeline for making HTTP requests to Azure.
func NewPipeline(credential azblob.Credential, opts azblob.PipelineOptions) pipeline.Pipeline {
opts.Telemetry.Value = useragent.AzureUserAgentPrefix("blob") + opts.Telemetry.Value
return azblob.NewPipeline(credential, opts)
}
// bucket represents a Azure Storage Account Container, which handles read,
// write and delete operations on objects within it.
// See https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction.
type bucket struct {
name string
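	// pageMarkers maps opaque ListPaged page tokens to Azure continuation markers.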
pageMarkers map[string]azblob.Marker
serviceURL *azblob.ServiceURL
containerURL azblob.ContainerURL
opts *Options
}
// OpenBucket returns a *blob.Bucket backed by Azure Storage Account. See the package
// documentation for an example and
// https://godoc.org/github.com/Azure/azure-storage-blob-go/azblob
// for more details.
func OpenBucket(ctx context.Context, pipeline pipeline.Pipeline, accountName AccountName, containerName string, opts *Options) (*blob.Bucket, error) {
b, err := openBucket(ctx, pipeline, accountName, containerName, opts)
if err != nil {
return nil, err
}
return blob.NewBucket(b), nil
}
func openBucket(ctx context.Context, pipeline pipeline.Pipeline, accountName AccountName, containerName string, opts *Options) (*bucket, error) {
if pipeline == nil {
return nil, errors.New("azureblob.OpenBucket: pipeline is required")
}
if accountName == "" {
return nil, errors.New("azureblob.OpenBucket: accountName is required")
}
if containerName == "" {
return nil, errors.New("azureblob.OpenBucket: containerName is required")
}
if opts == nil {
opts = &Options{}
}
if opts.StorageDomain == "" {
// If opts.StorageDomain is missing, use default domain.
opts.StorageDomain = "blob.core.windows.net"
}
switch opts.Protocol {
case "":
// If opts.Protocol is missing, use "https".
opts.Protocol = "https"
case "https", "http":
default:
return nil, errors.New("azureblob.OpenBucket: protocol must be http or https")
}
d := string(opts.StorageDomain)
var u string
// The URL structure of the local emulator is a bit different from the real one.
if strings.HasPrefix(d, "127.0.0.1") || strings.HasPrefix(d, "localhost") {
u = fmt.Sprintf("%s://%s/%s", opts.Protocol, opts.StorageDomain, accountName) // http://127.0.0.1:10000/devstoreaccount1
} else {
u = fmt.Sprintf("%s://%s.%s", opts.Protocol, accountName, opts.StorageDomain) // https://myaccount.blob.core.windows.net
}
blobURL, err := url.Parse(u)
if err != nil {
return nil, err
}
if opts.SASToken != "" {
// The Azure portal includes a leading "?" for the SASToken, which we
// don't want here.
blobURL.RawQuery = strings.TrimPrefix(string(opts.SASToken), "?")
}
serviceURL := azblob.NewServiceURL(*blobURL, pipeline)
return &bucket{
name: containerName,
pageMarkers: map[string]azblob.Marker{},
serviceURL: &serviceURL,
containerURL: serviceURL.NewContainerURL(containerName),
opts: opts,
}, nil
}
// Close implements driver.Close.
func (b *bucket) Close() error {
return nil
}
// Copy implements driver.Copy.
func (b *bucket) Copy(ctx context.Context, dstKey, srcKey string, opts *driver.CopyOptions) error {
dstKey = escapeKey(dstKey, false)
dstBlobURL := b.containerURL.NewBlobURL(dstKey)
srcKey = escapeKey(srcKey, false)
srcURL := b.containerURL.NewBlobURL(srcKey).URL()
md := azblob.Metadata{}
mac := azblob.ModifiedAccessConditions{}
bac := azblob.BlobAccessConditions{}
at := azblob.AccessTierNone
btm := azblob.BlobTagsMap{}
if opts.BeforeCopy != nil {
asFunc := func(i interface{}) bool {
switch v := i.(type) {
case *azblob.Metadata:
*v = md
return true
case **azblob.ModifiedAccessConditions:
*v = &mac
return true
case **azblob.BlobAccessConditions:
*v = &bac
return true
}
return false
}
if err := opts.BeforeCopy(asFunc); err != nil {
return err
}
}
resp, err := dstBlobURL.StartCopyFromURL(ctx, srcURL, md, mac, bac, at, btm)
if err != nil {
return err
}
copyStatus := resp.CopyStatus()
nErrors := 0
for copyStatus == azblob.CopyStatusPending {
// Poll until the copy is complete.
time.Sleep(500 * time.Millisecond)
propertiesResp, err := dstBlobURL.GetProperties(ctx, azblob.BlobAccessConditions{})
if err != nil {
// A GetProperties failure may be transient, so allow a couple
// of them before giving up.
nErrors++
if ctx.Err() != nil || nErrors == 3 {
return err
}
}
copyStatus = propertiesResp.CopyStatus()
}
if copyStatus != azblob.CopyStatusSuccess {
return fmt.Errorf("Copy failed with status: %s", copyStatus)
}
return nil
}
// Delete implements driver.Delete.
func (b *bucket) Delete(ctx context.Context, key string) error {
key = escapeKey(key, false)
blockBlobURL := b.containerURL.NewBlockBlobURL(key)
_, err := blockBlobURL.Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{})
return err
}
// reader reads an azblob. It implements io.ReadCloser.
type reader struct {
body io.ReadCloser
attrs driver.ReaderAttributes
raw *azblob.DownloadResponse
}
func (r *reader) Read(p []byte) (int, error) {
return r.body.Read(p)
}
func (r *reader) Close() error {
return r.body.Close()
}
func (r *reader) Attributes() *driver.ReaderAttributes {
return &r.attrs
}
func (r *reader) As(i interface{}) bool {
p, ok := i.(*azblob.DownloadResponse)
if !ok {
return false
}
*p = *r.raw
return true
}
// NewRangeReader implements driver.NewRangeReader.
func (b *bucket) NewRangeReader(ctx context.Context, key string, offset, length int64, opts *driver.ReaderOptions) (driver.Reader, error) {
key = escapeKey(key, false)
blockBlobURL := b.containerURL.NewBlockBlobURL(key)
blockBlobURLp := &blockBlobURL
accessConditions := &azblob.BlobAccessConditions{}
end := length
if end < 0 {
end = azblob.CountToEnd
}
if opts.BeforeRead != nil {
asFunc := func(i interface{}) bool {
if p, ok := i.(**azblob.BlockBlobURL); ok {
*p = blockBlobURLp
return true
}
if p, ok := i.(**azblob.BlobAccessConditions); ok {
*p = accessConditions
return true
}
return false
}
if err := opts.BeforeRead(asFunc); err != nil {
return nil, err
}
}
blobDownloadResponse, err := blockBlobURLp.Download(ctx, offset, end, *accessConditions, false)
if err != nil {
return nil, err
}
attrs := driver.ReaderAttributes{
ContentType: blobDownloadResponse.ContentType(),
Size: getSize(blobDownloadResponse.ContentLength(), blobDownloadResponse.ContentRange()),
ModTime: blobDownloadResponse.LastModified(),
}
var body io.ReadCloser
if length == 0 {
body = http.NoBody
} else {
body = blobDownloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: defaultMaxDownloadRetryRequests})
}
return &reader{
body: body,
attrs: attrs,
raw: blobDownloadResponse,
}, nil
}
func getSize(contentLength int64, contentRange string) int64 {
// Default size to ContentLength, but that's incorrect for partial-length reads,
// where ContentLength refers to the size of the returned Body, not the entire
// size of the blob. ContentRange has the full size.
size := contentLength
if contentRange != "" {
// Sample: bytes 10-14/27 (where 27 is the full size).
parts := strings.Split(contentRange, "/")
if len(parts) == 2 {
if i, err := strconv.ParseInt(parts[1], 10, 64); err == nil {
size = i
}
}
}
return size
}
// As implements driver.As.
func (b *bucket) As(i interface{}) bool {
p, ok := i.(**azblob.ContainerURL)
if !ok {
return false
}
*p = &b.containerURL
return true
}
// As implements driver.ErrorAs.
func (b *bucket) ErrorAs(err error, i interface{}) bool {
switch v := err.(type) {
case azblob.StorageError:
if p, ok := i.(*azblob.StorageError); ok {
*p = v
return true
}
}
return false
}
func (b *bucket) ErrorCode(err error) gcerrors.ErrorCode {
serr, ok := err.(azblob.StorageError)
switch {
case !ok:
// This happens with an invalid storage account name; the host
// is something like invalidstorageaccount.blob.core.windows.net.
if strings.Contains(err.Error(), "no such host") {
return gcerrors.NotFound
}
return gcerrors.Unknown
case serr.ServiceCode() == azblob.ServiceCodeBlobNotFound || serr.Response().StatusCode == 404:
// Check and fail both the SDK ServiceCode and the Http Response Code for NotFound
return gcerrors.NotFound
case serr.ServiceCode() == azblob.ServiceCodeAuthenticationFailed:
return gcerrors.PermissionDenied
default:
return gcerrors.Unknown
}
}
// Attributes implements driver.Attributes.
func (b *bucket) Attributes(ctx context.Context, key string) (*driver.Attributes, error) {
key = escapeKey(key, false)
blockBlobURL := b.containerURL.NewBlockBlobURL(key)
blobPropertiesResponse, err := blockBlobURL.GetProperties(ctx, azblob.BlobAccessConditions{})
if err != nil {
return nil, err
}
azureMD := blobPropertiesResponse.NewMetadata()
md := make(map[string]string, len(azureMD))
for k, v := range azureMD {
// See the package comments for more details on escaping of metadata
// keys & values.
md[escape.HexUnescape(k)] = escape.URLUnescape(v)
}
return &driver.Attributes{
CacheControl: blobPropertiesResponse.CacheControl(),
ContentDisposition: blobPropertiesResponse.ContentDisposition(),
ContentEncoding: blobPropertiesResponse.ContentEncoding(),
ContentLanguage: blobPropertiesResponse.ContentLanguage(),
ContentType: blobPropertiesResponse.ContentType(),
Size: blobPropertiesResponse.ContentLength(),
CreateTime: blobPropertiesResponse.CreationTime(),
ModTime: blobPropertiesResponse.LastModified(),
MD5: blobPropertiesResponse.ContentMD5(),
ETag: fmt.Sprintf("%v", blobPropertiesResponse.ETag()),
Metadata: md,
AsFunc: func(i interface{}) bool {
p, ok := i.(*azblob.BlobGetPropertiesResponse)
if !ok {
return false
}
*p = *blobPropertiesResponse
return true
},
}, nil
}
// ListPaged implements driver.ListPaged.
func (b *bucket) ListPaged(ctx context.Context, opts *driver.ListOptions) (*driver.ListPage, error) {
pageSize := opts.PageSize
if pageSize == 0 {
pageSize = defaultPageSize
}
marker := azblob.Marker{}
if len(opts.PageToken) > 0 {
if m, ok := b.pageMarkers[string(opts.PageToken)]; ok {
marker = m
}
}
azOpts := azblob.ListBlobsSegmentOptions{
MaxResults: int32(pageSize),
Prefix: escapeKey(opts.Prefix, true),
}
if opts.BeforeList != nil {
asFunc := func(i interface{}) bool {
p, ok := i.(**azblob.ListBlobsSegmentOptions)
if !ok {
return false
}
*p = &azOpts
return true
}
if err := opts.BeforeList(asFunc); err != nil {
return nil, err
}
}
listBlob, err := b.containerURL.ListBlobsHierarchySegment(ctx, marker, escapeKey(opts.Delimiter, true), azOpts)
if err != nil {
return nil, err
}
page := &driver.ListPage{}
page.Objects = []*driver.ListObject{}
for _, blobPrefix := range listBlob.Segment.BlobPrefixes {
page.Objects = append(page.Objects, &driver.ListObject{
Key: unescapeKey(blobPrefix.Name),
Size: 0,
IsDir: true,
AsFunc: func(i interface{}) bool {
p, ok := i.(*azblob.BlobPrefix)
if !ok {
return false
}
*p = blobPrefix
return true
}})
}
for _, blobInfo := range listBlob.Segment.BlobItems {
page.Objects = append(page.Objects, &driver.ListObject{
Key: unescapeKey(blobInfo.Name),
ModTime: blobInfo.Properties.LastModified,
Size: *blobInfo.Properties.ContentLength,
MD5: blobInfo.Properties.ContentMD5,
IsDir: false,
AsFunc: func(i interface{}) bool {
p, ok := i.(*azblob.BlobItemInternal)
if !ok {
return false
}
*p = blobInfo
return true
},
})
}
if listBlob.NextMarker.NotDone() {
token := uuid.New().String()
b.pageMarkers[token] = listBlob.NextMarker
page.NextPageToken = []byte(token)
}
if len(listBlob.Segment.BlobPrefixes) > 0 && len(listBlob.Segment.BlobItems) > 0 {
sort.Slice(page.Objects, func(i, j int) bool {
return page.Objects[i].Key < page.Objects[j].Key
})
}
return page, nil
}
// SignedURL implements driver.SignedURL.
func (b *bucket) SignedURL(ctx context.Context, key string, opts *driver.SignedURLOptions) (string, error) {
if b.opts.Credential == nil {
return "", gcerr.New(gcerr.Unimplemented, nil, 1, "azureblob: to use SignedURL, you must call OpenBucket with a non-nil Options.Credential")
}
if opts.ContentType != "" || opts.EnforceAbsentContentType {
return "", gcerr.New(gcerr.Unimplemented, nil, 1, "azureblob: does not enforce Content-Type on PUT")
}
key = escapeKey(key, false)
blockBlobURL := b.containerURL.NewBlockBlobURL(key)
srcBlobParts := azblob.NewBlobURLParts(blockBlobURL.URL())
perms := azblob.BlobSASPermissions{}
switch opts.Method {
case http.MethodGet:
perms.Read = true
case http.MethodPut:
perms.Create = true
perms.Write = true
case http.MethodDelete:
perms.Delete = true
default:
return "", fmt.Errorf("unsupported Method %s", opts.Method)
}
signVals := &azblob.BlobSASSignatureValues{
Protocol: azblob.SASProtocolHTTPS,
ExpiryTime: time.Now().UTC().Add(opts.Expiry),
ContainerName: b.name,
BlobName: srcBlobParts.BlobName,
Permissions: perms.String(),
}
if opts.BeforeSign != nil {
asFunc := func(i interface{}) bool {
v, ok := i.(**azblob.BlobSASSignatureValues)
if ok {
*v = signVals
}
return ok
}
if err := opts.BeforeSign(asFunc); err != nil {
return "", err
}
}
var err error
if srcBlobParts.SAS, err = signVals.NewSASQueryParameters(b.opts.Credential); err != nil {
return "", err
}
srcBlobURLWithSAS := srcBlobParts.URL()
return srcBlobURLWithSAS.String(), nil
}
type writer struct {
ctx context.Context
blockBlobURL *azblob.BlockBlobURL
uploadOpts *azblob.UploadStreamToBlockBlobOptions
w *io.PipeWriter
donec chan struct{}
err error
}
// escapeKey does all required escaping for UTF-8 strings to work with Azure.
// isPrefix indicates whether the key is a full key, or a prefix/delimiter.
func escapeKey(key string, isPrefix bool) string {
return escape.HexEscape(key, func(r []rune, i int) bool {
c := r[i]
switch {
// Azure does not work well with backslashes in blob names.
case c == '\\':
return true
// Azure doesn't handle these characters (determined via experimentation).
case c < 32 || c == 127:
return true
// Escape trailing "/" for full keys, otherwise Azure can't address them
// consistently.
case !isPrefix && i == len(key)-1 && c == '/':
return true
// For "../", escape the trailing slash.
case i > 1 && r[i] == '/' && r[i-1] == '.' && r[i-2] == '.':
return true
}
return false
})
}
// unescapeKey reverses escapeKey.
func unescapeKey(key string) string {
return escape.HexUnescape(key)
}
// NewTypedWriter implements driver.NewTypedWriter.
func (b *bucket) NewTypedWriter(ctx context.Context, key string, contentType string, opts *driver.WriterOptions) (driver.Writer, error) {
key = escapeKey(key, false)
blockBlobURL := b.containerURL.NewBlockBlobURL(key)
if opts.BufferSize == 0 {
opts.BufferSize = defaultUploadBlockSize
}
md := make(map[string]string, len(opts.Metadata))
for k, v := range opts.Metadata {
// See the package comments for more details on escaping of metadata
// keys & values.
e := escape.HexEscape(k, func(runes []rune, i int) bool {
c := runes[i]
switch {
case i == 0 && c >= '0' && c <= '9':
return true
case escape.IsASCIIAlphanumeric(c):
return false
case c == '_':
return false
}
return true
})
if _, ok := md[e]; ok {
return nil, fmt.Errorf("duplicate keys after escaping: %q => %q", k, e)
}
md[e] = escape.URLEscape(v)
}
uploadOpts := &azblob.UploadStreamToBlockBlobOptions{
BufferSize: opts.BufferSize,
MaxBuffers: defaultUploadBuffers,
Metadata: md,
BlobHTTPHeaders: azblob.BlobHTTPHeaders{
CacheControl: opts.CacheControl,
ContentDisposition: opts.ContentDisposition,
ContentEncoding: opts.ContentEncoding,
ContentLanguage: opts.ContentLanguage,
ContentMD5: opts.ContentMD5,
ContentType: contentType,
},
}
if opts.BeforeWrite != nil {
asFunc := func(i interface{}) bool {
p, ok := i.(**azblob.UploadStreamToBlockBlobOptions)
if !ok {
return false
}
*p = uploadOpts
return true
}
if err := opts.BeforeWrite(asFunc); err != nil {
return nil, err
}
}
return &writer{
ctx: ctx,
blockBlobURL: &blockBlobURL,
uploadOpts: uploadOpts,
donec: make(chan struct{}),
}, nil
}
// Write appends p to w. User must call Close to close the w after done writing.
func (w *writer) Write(p []byte) (int, error) {
if len(p) == 0 {
return 0, nil
}
if w.w == nil {
pr, pw := io.Pipe()
w.w = pw
if err := w.open(pr); err != nil {
return 0, err
}
}
return w.w.Write(p)
}
func (w *writer) open(pr *io.PipeReader) error {
go func() {
defer close(w.donec)
var body io.Reader
if pr == nil {
body = http.NoBody
} else {
body = pr
}
_, w.err = azblob.UploadStreamToBlockBlob(w.ctx, body, *w.blockBlobURL, *w.uploadOpts)
if w.err != nil {
if pr != nil {
pr.CloseWithError(w.err)
}
return
}
}()
return nil
}
// Close completes the writer and closes it. Any error occurring during write will
// be returned. If a writer is closed before any Write is called, Close will
// create an empty file at the given key.
func (w *writer) Close() error {
if w.w == nil {
w.open(nil)
} else if err := w.w.Close(); err != nil {
return err
}
<-w.donec
return w.err
}
| 1 | 20,259 | Nit: missing closing ".". | google-go-cloud | go |
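
For illustration, a minimal sketch of satisfying the SignedURL requirement with a shared-key credential, using only this package's exported API; the container and key names are hypothetical:

	cred, err := azureblob.NewCredential(accountName, accountKey)
	if err != nil {
		return err
	}
	p := azureblob.NewPipeline(cred, azblob.PipelineOptions{})
	bucket, err := azureblob.OpenBucket(ctx, p, accountName, "my-container",
		&azureblob.Options{Credential: cred})
	if err != nil {
		return err
	}
	url, err := bucket.SignedURL(ctx, "my-key", &blob.SignedURLOptions{Expiry: time.Hour})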
@@ -35,3 +35,6 @@ def testCanClickOnALinkThatOverflowsAndFollowIt(driver):
def testClickingALinkMadeUpOfNumbersIsHandledCorrectly(driver):
driver.find_element(By.LINK_TEXT, "333333").click()
WebDriverWait(driver, 3).until(EC.title_is("XHTML Test Page"))
+
+def testCannotClickDisabledButton(driver):
+ WebDriverWait(driver, 3).until(EC.element_to_be_unclickable(By.ID, "disabled-button")) | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
@pytest.fixture(autouse=True)
def loadPage(pages):
pages.load("clicks.html")
def testCanClickOnALinkThatOverflowsAndFollowIt(driver):
driver.find_element(By.ID, "overflowLink").click()
WebDriverWait(driver, 3).until(EC.title_is("XHTML Test Page"))
def testClickingALinkMadeUpOfNumbersIsHandledCorrectly(driver):
driver.find_element(By.LINK_TEXT, "333333").click()
WebDriverWait(driver, 3).until(EC.title_is("XHTML Test Page"))
 | 1 | 16,343 | I believe it's a misleading name for the condition; I prefer "element_to_be_disable". We can have a case where an element is enabled but we can't click it because another element overlays it. So if we use "unclickable" we might mislead people who use that condition to verify whether an element can be clicked. | SeleniumHQ-selenium | js |
@@ -123,9 +123,9 @@ func CreatePipeline(pipelineName string, provider Provider, stageNames []string)
}, nil
}
-// Marshal serializes the pipeline manifest object into byte array that
+// MarshalBinary serializes the pipeline manifest object into byte array that
// represents the pipeline.yml document.
-func (m *PipelineManifest) Marshal() ([]byte, error) {
+func (m *PipelineManifest) MarshalBinary() ([]byte, error) {
box := templates.Box()
content, err := box.FindString("cicd/pipeline.yml")
if err != nil { | 1 | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"bytes"
"errors"
"fmt"
"text/template"
"github.com/fatih/structs"
"gopkg.in/yaml.v3"
"github.com/aws/amazon-ecs-cli-v2/templates"
)
const (
GithubProviderName = "GitHub"
GithubSecretIdKeyName = "access_token_secret"
)
// Provider defines a source of the artifacts
// that will be built and deployed via a pipeline
type Provider interface {
fmt.Stringer
Name() string
Properties() map[string]interface{}
}
type githubProvider struct {
properties *GitHubProperties
}
func (p *githubProvider) Name() string {
return GithubProviderName
}
func (p *githubProvider) String() string {
return GithubProviderName
}
func (p *githubProvider) Properties() map[string]interface{} {
return structs.Map(p.properties)
}
// GitHubProperties contain information for configuring a Github
// source provider.
type GitHubProperties struct {
// use tag from https://godoc.org/github.com/fatih/structs#example-Map--Tags
// to specify the name of the field in the output properties
// An example for OwnerAndRepository would be: "aws/amazon-ecs-cli-v2"
OwnerAndRepository string `structs:"repository" yaml:"repository"`
Branch string `structs:"branch" yaml:"branch"`
GithubSecretIdKeyName string `structs:"access_token_secret" yaml:"access_token_secret` // TODO fix naming
}
// NewProvider creates a source provider based on the type of
// the provided provider-specific configurations
func NewProvider(configs interface{}) (Provider, error) {
switch props := configs.(type) {
case *GitHubProperties:
return &githubProvider{
properties: props,
}, nil
default:
return nil, &ErrUnknownProvider{unknownProviderProperties: props}
}
}
// PipelineSchemaMajorVersion is the major version number
// of the pipeline manifest schema
type PipelineSchemaMajorVersion int
const (
// Ver1 is the current schema major version of the pipeline.yml file.
Ver1 PipelineSchemaMajorVersion = iota + 1
)
// PipelineManifest contains information that defines the relationship
// and deployment ordering of your environments.
type PipelineManifest struct {
// Name of the pipeline
Name string `yaml:"name"`
Version PipelineSchemaMajorVersion `yaml:"version"`
Source *Source `yaml:"source"`
Stages []PipelineStage `yaml:"stages"`
}
// Source defines the source of the artifacts to be built and deployed.
type Source struct {
ProviderName string `yaml:"provider"`
Properties map[string]interface{} `yaml:"properties"`
}
// PipelineStage represents a stage in the pipeline manifest
type PipelineStage struct {
	Name string `yaml:"name"`
}
// CreatePipeline returns a pipeline manifest object.
func CreatePipeline(pipelineName string, provider Provider, stageNames []string) (*PipelineManifest, error) {
// TODO: #221 Do more validations
if len(stageNames) == 0 {
return nil, fmt.Errorf("a pipeline %s can not be created without a deployment stage",
pipelineName)
}
stages := make([]PipelineStage, 0, len(stageNames))
for _, name := range stageNames {
stages = append(stages, PipelineStage{Name: name})
}
return &PipelineManifest{
Name: pipelineName,
Version: Ver1,
Source: &Source{
ProviderName: provider.Name(),
Properties: provider.Properties(),
},
Stages: stages,
}, nil
}
// Marshal serializes the pipeline manifest object into byte array that
// represents the pipeline.yml document.
func (m *PipelineManifest) Marshal() ([]byte, error) {
box := templates.Box()
content, err := box.FindString("cicd/pipeline.yml")
if err != nil {
return nil, err
}
tpl, err := template.New("pipelineTemplate").Parse(content)
if err != nil {
return nil, err
}
var buf bytes.Buffer
if err := tpl.Execute(&buf, *m); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// UnmarshalPipeline deserializes the YAML input stream into a pipeline
// manifest object. It returns an error if any issue occurs during
// deserialization or the YAML input contains invalid fields.
func UnmarshalPipeline(in []byte) (*PipelineManifest, error) {
pm := PipelineManifest{}
err := yaml.Unmarshal(in, &pm)
if err != nil {
return nil, err
}
var version PipelineSchemaMajorVersion
if version, err = validateVersion(&pm); err != nil {
return nil, err
}
// TODO: #221 Do more validations
switch version {
case Ver1:
return &pm, nil
}
// we should never reach here, this is just to make the compiler happy
return nil, errors.New("unexpected error occurs while unmarshalling pipeline.yml")
}
func validateVersion(pm *PipelineManifest) (PipelineSchemaMajorVersion, error) {
switch pm.Version {
case Ver1:
return Ver1, nil
default:
return pm.Version,
&ErrInvalidPipelineManifestVersion{
invalidVersion: pm.Version,
}
}
}
| 1 | 11,995 | Same here. Should this be pipeline YAML file? Like `MarshalPipelineManifest` | aws-copilot-cli | go |
@@ -213,6 +213,9 @@ func (i *Instance) Restart(newCaddyfile Input) (*Instance, error) {
}
i.Stop()
+ // Execute instantiation events
+ EmitEvent(InstanceStartupEvent, newInst)
+
log.Println("[INFO] Reloading complete")
return newInst, nil | 1 | // Copyright 2015 Light Code Labs, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package caddy implements the Caddy server manager.
//
// To use this package:
//
// 1. Set the AppName and AppVersion variables.
// 2. Call LoadCaddyfile() to get the Caddyfile.
// Pass in the name of the server type (like "http").
// Make sure the server type's package is imported
// (import _ "github.com/mholt/caddy/caddyhttp").
// 3. Call caddy.Start() to start Caddy. You get back
// an Instance, on which you can call Restart() to
// restart it or Stop() to stop it.
//
// You should call Wait() on your instance to wait for
// all servers to quit before your process exits.
package caddy
import (
"bytes"
"encoding/gob"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/mholt/caddy/caddyfile"
)
// Configurable application parameters
var (
// AppName is the name of the application.
AppName string
// AppVersion is the version of the application.
AppVersion string
// Quiet mode will not show any informative output on initialization.
Quiet bool
// PidFile is the path to the pidfile to create.
PidFile string
// GracefulTimeout is the maximum duration of a graceful shutdown.
GracefulTimeout time.Duration
// isUpgrade will be set to true if this process
// was started as part of an upgrade, where a parent
// Caddy process started this one.
isUpgrade = os.Getenv("CADDY__UPGRADE") == "1"
// started will be set to true when the first
// instance is started; it never gets set to
// false after that.
started bool
// mu protects the variables 'isUpgrade' and 'started'.
mu sync.Mutex
)
// Instance contains the state of servers created as a result of
// calling Start and can be used to access or control those servers.
type Instance struct {
// serverType is the name of the instance's server type
serverType string
// caddyfileInput is the input configuration text used for this process
caddyfileInput Input
// wg is used to wait for all servers to shut down
wg *sync.WaitGroup
// context is the context created for this instance.
context Context
// servers is the list of servers with their listeners.
servers []ServerListener
// these callbacks execute when certain events occur
onFirstStartup []func() error // starting, not as part of a restart
onStartup []func() error // starting, even as part of a restart
onRestart []func() error // before restart commences
onShutdown []func() error // stopping, even as part of a restart
onFinalShutdown []func() error // stopping, not as part of a restart
}
// Servers returns the ServerListeners in i.
func (i *Instance) Servers() []ServerListener { return i.servers }
// Stop stops all servers contained in i. It does NOT
// execute shutdown callbacks.
func (i *Instance) Stop() error {
// stop the servers
for _, s := range i.servers {
if gs, ok := s.server.(GracefulServer); ok {
if err := gs.Stop(); err != nil {
log.Printf("[ERROR] Stopping %s: %v", gs.Address(), err)
}
}
}
// splice i out of instance list, causing it to be garbage-collected
instancesMu.Lock()
for j, other := range instances {
if other == i {
instances = append(instances[:j], instances[j+1:]...)
break
}
}
instancesMu.Unlock()
return nil
}
// ShutdownCallbacks executes all the shutdown callbacks of i,
// including ones that are scheduled only for the final shutdown
// of i. An error returned from one does not stop execution of
// the rest. All the non-nil errors will be returned.
func (i *Instance) ShutdownCallbacks() []error {
var errs []error
for _, shutdownFunc := range i.onShutdown {
err := shutdownFunc()
if err != nil {
errs = append(errs, err)
}
}
for _, finalShutdownFunc := range i.onFinalShutdown {
err := finalShutdownFunc()
if err != nil {
errs = append(errs, err)
}
}
return errs
}
// Restart replaces the servers in i with new servers created from
// executing the newCaddyfile. Upon success, it returns the new
// instance to replace i. Upon failure, i will not be replaced.
func (i *Instance) Restart(newCaddyfile Input) (*Instance, error) {
log.Println("[INFO] Reloading")
i.wg.Add(1)
defer i.wg.Done()
// run restart callbacks
for _, fn := range i.onRestart {
err := fn()
if err != nil {
return i, err
}
}
if newCaddyfile == nil {
newCaddyfile = i.caddyfileInput
}
// Add file descriptors of all the sockets that are capable of it
restartFds := make(map[string]restartTriple)
for _, s := range i.servers {
gs, srvOk := s.server.(GracefulServer)
ln, lnOk := s.listener.(Listener)
pc, pcOk := s.packet.(PacketConn)
if srvOk {
if lnOk && pcOk {
restartFds[gs.Address()] = restartTriple{server: gs, listener: ln, packet: pc}
continue
}
if lnOk {
restartFds[gs.Address()] = restartTriple{server: gs, listener: ln}
continue
}
if pcOk {
restartFds[gs.Address()] = restartTriple{server: gs, packet: pc}
continue
}
}
}
// create new instance; if the restart fails, it is simply discarded
newInst := &Instance{serverType: newCaddyfile.ServerType(), wg: i.wg}
// attempt to start new instance
err := startWithListenerFds(newCaddyfile, newInst, restartFds)
if err != nil {
return i, err
}
// success! stop the old instance
for _, shutdownFunc := range i.onShutdown {
err := shutdownFunc()
if err != nil {
return i, err
}
}
i.Stop()
log.Println("[INFO] Reloading complete")
return newInst, nil
}
// SaveServer adds s and its associated listener ln to the
// internally-kept list of servers that is running. For
// saved servers, graceful restarts will be provided.
func (i *Instance) SaveServer(s Server, ln net.Listener) {
i.servers = append(i.servers, ServerListener{server: s, listener: ln})
}
// HasListenerWithAddress returns whether this package is
// tracking a server using a listener with the address
// addr.
func HasListenerWithAddress(addr string) bool {
instancesMu.Lock()
defer instancesMu.Unlock()
for _, inst := range instances {
for _, sln := range inst.servers {
if listenerAddrEqual(sln.listener, addr) {
return true
}
}
}
return false
}
// listenerAddrEqual compares a listener's address with
// addr. Extra care is taken to match addresses with an
// empty hostname portion, as listeners tend to report
// [::]:80, for example, when the matching address that
// created the listener might be simply :80.
func listenerAddrEqual(ln net.Listener, addr string) bool {
lnAddr := ln.Addr().String()
hostname, port, err := net.SplitHostPort(addr)
if err != nil {
return lnAddr == addr
}
if lnAddr == net.JoinHostPort("::", port) {
return true
}
if lnAddr == net.JoinHostPort("0.0.0.0", port) {
return true
}
return hostname != "" && lnAddr == addr
}
// TCPServer is a type that can listen and serve connections.
// A TCPServer must associate with exactly zero or one net.Listeners.
type TCPServer interface {
// Listen starts listening by creating a new listener
// and returning it. It does not start accepting
// connections. For UDP-only servers, this method
// can be a no-op that returns (nil, nil).
Listen() (net.Listener, error)
// Serve starts serving using the provided listener.
// Serve must start the server loop nearly immediately,
// or at least not return any errors before the server
// loop begins. Serve blocks indefinitely, or in other
// words, until the server is stopped. For UDP-only
// servers, this method can be a no-op that returns nil.
Serve(net.Listener) error
}
// UDPServer is a type that can listen and serve packets.
// A UDPServer must associate with exactly zero or one net.PacketConns.
type UDPServer interface {
// ListenPacket starts listening by creating a new packetconn
// and returning it. It does not start accepting connections.
// TCP-only servers may leave this method blank and return
// (nil, nil).
ListenPacket() (net.PacketConn, error)
// ServePacket starts serving using the provided packetconn.
// ServePacket must start the server loop nearly immediately,
// or at least not return any errors before the server
// loop begins. ServePacket blocks indefinitely, or in other
// words, until the server is stopped. For TCP-only servers,
// this method can be a no-op that returns nil.
ServePacket(net.PacketConn) error
}
// Server is a type that can listen and serve. It supports both
// TCP and UDP, although the UDPServer interface can be used
// for more than just UDP.
//
// If the server uses TCP, it should implement TCPServer completely.
// If it uses UDP or some other protocol, it should implement
// UDPServer completely. If it uses both, both interfaces should be
// fully implemented. Any unimplemented methods should be made as
// no-ops that simply return nil values.
type Server interface {
TCPServer
UDPServer
}
// Stopper is a type that can stop serving. The stop
// does not necessarily have to be graceful.
type Stopper interface {
// Stop stops the server. It blocks until the
// server is completely stopped.
Stop() error
}
// GracefulServer is a Server and Stopper, the stopping
// of which is graceful (whatever that means for the kind
// of server being implemented). It must be able to return
// the address it is configured to listen on so that its
// listener can be paired with it upon graceful restarts.
// The net.Listener that a GracefulServer creates must
// implement the Listener interface for restarts to be
// graceful (assuming the listener is for TCP).
type GracefulServer interface {
Server
Stopper
// Address returns the address the server should
// listen on; it is used to pair the server to
// its listener during a graceful/zero-downtime
// restart. Thus when implementing this method,
// you must not access a listener to get the
// address; you must store the address the
// server is to serve on some other way.
Address() string
}
// Listener is a net.Listener with an underlying file descriptor.
// A server's listener should implement this interface if it is
// to support zero-downtime reloads.
type Listener interface {
net.Listener
File() (*os.File, error)
}
// PacketConn is a net.PacketConn with an underlying file descriptor.
// A server's packetconn should implement this interface if it is
// to support zero-downtime reloads (insofar as this holds true for datagram
// connections).
type PacketConn interface {
net.PacketConn
File() (*os.File, error)
}
// AfterStartup is an interface that can be implemented
// by a server type that wants to run some code after all
// servers for the same Instance have started.
type AfterStartup interface {
OnStartupComplete()
}
// LoadCaddyfile loads a Caddyfile by calling the plugged in
// Caddyfile loader methods. An error is returned if more than
// one loader returns a non-nil Caddyfile input. If no loaders
// load a Caddyfile, the default loader is used. If no default
// loader is registered or it returns nil, the server type's
// default Caddyfile is loaded. If the server type does not
// specify any default Caddyfile value, then an empty Caddyfile
// is returned. Consequently, this function never returns a nil
// value as long as there are no errors.
func LoadCaddyfile(serverType string) (Input, error) {
// If we are finishing an upgrade, we must obtain the Caddyfile
// from our parent process, regardless of configured loaders.
if IsUpgrade() {
err := gob.NewDecoder(os.Stdin).Decode(&loadedGob)
if err != nil {
return nil, err
}
return loadedGob.Caddyfile, nil
}
// Ask plugged-in loaders for a Caddyfile
cdyfile, err := loadCaddyfileInput(serverType)
if err != nil {
return nil, err
}
// Otherwise revert to default
if cdyfile == nil {
cdyfile = DefaultInput(serverType)
}
// Still nil? Geez.
if cdyfile == nil {
cdyfile = CaddyfileInput{ServerTypeName: serverType}
}
return cdyfile, nil
}
// Wait blocks until all of i's servers have stopped.
func (i *Instance) Wait() {
i.wg.Wait()
}
// CaddyfileFromPipe loads the Caddyfile input from f if f is
// not interactive input. f is assumed to be a pipe or stream,
// such as os.Stdin. If f is not a pipe, no error is returned
// but the Input value will be nil. An error is only returned
// if there was an error reading the pipe, even if the length
// of what was read is 0.
func CaddyfileFromPipe(f *os.File, serverType string) (Input, error) {
fi, err := f.Stat()
if err == nil && fi.Mode()&os.ModeCharDevice == 0 {
// Note that a non-nil error is not a problem. Windows
// will not create a stdin if there is no pipe, which
// produces an error when calling Stat(). But Unix will
// make one either way, which is why we also check that
// bitmask.
// NOTE: Reading from stdin after this fails (e.g. for the let's encrypt email address) (OS X)
confBody, err := ioutil.ReadAll(f)
if err != nil {
return nil, err
}
return CaddyfileInput{
Contents: confBody,
Filepath: f.Name(),
ServerTypeName: serverType,
}, nil
}
// not having input from the pipe is not itself an error,
// just means no input to return.
return nil, nil
}
// Caddyfile returns the Caddyfile used to create i.
func (i *Instance) Caddyfile() Input {
return i.caddyfileInput
}
// Start starts Caddy with the given Caddyfile.
//
// This function blocks until all the servers are listening.
func Start(cdyfile Input) (*Instance, error) {
inst := &Instance{serverType: cdyfile.ServerType(), wg: new(sync.WaitGroup)}
err := startWithListenerFds(cdyfile, inst, nil)
if err != nil {
return inst, err
}
signalSuccessToParent()
if pidErr := writePidFile(); pidErr != nil {
log.Printf("[ERROR] Could not write pidfile: %v", pidErr)
}
return inst, nil
}
func startWithListenerFds(cdyfile Input, inst *Instance, restartFds map[string]restartTriple) error {
if cdyfile == nil {
cdyfile = CaddyfileInput{}
}
err := ValidateAndExecuteDirectives(cdyfile, inst, false)
if err != nil {
return err
}
slist, err := inst.context.MakeServers()
if err != nil {
return err
}
// run startup callbacks
if !IsUpgrade() && restartFds == nil {
// first startup means not a restart or upgrade
for _, firstStartupFunc := range inst.onFirstStartup {
err := firstStartupFunc()
if err != nil {
return err
}
}
}
for _, startupFunc := range inst.onStartup {
err := startupFunc()
if err != nil {
return err
}
}
err = startServers(slist, inst, restartFds)
if err != nil {
return err
}
instancesMu.Lock()
instances = append(instances, inst)
instancesMu.Unlock()
// run any AfterStartup callbacks if this is not
// part of a restart; then show file descriptor notice
if restartFds == nil {
for _, srvln := range inst.servers {
if srv, ok := srvln.server.(AfterStartup); ok {
srv.OnStartupComplete()
}
}
if !Quiet {
for _, srvln := range inst.servers {
if !IsLoopback(srvln.listener.Addr().String()) {
checkFdlimit()
break
}
}
}
}
mu.Lock()
started = true
mu.Unlock()
return nil
}
// ValidateAndExecuteDirectives will load the server blocks from cdyfile
// by parsing it, then execute the directives configured by it and store
// the resulting server blocks into inst. If justValidate is true, parse
// callbacks will not be executed between directives, since the purpose
// is only to check the input for valid syntax.
func ValidateAndExecuteDirectives(cdyfile Input, inst *Instance, justValidate bool) error {
// If parsing only inst will be nil, create an instance for this function call only.
if justValidate {
inst = &Instance{serverType: cdyfile.ServerType(), wg: new(sync.WaitGroup)}
}
stypeName := cdyfile.ServerType()
stype, err := getServerType(stypeName)
if err != nil {
return err
}
inst.caddyfileInput = cdyfile
sblocks, err := loadServerBlocks(stypeName, cdyfile.Path(), bytes.NewReader(cdyfile.Body()))
if err != nil {
return err
}
inst.context = stype.NewContext()
if inst.context == nil {
return fmt.Errorf("server type %s produced a nil Context", stypeName)
}
sblocks, err = inst.context.InspectServerBlocks(cdyfile.Path(), sblocks)
if err != nil {
return err
}
err = executeDirectives(inst, cdyfile.Path(), stype.Directives(), sblocks, justValidate)
if err != nil {
return err
}
return nil
}
func executeDirectives(inst *Instance, filename string,
directives []string, sblocks []caddyfile.ServerBlock, justValidate bool) error {
// map of server block ID to map of directive name to whatever.
storages := make(map[int]map[string]interface{})
// It is crucial that directives are executed in the proper order.
// We loop with the directives on the outer loop so we execute
// a directive for all server blocks before going to the next directive.
// This is important mainly due to the parsing callbacks (below).
for _, dir := range directives {
for i, sb := range sblocks {
var once sync.Once
if _, ok := storages[i]; !ok {
storages[i] = make(map[string]interface{})
}
for j, key := range sb.Keys {
// Execute directive if it is in the server block
if tokens, ok := sb.Tokens[dir]; ok {
controller := &Controller{
instance: inst,
Key: key,
Dispenser: caddyfile.NewDispenserTokens(filename, tokens),
OncePerServerBlock: func(f func() error) error {
var err error
once.Do(func() {
err = f()
})
return err
},
ServerBlockIndex: i,
ServerBlockKeyIndex: j,
ServerBlockKeys: sb.Keys,
ServerBlockStorage: storages[i][dir],
}
setup, err := DirectiveAction(inst.serverType, dir)
if err != nil {
return err
}
err = setup(controller)
if err != nil {
return err
}
storages[i][dir] = controller.ServerBlockStorage // persist for this server block
}
}
}
if !justValidate {
// See if there are any callbacks to execute after this directive
if allCallbacks, ok := parsingCallbacks[inst.serverType]; ok {
callbacks := allCallbacks[dir]
for _, callback := range callbacks {
if err := callback(inst.context); err != nil {
return err
}
}
}
}
}
return nil
}
func startServers(serverList []Server, inst *Instance, restartFds map[string]restartTriple) error {
errChan := make(chan error, len(serverList))
for _, s := range serverList {
var (
ln net.Listener
pc net.PacketConn
err error
)
// if performing an upgrade, obtain listener file descriptors
// from parent process
if IsUpgrade() {
if gs, ok := s.(GracefulServer); ok {
addr := gs.Address()
if fdIndex, ok := loadedGob.ListenerFds["tcp"+addr]; ok {
file := os.NewFile(fdIndex, "")
ln, err = net.FileListener(file)
file.Close()
if err != nil {
return err
}
}
if fdIndex, ok := loadedGob.ListenerFds["udp"+addr]; ok {
file := os.NewFile(fdIndex, "")
pc, err = net.FilePacketConn(file)
file.Close()
if err != nil {
return err
}
}
}
}
// If this is a reload and s is a GracefulServer,
// reuse the listener for a graceful restart.
if gs, ok := s.(GracefulServer); ok && restartFds != nil {
addr := gs.Address()
if old, ok := restartFds[addr]; ok {
// listener
if old.listener != nil {
file, err := old.listener.File()
if err != nil {
return err
}
ln, err = net.FileListener(file)
if err != nil {
return err
}
file.Close()
}
// packetconn
if old.packet != nil {
file, err := old.packet.File()
if err != nil {
return err
}
pc, err = net.FilePacketConn(file)
if err != nil {
return err
}
file.Close()
}
}
}
if ln == nil {
ln, err = s.Listen()
if err != nil {
return err
}
}
if pc == nil {
pc, err = s.ListenPacket()
if err != nil {
return err
}
}
inst.wg.Add(2)
go func(s Server, ln net.Listener, pc net.PacketConn, inst *Instance) {
defer inst.wg.Done()
go func() {
errChan <- s.Serve(ln)
defer inst.wg.Done()
}()
errChan <- s.ServePacket(pc)
}(s, ln, pc, inst)
inst.servers = append(inst.servers, ServerListener{server: s, listener: ln, packet: pc})
}
// Log errors that may be returned from Serve() calls,
// these errors should only be occurring in the server loop.
go func() {
for err := range errChan {
if err == nil {
continue
}
if strings.Contains(err.Error(), "use of closed network connection") {
// this error is normal when closing the listener
continue
}
log.Println(err)
}
}()
return nil
}
func getServerType(serverType string) (ServerType, error) {
stype, ok := serverTypes[serverType]
if ok {
return stype, nil
}
if len(serverTypes) == 0 {
return ServerType{}, fmt.Errorf("no server types plugged in")
}
if serverType == "" {
if len(serverTypes) == 1 {
for _, stype := range serverTypes {
return stype, nil
}
}
return ServerType{}, fmt.Errorf("multiple server types available; must choose one")
}
return ServerType{}, fmt.Errorf("unknown server type '%s'", serverType)
}
func loadServerBlocks(serverType, filename string, input io.Reader) ([]caddyfile.ServerBlock, error) {
validDirectives := ValidDirectives(serverType)
serverBlocks, err := caddyfile.Parse(filename, input, validDirectives)
if err != nil {
return nil, err
}
if len(serverBlocks) == 0 && serverTypes[serverType].DefaultInput != nil {
newInput := serverTypes[serverType].DefaultInput()
serverBlocks, err = caddyfile.Parse(newInput.Path(),
bytes.NewReader(newInput.Body()), validDirectives)
if err != nil {
return nil, err
}
}
return serverBlocks, nil
}
// Stop stops ALL servers. It blocks until they are all stopped.
// It does NOT execute shutdown callbacks, and it deletes all
// instances after stopping is completed. Do not re-use any
// references to old instances after calling Stop.
func Stop() error {
// This awkward for loop is to avoid a deadlock since
// inst.Stop() also acquires the instancesMu lock.
for {
instancesMu.Lock()
if len(instances) == 0 {
break
}
inst := instances[0]
instancesMu.Unlock()
if err := inst.Stop(); err != nil {
log.Printf("[ERROR] Stopping %s: %v", inst.serverType, err)
}
}
return nil
}
// IsLoopback returns true if the hostname of addr looks
// explicitly like a common local hostname. addr must only
// be a host or a host:port combination.
func IsLoopback(addr string) bool {
host, _, err := net.SplitHostPort(addr)
if err != nil {
host = addr // happens if the addr is just a hostname
}
return host == "localhost" ||
strings.Trim(host, "[]") == "::1" ||
strings.HasPrefix(host, "127.")
}
// IsInternal returns true if the IP of addr
// belongs to a private network IP range. addr must only
// be an IP or an IP:port combination.
// Loopback addresses are considered false.
func IsInternal(addr string) bool {
privateNetworks := []string{
"10.0.0.0/8",
"172.16.0.0/12",
"192.168.0.0/16",
"fc00::/7",
}
host, _, err := net.SplitHostPort(addr)
if err != nil {
host = addr // happens if the addr is just a hostname, missing port
// if we encounter an error, the brackets need to be stripped
// because SplitHostPort didn't do it for us
host = strings.Trim(host, "[]")
}
ip := net.ParseIP(host)
if ip == nil {
return false
}
for _, privateNetwork := range privateNetworks {
_, ipnet, _ := net.ParseCIDR(privateNetwork)
if ipnet.Contains(ip) {
return true
}
}
return false
}
// Started returns true if at least one instance has been
// started by this package. It never gets reset to false
// once it is set to true.
func Started() bool {
mu.Lock()
defer mu.Unlock()
return started
}
// CaddyfileInput represents a Caddyfile as input
// and is simply a convenient way to implement
// the Input interface.
type CaddyfileInput struct {
Filepath string
Contents []byte
ServerTypeName string
}
// Body returns c.Contents.
func (c CaddyfileInput) Body() []byte { return c.Contents }
// Path returns c.Filepath.
func (c CaddyfileInput) Path() string { return c.Filepath }
// ServerType returns c.ServerType.
func (c CaddyfileInput) ServerType() string { return c.ServerTypeName }
// Input represents a Caddyfile; its contents and file path
// (which should include the file name at the end of the path).
// If path does not apply (e.g. piped input) you may use
// any understandable value. The path is mainly used for logging,
// error messages, and debugging.
type Input interface {
// Gets the Caddyfile contents
Body() []byte
// Gets the path to the origin file
Path() string
// The type of server this input is intended for
ServerType() string
}
// DefaultInput returns the default Caddyfile input
// to use when it is otherwise empty or missing.
// It uses the default host and port (depends on
// host, e.g. localhost is 2015, otherwise 443) and
// root.
func DefaultInput(serverType string) Input {
if _, ok := serverTypes[serverType]; !ok {
return nil
}
if serverTypes[serverType].DefaultInput == nil {
return nil
}
return serverTypes[serverType].DefaultInput()
}
// writePidFile writes the process ID to the file at PidFile.
// It does nothing if PidFile is not set.
func writePidFile() error {
if PidFile == "" {
return nil
}
pid := []byte(strconv.Itoa(os.Getpid()) + "\n")
return ioutil.WriteFile(PidFile, pid, 0644)
}
type restartTriple struct {
server GracefulServer
listener Listener
packet PacketConn
}
var (
// instances is the list of running Instances.
instances []*Instance
// instancesMu protects instances.
instancesMu sync.Mutex
)
var (
// DefaultConfigFile is the name of the configuration file that is loaded
// by default if no other file is specified.
DefaultConfigFile = "Caddyfile"
)
// CtxKey is a value type for use with context.WithValue.
type CtxKey string
| 1 | 11,481 | I will add this to my PR | caddyserver-caddy | go |
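The diff above calls `EmitEvent(InstanceStartupEvent, newInst)` during a reload. A minimal sketch of such a hook registry, reusing the names from the diff; the registry itself is an illustration, not Caddy's actual implementation:

```go
package main

import "fmt"

// EventName identifies a lifecycle event; InstanceStartupEvent mirrors the diff.
type EventName string

const InstanceStartupEvent EventName = "instance-startup"

var eventHooks = map[EventName][]func(info interface{}){}

// RegisterEventHook subscribes fn to the named event.
func RegisterEventHook(name EventName, fn func(info interface{})) {
	eventHooks[name] = append(eventHooks[name], fn)
}

// EmitEvent runs every hook registered for the named event, in order.
func EmitEvent(name EventName, info interface{}) {
	for _, fn := range eventHooks[name] {
		fn(info)
	}
}

func main() {
	RegisterEventHook(InstanceStartupEvent, func(info interface{}) {
		fmt.Println("instance started:", info)
	})
	EmitEvent(InstanceStartupEvent, "newInst") // as in Restart() above
}
```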
@@ -19,4 +19,8 @@ class RolePolicy < ApplicationPolicy
def destroy?
@role.plan.owned_by?(@user.id)
end
+
+ def archive?
+ @role.user_id == @user.id
+ end
end | 1 | class RolePolicy < ApplicationPolicy
attr_reader :user
attr_reader :role
def initialize(user, role)
raise Pundit::NotAuthorizedError, "must be logged in" unless user
@user = user
@role = role
end
def create?
@role.plan.administerable_by?(@user.id)
end
def update?
@role.plan.administerable_by?(@user.id)
end
def destroy?
@role.plan.owned_by?(@user.id)
end
end | 1 | 16,817 | Again not 100% sold on the name | DMPRoadmap-roadmap | rb |
@@ -226,8 +226,9 @@ func runWeb(ctx *cli.Context) error {
m.Group("/user/settings", func() {
m.Get("", user.Settings)
m.Post("", bindIgnErr(auth.UpdateProfileForm{}), user.SettingsPost)
- m.Post("/avatar", binding.MultipartForm(auth.UploadAvatarForm{}), user.SettingsAvatar)
+ m.Post("/avatar", binding.MultipartForm(auth.AvatarForm{}), user.SettingsAvatarPost)
m.Post("/avatar/delete", user.SettingsDeleteAvatar)
+ m.Get("/avatar", user.SettingsAvatar)
m.Combo("/email").Get(user.SettingsEmails).
Post(bindIgnErr(auth.AddEmailForm{}), user.SettingsEmailPost)
m.Post("/email/delete", user.DeleteEmail) | 1 | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package cmd
import (
"crypto/tls"
"fmt"
"io/ioutil"
"net/http"
"net/http/fcgi"
"os"
"path"
"strings"
"github.com/codegangsta/cli"
"github.com/go-macaron/binding"
"github.com/go-macaron/cache"
"github.com/go-macaron/captcha"
"github.com/go-macaron/csrf"
"github.com/go-macaron/gzip"
"github.com/go-macaron/i18n"
"github.com/go-macaron/session"
"github.com/go-macaron/toolbox"
"github.com/go-xorm/xorm"
"github.com/mcuadros/go-version"
"gopkg.in/ini.v1"
"gopkg.in/macaron.v1"
"github.com/gogits/git-module"
"github.com/gogits/go-gogs-client"
"github.com/gogits/gogs/models"
"github.com/gogits/gogs/modules/auth"
"github.com/gogits/gogs/modules/bindata"
"github.com/gogits/gogs/modules/context"
"github.com/gogits/gogs/modules/log"
"github.com/gogits/gogs/modules/setting"
"github.com/gogits/gogs/modules/template"
"github.com/gogits/gogs/routers"
"github.com/gogits/gogs/routers/admin"
apiv1 "github.com/gogits/gogs/routers/api/v1"
"github.com/gogits/gogs/routers/dev"
"github.com/gogits/gogs/routers/org"
"github.com/gogits/gogs/routers/repo"
"github.com/gogits/gogs/routers/user"
)
var CmdWeb = cli.Command{
Name: "web",
Usage: "Start Gogs web server",
Description: `Gogs web server is the only thing you need to run,
and it takes care of all the other things for you`,
Action: runWeb,
Flags: []cli.Flag{
stringFlag("port, p", "3000", "Temporary port number to prevent conflict"),
stringFlag("config, c", "custom/conf/app.ini", "Custom configuration file path"),
},
}
type VerChecker struct {
ImportPath string
Version func() string
Expected string
}
// checkVersion checks if binary matches the version of templates files.
func checkVersion() {
// Templates.
data, err := ioutil.ReadFile(setting.StaticRootPath + "/templates/.VERSION")
if err != nil {
log.Fatal(4, "Fail to read 'templates/.VERSION': %v", err)
}
if string(data) != setting.AppVer {
log.Fatal(4, "Binary and template file version does not match, did you forget to recompile?")
}
// Check dependency version.
checkers := []VerChecker{
{"github.com/go-xorm/xorm", func() string { return xorm.Version }, "0.5.5"},
{"github.com/go-macaron/binding", binding.Version, "0.3.2"},
{"github.com/go-macaron/cache", cache.Version, "0.1.2"},
{"github.com/go-macaron/csrf", csrf.Version, "0.1.0"},
{"github.com/go-macaron/i18n", i18n.Version, "0.3.0"},
{"github.com/go-macaron/session", session.Version, "0.1.6"},
{"github.com/go-macaron/toolbox", toolbox.Version, "0.1.0"},
{"gopkg.in/ini.v1", ini.Version, "1.8.4"},
{"gopkg.in/macaron.v1", macaron.Version, "1.1.4"},
{"github.com/gogits/git-module", git.Version, "0.3.3"},
{"github.com/gogits/go-gogs-client", gogs.Version, "0.10.1"},
}
for _, c := range checkers {
if !version.Compare(c.Version(), c.Expected, ">=") {
log.Fatal(4, `Dependency outdated!
Package '%s' current version (%s) is below requirement (%s),
please use following command to update this package and recompile Gogs:
go get -u %[1]s`, c.ImportPath, c.Version(), c.Expected)
}
}
}
// newMacaron initializes Macaron instance.
func newMacaron() *macaron.Macaron {
m := macaron.New()
if !setting.DisableRouterLog {
m.Use(macaron.Logger())
}
m.Use(macaron.Recovery())
if setting.EnableGzip {
m.Use(gzip.Gziper())
}
if setting.Protocol == setting.FCGI {
m.SetURLPrefix(setting.AppSubUrl)
}
m.Use(macaron.Static(
path.Join(setting.StaticRootPath, "public"),
macaron.StaticOptions{
SkipLogging: setting.DisableRouterLog,
},
))
m.Use(macaron.Static(
setting.AvatarUploadPath,
macaron.StaticOptions{
Prefix: "avatars",
SkipLogging: setting.DisableRouterLog,
},
))
funcMap := template.NewFuncMap()
m.Use(macaron.Renderer(macaron.RenderOptions{
Directory: path.Join(setting.StaticRootPath, "templates"),
AppendDirectories: []string{path.Join(setting.CustomPath, "templates")},
Funcs: funcMap,
IndentJSON: macaron.Env != macaron.PROD,
}))
models.InitMailRender(path.Join(setting.StaticRootPath, "templates/mail"),
path.Join(setting.CustomPath, "templates/mail"), funcMap)
localeNames, err := bindata.AssetDir("conf/locale")
if err != nil {
log.Fatal(4, "Fail to list locale files: %v", err)
}
localFiles := make(map[string][]byte)
for _, name := range localeNames {
localFiles[name] = bindata.MustAsset("conf/locale/" + name)
}
m.Use(i18n.I18n(i18n.Options{
SubURL: setting.AppSubUrl,
Files: localFiles,
CustomDirectory: path.Join(setting.CustomPath, "conf/locale"),
Langs: setting.Langs,
Names: setting.Names,
DefaultLang: "en-US",
Redirect: true,
}))
m.Use(cache.Cacher(cache.Options{
Adapter: setting.CacheAdapter,
AdapterConfig: setting.CacheConn,
Interval: setting.CacheInternal,
}))
m.Use(captcha.Captchaer(captcha.Options{
SubURL: setting.AppSubUrl,
}))
m.Use(session.Sessioner(setting.SessionConfig))
m.Use(csrf.Csrfer(csrf.Options{
Secret: setting.SecretKey,
Cookie: setting.CSRFCookieName,
SetCookie: true,
Header: "X-Csrf-Token",
CookiePath: setting.AppSubUrl,
}))
m.Use(toolbox.Toolboxer(m, toolbox.Options{
HealthCheckFuncs: []*toolbox.HealthCheckFuncDesc{
&toolbox.HealthCheckFuncDesc{
Desc: "Database connection",
Func: models.Ping,
},
},
}))
m.Use(context.Contexter())
return m
}
func runWeb(ctx *cli.Context) error {
if ctx.IsSet("config") {
setting.CustomConf = ctx.String("config")
}
routers.GlobalInit()
checkVersion()
m := newMacaron()
reqSignIn := context.Toggle(&context.ToggleOptions{SignInRequired: true})
ignSignIn := context.Toggle(&context.ToggleOptions{SignInRequired: setting.Service.RequireSignInView})
ignSignInAndCsrf := context.Toggle(&context.ToggleOptions{DisableCSRF: true})
reqSignOut := context.Toggle(&context.ToggleOptions{SignOutRequired: true})
bindIgnErr := binding.BindIgnErr
// FIXME: not all routes need go through same middlewares.
// Especially some AJAX requests, we can reduce middleware number to improve performance.
// Routers.
m.Get("/", ignSignIn, routers.Home)
m.Group("/explore", func() {
m.Get("", func(ctx *context.Context) {
ctx.Redirect(setting.AppSubUrl + "/explore/repos")
})
m.Get("/repos", routers.ExploreRepos)
m.Get("/users", routers.ExploreUsers)
}, ignSignIn)
m.Combo("/install", routers.InstallInit).Get(routers.Install).
Post(bindIgnErr(auth.InstallForm{}), routers.InstallPost)
m.Get("/^:type(issues|pulls)$", reqSignIn, user.Issues)
// ***** START: User *****
m.Group("/user", func() {
m.Get("/login", user.SignIn)
m.Post("/login", bindIgnErr(auth.SignInForm{}), user.SignInPost)
m.Get("/sign_up", user.SignUp)
m.Post("/sign_up", bindIgnErr(auth.RegisterForm{}), user.SignUpPost)
m.Get("/reset_password", user.ResetPasswd)
m.Post("/reset_password", user.ResetPasswdPost)
}, reqSignOut)
m.Group("/user/settings", func() {
m.Get("", user.Settings)
m.Post("", bindIgnErr(auth.UpdateProfileForm{}), user.SettingsPost)
m.Post("/avatar", binding.MultipartForm(auth.UploadAvatarForm{}), user.SettingsAvatar)
m.Post("/avatar/delete", user.SettingsDeleteAvatar)
m.Combo("/email").Get(user.SettingsEmails).
Post(bindIgnErr(auth.AddEmailForm{}), user.SettingsEmailPost)
m.Post("/email/delete", user.DeleteEmail)
m.Get("/password", user.SettingsPassword)
m.Post("/password", bindIgnErr(auth.ChangePasswordForm{}), user.SettingsPasswordPost)
m.Combo("/ssh").Get(user.SettingsSSHKeys).
Post(bindIgnErr(auth.AddSSHKeyForm{}), user.SettingsSSHKeysPost)
m.Post("/ssh/delete", user.DeleteSSHKey)
m.Combo("/applications").Get(user.SettingsApplications).
Post(bindIgnErr(auth.NewAccessTokenForm{}), user.SettingsApplicationsPost)
m.Post("/applications/delete", user.SettingsDeleteApplication)
m.Route("/delete", "GET,POST", user.SettingsDelete)
}, reqSignIn, func(ctx *context.Context) {
ctx.Data["PageIsUserSettings"] = true
})
m.Group("/user", func() {
// r.Get("/feeds", binding.Bind(auth.FeedsForm{}), user.Feeds)
m.Any("/activate", user.Activate)
m.Any("/activate_email", user.ActivateEmail)
m.Get("/email2user", user.Email2User)
m.Get("/forget_password", user.ForgotPasswd)
m.Post("/forget_password", user.ForgotPasswdPost)
m.Get("/logout", user.SignOut)
})
// ***** END: User *****
adminReq := context.Toggle(&context.ToggleOptions{SignInRequired: true, AdminRequired: true})
// ***** START: Admin *****
m.Group("/admin", func() {
m.Get("", adminReq, admin.Dashboard)
m.Get("/config", admin.Config)
m.Post("/config/test_mail", admin.SendTestMail)
m.Get("/monitor", admin.Monitor)
m.Group("/users", func() {
m.Get("", admin.Users)
m.Combo("/new").Get(admin.NewUser).Post(bindIgnErr(auth.AdminCrateUserForm{}), admin.NewUserPost)
m.Combo("/:userid").Get(admin.EditUser).Post(bindIgnErr(auth.AdminEditUserForm{}), admin.EditUserPost)
m.Post("/:userid/delete", admin.DeleteUser)
})
m.Group("/orgs", func() {
m.Get("", admin.Organizations)
})
m.Group("/repos", func() {
m.Get("", admin.Repos)
m.Post("/delete", admin.DeleteRepo)
})
m.Group("/auths", func() {
m.Get("", admin.Authentications)
m.Combo("/new").Get(admin.NewAuthSource).Post(bindIgnErr(auth.AuthenticationForm{}), admin.NewAuthSourcePost)
m.Combo("/:authid").Get(admin.EditAuthSource).
Post(bindIgnErr(auth.AuthenticationForm{}), admin.EditAuthSourcePost)
m.Post("/:authid/delete", admin.DeleteAuthSource)
})
m.Group("/notices", func() {
m.Get("", admin.Notices)
m.Post("/delete", admin.DeleteNotices)
m.Get("/empty", admin.EmptyNotices)
})
}, adminReq)
// ***** END: Admin *****
m.Group("", func() {
m.Group("/:username", func() {
m.Get("", user.Profile)
m.Get("/followers", user.Followers)
m.Get("/following", user.Following)
m.Get("/stars", user.Stars)
})
m.Get("/attachments/:uuid", func(ctx *context.Context) {
attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
if err != nil {
if models.IsErrAttachmentNotExist(err) {
ctx.Error(404)
} else {
ctx.Handle(500, "GetAttachmentByUUID", err)
}
return
}
fr, err := os.Open(attach.LocalPath())
if err != nil {
ctx.Handle(500, "Open", err)
return
}
defer fr.Close()
ctx.Header().Set("Cache-Control", "public,max-age=86400")
// Fix #312. Attachments with , in their name are not handled correctly by Google Chrome.
// We must put the name in " manually.
if err = repo.ServeData(ctx, "\""+attach.Name+"\"", fr); err != nil {
ctx.Handle(500, "ServeData", err)
return
}
})
m.Post("/issues/attachments", repo.UploadIssueAttachment)
}, ignSignIn)
m.Group("/:username", func() {
m.Get("/action/:action", user.Action)
}, reqSignIn)
if macaron.Env == macaron.DEV {
m.Get("/template/*", dev.TemplatePreview)
}
reqRepoAdmin := context.RequireRepoAdmin()
reqRepoWriter := context.RequireRepoWriter()
// ***** START: Organization *****
m.Group("/org", func() {
m.Get("/create", org.Create)
m.Post("/create", bindIgnErr(auth.CreateOrgForm{}), org.CreatePost)
m.Group("/:org", func() {
m.Get("/dashboard", user.Dashboard)
m.Get("/^:type(issues|pulls)$", user.Issues)
m.Get("/members", org.Members)
m.Get("/members/action/:action", org.MembersAction)
m.Get("/teams", org.Teams)
}, context.OrgAssignment(true))
m.Group("/:org", func() {
m.Get("/teams/:team", org.TeamMembers)
m.Get("/teams/:team/repositories", org.TeamRepositories)
m.Route("/teams/:team/action/:action", "GET,POST", org.TeamsAction)
m.Route("/teams/:team/action/repo/:action", "GET,POST", org.TeamsRepoAction)
}, context.OrgAssignment(true, false, true))
m.Group("/:org", func() {
m.Get("/teams/new", org.NewTeam)
m.Post("/teams/new", bindIgnErr(auth.CreateTeamForm{}), org.NewTeamPost)
m.Get("/teams/:team/edit", org.EditTeam)
m.Post("/teams/:team/edit", bindIgnErr(auth.CreateTeamForm{}), org.EditTeamPost)
m.Post("/teams/:team/delete", org.DeleteTeam)
m.Group("/settings", func() {
m.Combo("").Get(org.Settings).
Post(bindIgnErr(auth.UpdateOrgSettingForm{}), org.SettingsPost)
m.Post("/avatar", binding.MultipartForm(auth.UploadAvatarForm{}), org.SettingsAvatar)
m.Post("/avatar/delete", org.SettingsDeleteAvatar)
m.Group("/hooks", func() {
m.Get("", org.Webhooks)
m.Post("/delete", org.DeleteWebhook)
m.Get("/:type/new", repo.WebhooksNew)
m.Post("/gogs/new", bindIgnErr(auth.NewWebhookForm{}), repo.WebHooksNewPost)
m.Post("/slack/new", bindIgnErr(auth.NewSlackHookForm{}), repo.SlackHooksNewPost)
m.Get("/:id", repo.WebHooksEdit)
m.Post("/gogs/:id", bindIgnErr(auth.NewWebhookForm{}), repo.WebHooksEditPost)
m.Post("/slack/:id", bindIgnErr(auth.NewSlackHookForm{}), repo.SlackHooksEditPost)
})
m.Route("/delete", "GET,POST", org.SettingsDelete)
})
m.Route("/invitations/new", "GET,POST", org.Invitation)
}, context.OrgAssignment(true, true))
}, reqSignIn)
// ***** END: Organization *****
// ***** START: Repository *****
m.Group("/repo", func() {
m.Get("/create", repo.Create)
m.Post("/create", bindIgnErr(auth.CreateRepoForm{}), repo.CreatePost)
m.Get("/migrate", repo.Migrate)
m.Post("/migrate", bindIgnErr(auth.MigrateRepoForm{}), repo.MigratePost)
m.Combo("/fork/:repoid").Get(repo.Fork).
Post(bindIgnErr(auth.CreateRepoForm{}), repo.ForkPost)
}, reqSignIn)
m.Group("/:username/:reponame", func() {
m.Group("/settings", func() {
m.Combo("").Get(repo.Settings).
Post(bindIgnErr(auth.RepoSettingForm{}), repo.SettingsPost)
m.Group("/collaboration", func() {
m.Combo("").Get(repo.Collaboration).Post(repo.CollaborationPost)
m.Post("/access_mode", repo.ChangeCollaborationAccessMode)
m.Post("/delete", repo.DeleteCollaboration)
})
m.Group("/hooks", func() {
m.Get("", repo.Webhooks)
m.Post("/delete", repo.DeleteWebhook)
m.Get("/:type/new", repo.WebhooksNew)
m.Post("/gogs/new", bindIgnErr(auth.NewWebhookForm{}), repo.WebHooksNewPost)
m.Post("/slack/new", bindIgnErr(auth.NewSlackHookForm{}), repo.SlackHooksNewPost)
m.Get("/:id", repo.WebHooksEdit)
m.Post("/:id/test", repo.TestWebhook)
m.Post("/gogs/:id", bindIgnErr(auth.NewWebhookForm{}), repo.WebHooksEditPost)
m.Post("/slack/:id", bindIgnErr(auth.NewSlackHookForm{}), repo.SlackHooksEditPost)
m.Group("/git", func() {
m.Get("", repo.GitHooks)
m.Combo("/:name").Get(repo.GitHooksEdit).
Post(repo.GitHooksEditPost)
}, context.GitHookService())
})
m.Group("/keys", func() {
m.Combo("").Get(repo.DeployKeys).
Post(bindIgnErr(auth.AddSSHKeyForm{}), repo.DeployKeysPost)
m.Post("/delete", repo.DeleteDeployKey)
})
}, func(ctx *context.Context) {
ctx.Data["PageIsSettings"] = true
})
}, reqSignIn, context.RepoAssignment(), reqRepoAdmin, context.RepoRef())
m.Get("/:username/:reponame/action/:action", reqSignIn, context.RepoAssignment(), repo.Action)
m.Group("/:username/:reponame", func() {
m.Group("/issues", func() {
m.Combo("/new").Get(context.RepoRef(), repo.NewIssue).
Post(bindIgnErr(auth.CreateIssueForm{}), repo.NewIssuePost)
m.Combo("/:index/comments").Post(bindIgnErr(auth.CreateCommentForm{}), repo.NewComment)
m.Group("/:index", func() {
m.Post("/label", repo.UpdateIssueLabel)
m.Post("/milestone", repo.UpdateIssueMilestone)
m.Post("/assignee", repo.UpdateIssueAssignee)
}, reqRepoWriter)
m.Group("/:index", func() {
m.Post("/title", repo.UpdateIssueTitle)
m.Post("/content", repo.UpdateIssueContent)
})
}, repo.MustEnableIssues)
m.Group("/comments/:id", func() {
m.Post("", repo.UpdateCommentContent)
m.Post("/delete", repo.DeleteComment)
})
m.Group("/labels", func() {
m.Post("/new", bindIgnErr(auth.CreateLabelForm{}), repo.NewLabel)
m.Post("/edit", bindIgnErr(auth.CreateLabelForm{}), repo.UpdateLabel)
m.Post("/delete", repo.DeleteLabel)
}, repo.MustEnableIssues, reqRepoWriter, context.RepoRef())
m.Group("/milestones", func() {
m.Combo("/new").Get(repo.NewMilestone).
Post(bindIgnErr(auth.CreateMilestoneForm{}), repo.NewMilestonePost)
m.Get("/:id/edit", repo.EditMilestone)
m.Post("/:id/edit", bindIgnErr(auth.CreateMilestoneForm{}), repo.EditMilestonePost)
m.Get("/:id/:action", repo.ChangeMilestonStatus)
m.Post("/delete", repo.DeleteMilestone)
}, repo.MustEnableIssues, reqRepoWriter, context.RepoRef())
m.Group("/releases", func() {
m.Get("/new", repo.NewRelease)
m.Post("/new", bindIgnErr(auth.NewReleaseForm{}), repo.NewReleasePost)
m.Get("/edit/:tagname", repo.EditRelease)
m.Post("/edit/:tagname", bindIgnErr(auth.EditReleaseForm{}), repo.EditReleasePost)
m.Post("/delete", repo.DeleteRelease)
}, reqRepoWriter, context.RepoRef())
m.Combo("/compare/*", repo.MustAllowPulls).Get(repo.CompareAndPullRequest).
Post(bindIgnErr(auth.CreateIssueForm{}), repo.CompareAndPullRequestPost)
}, reqSignIn, context.RepoAssignment(), repo.MustBeNotBare)
m.Group("/:username/:reponame", func() {
m.Group("", func() {
m.Get("/releases", repo.Releases)
m.Get("/^:type(issues|pulls)$", repo.RetrieveLabels, repo.Issues)
m.Get("/^:type(issues|pulls)$/:index", repo.ViewIssue)
m.Get("/labels/", repo.RetrieveLabels, repo.Labels)
m.Get("/milestones", repo.Milestones)
}, context.RepoRef())
// m.Get("/branches", repo.Branches)
m.Group("/wiki", func() {
m.Get("/?:page", repo.Wiki)
m.Get("/_pages", repo.WikiPages)
m.Group("", func() {
m.Combo("/_new").Get(repo.NewWiki).
Post(bindIgnErr(auth.NewWikiForm{}), repo.NewWikiPost)
m.Combo("/:page/_edit").Get(repo.EditWiki).
Post(bindIgnErr(auth.NewWikiForm{}), repo.EditWikiPost)
m.Post("/:page/delete", repo.DeleteWikiPagePost)
}, reqSignIn, reqRepoWriter)
}, repo.MustEnableWiki, context.RepoRef())
m.Get("/archive/*", repo.Download)
m.Group("/pulls/:index", func() {
m.Get("/commits", context.RepoRef(), repo.ViewPullCommits)
m.Get("/files", context.RepoRef(), repo.ViewPullFiles)
m.Post("/merge", reqRepoWriter, repo.MergePullRequest)
}, repo.MustAllowPulls)
m.Group("", func() {
m.Get("/src/*", repo.Home)
m.Get("/raw/*", repo.SingleDownload)
m.Get("/commits/*", repo.RefCommits)
m.Get("/commit/:sha([a-z0-9]{40})$", repo.Diff)
m.Get("/forks", repo.Forks)
}, context.RepoRef())
m.Get("/commit/:sha([a-z0-9]{40})\\.:ext(patch|diff)", repo.RawDiff)
m.Get("/compare/:before([a-z0-9]{40})\\.\\.\\.:after([a-z0-9]{40})", repo.CompareDiff)
}, ignSignIn, context.RepoAssignment(), repo.MustBeNotBare)
m.Group("/:username/:reponame", func() {
m.Get("/stars", repo.Stars)
m.Get("/watchers", repo.Watchers)
}, ignSignIn, context.RepoAssignment(), context.RepoRef())
m.Group("/:username", func() {
m.Group("/:reponame", func() {
m.Get("", repo.Home)
m.Get("\\.git$", repo.Home)
}, ignSignIn, context.RepoAssignment(true), context.RepoRef())
m.Group("/:reponame", func() {
m.Any("/*", ignSignInAndCsrf, repo.HTTP)
m.Head("/tasks/trigger", repo.TriggerTask)
})
})
// ***** END: Repository *****
m.Group("/api", func() {
apiv1.RegisterRoutes(m)
}, ignSignIn)
// robots.txt
m.Get("/robots.txt", func(ctx *context.Context) {
if setting.HasRobotsTxt {
ctx.ServeFileContent(path.Join(setting.CustomPath, "robots.txt"))
} else {
ctx.Error(404)
}
})
// Not found handler.
m.NotFound(routers.NotFound)
// Flag for port number in case first time run conflict.
if ctx.IsSet("port") {
setting.AppUrl = strings.Replace(setting.AppUrl, setting.HttpPort, ctx.String("port"), 1)
setting.HttpPort = ctx.String("port")
}
var err error
listenAddr := fmt.Sprintf("%s:%s", setting.HttpAddr, setting.HttpPort)
log.Info("Listen: %v://%s%s", setting.Protocol, listenAddr, setting.AppSubUrl)
switch setting.Protocol {
case setting.HTTP:
err = http.ListenAndServe(listenAddr, m)
case setting.HTTPS:
server := &http.Server{Addr: listenAddr, TLSConfig: &tls.Config{MinVersion: tls.VersionTLS10}, Handler: m}
err = server.ListenAndServeTLS(setting.CertFile, setting.KeyFile)
case setting.FCGI:
err = fcgi.Serve(nil, m)
default:
log.Fatal(4, "Invalid protocol: %s", setting.Protocol)
}
if err != nil {
log.Fatal(4, "Fail to start server: %v", err)
}
return nil
}
| 1 | 11,577 | Use `m.Combo` for `Get` and `Post` methods. | gogs-gogs | go |
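A minimal sketch of the reviewer's suggestion, folding the avatar GET and POST handlers into one `m.Combo` route in the style the file already uses elsewhere (e.g. the /ssh and /email routes); this is a drop-in fragment for the /user/settings group shown in the diff, not a standalone program:

```go
// Inside the m.Group("/user/settings", ...) block from the diff:
m.Combo("/avatar").Get(user.SettingsAvatar).
	Post(binding.MultipartForm(auth.AvatarForm{}), user.SettingsAvatarPost)
m.Post("/avatar/delete", user.SettingsDeleteAvatar)
```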
@@ -446,6 +446,13 @@ class DownloadItem(QObject):
# The file already exists, so ask the user if it should be
# overwritten.
self._ask_overwrite_question()
+ # FIFO, device node, etc. Don't even try.
+ elif (os.path.exists(self._filename) and not
+ os.path.isdir(self._filename)):
+ self.cancel(False)
+ message.error(self._win_id, "The file {} already exists, and is a "
+ "special file. Aborting.".format(
+ self._filename))
else:
self._create_fileobj()
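A minimal sketch of the special-file test the patch performs, extracted into a standalone helper; the name `is_special_file` is an assumption, not qutebrowser API. A FIFO or device node exists but is neither a regular file nor a directory:

```python
import os
import stat

def is_special_file(path):
    """Return True if path exists but is neither a regular file nor a
    directory (e.g. a FIFO or a device node), so it must not be written to."""
    if not os.path.exists(path):
        return False
    mode = os.stat(path).st_mode
    return not (stat.S_ISREG(mode) or stat.S_ISDIR(mode))

# Usage sketch mirroring the patch: if is_special_file(filename), cancel the
# download and show an error instead of opening the path for writing.
```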
| 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Download manager."""
import io
import os
import sys
import os.path
import shutil
import functools
import collections
from PyQt5.QtCore import (pyqtSlot, pyqtSignal, QObject, QTimer,
Qt, QVariant, QAbstractListModel, QModelIndex, QUrl)
from PyQt5.QtGui import QDesktopServices
from PyQt5.QtNetwork import QNetworkRequest, QNetworkReply
# We need this import so PyQt can use it inside pyqtSlot
from PyQt5.QtWebKitWidgets import QWebPage # pylint: disable=unused-import
from qutebrowser.config import config
from qutebrowser.commands import cmdexc, cmdutils
from qutebrowser.utils import (message, usertypes, log, utils, urlutils,
objreg, standarddir, qtutils)
from qutebrowser.browser import http
from qutebrowser.browser.network import networkmanager
ModelRole = usertypes.enum('ModelRole', ['item'], start=Qt.UserRole,
is_int=True)
RetryInfo = collections.namedtuple('RetryInfo', ['request', 'manager'])
def _download_dir():
"""Get the download directory to use."""
directory = config.get('storage', 'download-directory')
if directory is None:
directory = standarddir.download()
return directory
def _path_suggestion(filename):
"""Get the suggested file path.
Args:
filename: The filename to use if included in the suggestion.
"""
suggestion = config.get('completion', 'download-path-suggestion')
if suggestion == 'path':
# add trailing '/' if not present
return os.path.join(_download_dir(), '')
elif suggestion == 'filename':
return filename
elif suggestion == 'both':
return os.path.join(_download_dir(), filename)
else:
raise ValueError("Invalid suggestion value {}!".format(suggestion))
class DownloadItemStats(QObject):
"""Statistics (bytes done, total bytes, time, etc.) about a download.
Class attributes:
SPEED_REFRESH_INTERVAL: How often to refresh the speed, in msec.
SPEED_AVG_WINDOW: How many seconds of speed data to average to
estimate the remaining time.
Attributes:
done: How many bytes there are already downloaded.
total: The total count of bytes. None if the total is unknown.
speed: The current download speed, in bytes per second.
_speed_avg: A rolling average of speeds.
_last_done: The count of bytes which where downloaded when calculating
the speed the last time.
"""
SPEED_REFRESH_INTERVAL = 500
SPEED_AVG_WINDOW = 30
updated = pyqtSignal()
def __init__(self, parent=None):
super().__init__(parent)
self.total = None
self.done = 0
self.speed = 0
self._last_done = 0
samples = int(self.SPEED_AVG_WINDOW *
(1000 / self.SPEED_REFRESH_INTERVAL))
self._speed_avg = collections.deque(maxlen=samples)
self.timer = usertypes.Timer(self, 'speed_refresh')
self.timer.timeout.connect(self._update_speed)
self.timer.setInterval(self.SPEED_REFRESH_INTERVAL)
self.timer.start()
@pyqtSlot()
def _update_speed(self):
"""Recalculate the current download speed."""
delta = self.done - self._last_done
self.speed = delta * 1000 / self.SPEED_REFRESH_INTERVAL
self._speed_avg.append(self.speed)
self._last_done = self.done
self.updated.emit()
def finish(self):
"""Set the download stats as finished."""
self.timer.stop()
self.done = self.total
def percentage(self):
"""The current download percentage, or None if unknown."""
if self.total == 0 or self.total is None:
return None
else:
return 100 * self.done / self.total
def remaining_time(self):
"""The remaining download time in seconds, or None."""
if self.total is None or not self._speed_avg:
# No average yet or we don't know the total size.
return None
remaining_bytes = self.total - self.done
avg = sum(self._speed_avg) / len(self._speed_avg)
if avg == 0:
# Download stalled
return None
else:
return remaining_bytes / avg
@pyqtSlot(int, int)
def on_download_progress(self, bytes_done, bytes_total):
"""Update local variables when the download progress changed.
Args:
bytes_done: How many bytes are downloaded.
bytes_total: How many bytes there are to download in total.
"""
if bytes_total == -1:
bytes_total = None
self.done = bytes_done
self.total = bytes_total
class DownloadItem(QObject):
"""A single download currently running.
There are multiple ways the data can flow from the QNetworkReply to the
disk.
If the filename/file object is known immediately when starting the
download, QNetworkReply's readyRead writes to the target file directly.
    If not, readyRead is ignored; instead, self._read_timer periodically reads
    the reply into the self._buffer BytesIO, so that some broken servers don't
    close our connection.
As soon as we know the file object, we copy self._buffer over and the next
readyRead will write to the real file object.
Class attributes:
MAX_REDIRECTS: The maximum redirection count.
Attributes:
done: Whether the download is finished.
stats: A DownloadItemStats object.
index: The index of the download in the view.
successful: Whether the download has completed successfully.
error_msg: The current error message, or None
autoclose: Whether to close the associated file if the download is
done.
fileobj: The file object to download the file to.
reply: The QNetworkReply associated with this download.
retry_info: A RetryInfo instance.
_filename: The filename of the download.
_redirects: How many time we were redirected already.
_buffer: A BytesIO object to buffer incoming data until we know the
target file.
_read_timer: A Timer which reads the QNetworkReply into self._buffer
periodically.
_win_id: The window ID the DownloadItem runs in.
Signals:
data_changed: The downloads metadata changed.
finished: The download was finished.
cancelled: The download was cancelled.
error: An error with the download occurred.
arg: The error message as string.
redirected: Signal emitted when a download was redirected.
arg 0: The new QNetworkRequest.
arg 1: The old QNetworkReply.
do_retry: Emitted when a download is retried.
arg 0: The new DownloadItem
"""
MAX_REDIRECTS = 10
data_changed = pyqtSignal()
finished = pyqtSignal()
error = pyqtSignal(str)
cancelled = pyqtSignal()
redirected = pyqtSignal(QNetworkRequest, QNetworkReply)
do_retry = pyqtSignal(object) # DownloadItem
def __init__(self, reply, win_id, parent=None):
"""Constructor.
Args:
reply: The QNetworkReply to download.
"""
super().__init__(parent)
self.retry_info = None
self.done = False
self.stats = DownloadItemStats(self)
self.stats.updated.connect(self.data_changed)
self.index = 0
self.autoclose = True
self.reply = None
self._buffer = io.BytesIO()
self._read_timer = usertypes.Timer(self, name='download-read-timer')
self._read_timer.setInterval(500)
self._read_timer.timeout.connect(self.on_read_timer_timeout)
self._redirects = 0
self.error_msg = None
self.basename = '???'
self.successful = False
self.fileobj = None
self._filename = None
self.init_reply(reply)
self._win_id = win_id
def __repr__(self):
return utils.get_repr(self, basename=self.basename)
def __str__(self):
"""Get the download as a string.
Example: foo.pdf [699.2kB/s|0.34|16%|4.253/25.124]
"""
speed = utils.format_size(self.stats.speed, suffix='B/s')
down = utils.format_size(self.stats.done, suffix='B')
perc = self.stats.percentage()
remaining = self.stats.remaining_time()
if self.error_msg is None:
errmsg = ""
else:
errmsg = " - {}".format(self.error_msg)
if all(e is None for e in (perc, remaining, self.stats.total)):
return ('{index}: {name} [{speed:>10}|{down}]{errmsg}'.format(
index=self.index, name=self.basename, speed=speed,
down=down, errmsg=errmsg))
if perc is None:
perc = '??'
else:
perc = round(perc)
if remaining is None:
remaining = '?'
else:
remaining = utils.format_seconds(remaining)
total = utils.format_size(self.stats.total, suffix='B')
if self.done:
return ('{index}: {name} [{perc:>2}%|{total}]{errmsg}'.format(
index=self.index, name=self.basename, perc=perc,
total=total, errmsg=errmsg))
else:
return ('{index}: {name} [{speed:>10}|{remaining:>5}|{perc:>2}%|'
'{down}/{total}]{errmsg}'.format(
index=self.index, name=self.basename, speed=speed,
remaining=remaining, perc=perc, down=down,
total=total, errmsg=errmsg))
def _create_fileobj(self):
"""Create a file object using the internal filename."""
try:
fileobj = open(self._filename, 'wb')
except OSError as e:
self._die(e.strerror)
else:
self.set_fileobj(fileobj)
def _ask_overwrite_question(self):
"""Create a Question object to be asked."""
q = usertypes.Question(self)
q.text = self._filename + " already exists. Overwrite? (y/n)"
q.mode = usertypes.PromptMode.yesno
q.answered_yes.connect(self._create_fileobj)
q.answered_no.connect(functools.partial(self.cancel, False))
q.cancelled.connect(functools.partial(self.cancel, False))
self.cancelled.connect(q.abort)
self.error.connect(q.abort)
message_bridge = objreg.get('message-bridge', scope='window',
window=self._win_id)
message_bridge.ask(q, blocking=False)
def _die(self, msg):
"""Abort the download and emit an error."""
assert not self.successful
self._read_timer.stop()
self.reply.downloadProgress.disconnect()
self.reply.finished.disconnect()
self.reply.error.disconnect()
self.reply.readyRead.disconnect()
self.error_msg = msg
self.stats.finish()
self.error.emit(msg)
with log.hide_qt_warning('QNetworkReplyImplPrivate::error: Internal '
'problem, this method must only be called '
'once.'):
# See https://codereview.qt-project.org/#/c/107863/
self.reply.abort()
self.reply.deleteLater()
self.reply = None
self.done = True
self.data_changed.emit()
def init_reply(self, reply):
"""Set a new reply and connect its signals.
Args:
reply: The QNetworkReply to handle.
"""
self.done = False
self.successful = False
self.reply = reply
reply.setReadBufferSize(16 * 1024 * 1024) # 16 MB
reply.downloadProgress.connect(self.stats.on_download_progress)
reply.finished.connect(self.on_reply_finished)
reply.error.connect(self.on_reply_error)
reply.readyRead.connect(self.on_ready_read)
self.retry_info = RetryInfo(request=reply.request(),
manager=reply.manager())
if not self.fileobj:
self._read_timer.start()
# We could have got signals before we connected slots to them.
# Here no signals are connected to the DownloadItem yet, so we use a
# singleShot QTimer to emit them after they are connected.
if reply.error() != QNetworkReply.NoError:
QTimer.singleShot(0, lambda: self.error.emit(reply.errorString()))
def bg_color(self):
"""Background color to be shown."""
start = config.get('colors', 'downloads.bg.start')
stop = config.get('colors', 'downloads.bg.stop')
system = config.get('colors', 'downloads.bg.system')
error = config.get('colors', 'downloads.bg.error')
if self.error_msg is not None:
assert not self.successful
return error
elif self.stats.percentage() is None:
return start
else:
return utils.interpolate_color(
start, stop, self.stats.percentage(), system)
@pyqtSlot()
def cancel(self, remove_data=True):
"""Cancel the download.
Args:
remove_data: Whether to remove the downloaded data.
"""
log.downloads.debug("cancelled")
self._read_timer.stop()
self.cancelled.emit()
if self.reply is not None:
self.reply.finished.disconnect(self.on_reply_finished)
self.reply.abort()
self.reply.deleteLater()
self.reply = None
if self.fileobj is not None:
self.fileobj.close()
if remove_data:
self.delete()
self.done = True
self.finished.emit()
self.data_changed.emit()
def delete(self):
"""Delete the downloaded file."""
try:
if self._filename is not None and os.path.exists(self._filename):
os.remove(self._filename)
except OSError:
log.downloads.exception("Failed to remove partial file")
@pyqtSlot()
def retry(self):
"""Retry a failed download."""
download_manager = objreg.get('download-manager', scope='window',
window=self._win_id)
new_reply = self.retry_info.manager.get(self.retry_info.request)
new_download = download_manager.fetch(
new_reply, suggested_filename=self.basename)
self.do_retry.emit(new_download)
self.cancel()
@pyqtSlot()
def open_file(self):
"""Open the downloaded file."""
assert self.successful
url = QUrl.fromLocalFile(self._filename)
QDesktopServices.openUrl(url)
def set_filename(self, filename):
"""Set the filename to save the download to.
Args:
filename: The full filename to save the download to.
None: special value to stop the download.
"""
if self.fileobj is not None:
raise ValueError("fileobj was already set! filename: {}, "
"existing: {}, fileobj {}".format(
filename, self._filename, self.fileobj))
filename = os.path.expanduser(filename)
# Remove chars which can't be encoded in the filename encoding.
# See https://github.com/The-Compiler/qutebrowser/issues/427
encoding = sys.getfilesystemencoding()
filename = utils.force_encoding(filename, encoding)
if not self._create_full_filename(filename):
# We only got a filename (without directory) or a relative path
# from the user, so we append that to the default directory and
# try again.
self._create_full_filename(os.path.join(_download_dir(), filename))
log.downloads.debug("Setting filename to {}".format(filename))
if os.path.isfile(self._filename):
# The file already exists, so ask the user if it should be
# overwritten.
self._ask_overwrite_question()
else:
self._create_fileobj()
def _create_full_filename(self, filename):
"""Try to create the full filename.
Return:
True if the full filename was created, False otherwise.
"""
if os.path.isabs(filename) and os.path.isdir(filename):
# We got an absolute directory from the user, so we save it under
# the default filename in that directory.
self._filename = os.path.join(filename, self.basename)
return True
elif os.path.isabs(filename):
# We got an absolute filename from the user, so we save it under
# that filename.
self._filename = filename
self.basename = os.path.basename(self._filename)
return True
return False
def set_fileobj(self, fileobj):
""""Set the file object to write the download to.
Args:
fileobj: A file-like object.
"""
if self.fileobj is not None:
raise ValueError("fileobj was already set! Old: {}, new: "
"{}".format(self.fileobj, fileobj))
self.fileobj = fileobj
try:
self._read_timer.stop()
log.downloads.debug("buffer: {} bytes".format(self._buffer.tell()))
self._buffer.seek(0)
shutil.copyfileobj(self._buffer, fileobj)
self._buffer.close()
if self.reply.isFinished():
# Downloading to the buffer in RAM has already finished so we
# write out the data and clean up now.
self.on_reply_finished()
else:
# Since the buffer already might be full, on_ready_read might
# not be called at all anymore, so we force it here to flush
# the buffer and continue receiving new data.
self.on_ready_read()
except OSError as e:
self._die(e.strerror)
def finish_download(self):
"""Write buffered data to disk and finish the QNetworkReply."""
log.downloads.debug("Finishing download...")
if self.reply.isOpen():
self.fileobj.write(self.reply.readAll())
if self.autoclose:
self.fileobj.close()
self.successful = self.reply.error() == QNetworkReply.NoError
self.reply.close()
self.reply.deleteLater()
self.reply = None
self.finished.emit()
self.done = True
log.downloads.debug("Download finished")
self.data_changed.emit()
@pyqtSlot()
def on_reply_finished(self):
"""Clean up when the download was finished.
Note when this gets called, only the QNetworkReply has finished. This
doesn't mean the download (i.e. writing data to the disk) is finished
as well. Therefore, we can't close() the QNetworkReply in here yet.
"""
if self.reply is None:
return
self._read_timer.stop()
self.stats.finish()
is_redirected = self._handle_redirect()
if is_redirected:
return
log.downloads.debug("Reply finished, fileobj {}".format(self.fileobj))
if self.fileobj is not None:
# We can do a "delayed" write immediately to empty the buffer and
# clean up.
self.finish_download()
@pyqtSlot()
def on_ready_read(self):
"""Read available data and save file when ready to read."""
if self.fileobj is None or self.reply is None:
# No filename has been set yet (so we don't empty the buffer) or we
# got a readyRead after the reply was finished (which happens on
# qute:log for example).
return
if not self.reply.isOpen():
raise OSError("Reply is closed!")
try:
self.fileobj.write(self.reply.readAll())
except OSError as e:
self._die(e.strerror)
@pyqtSlot(int)
def on_reply_error(self, code):
"""Handle QNetworkReply errors."""
if code == QNetworkReply.OperationCanceledError:
return
else:
self._die(self.reply.errorString())
@pyqtSlot()
def on_read_timer_timeout(self):
"""Read some bytes from the QNetworkReply periodically."""
if not self.reply.isOpen():
raise OSError("Reply is closed!")
data = self.reply.read(1024)
if data is not None:
self._buffer.write(data)
def _handle_redirect(self):
"""Handle a HTTP redirect.
Return:
True if the download was redirected, False otherwise.
"""
redirect = self.reply.attribute(
QNetworkRequest.RedirectionTargetAttribute)
if redirect is None or redirect.isEmpty():
return False
new_url = self.reply.url().resolved(redirect)
request = self.reply.request()
if new_url == request.url():
return False
if self._redirects > self.MAX_REDIRECTS:
self._die("Maximum redirection count reached!")
return True # so on_reply_finished aborts
log.downloads.debug("{}: Handling redirect".format(self))
self._redirects += 1
request.setUrl(new_url)
reply = self.reply
reply.finished.disconnect(self.on_reply_finished)
self._read_timer.stop()
self.reply = None
if self.fileobj is not None:
self.fileobj.seek(0)
self.redirected.emit(request, reply) # this will change self.reply!
reply.deleteLater() # the old one
return True
class DownloadManager(QAbstractListModel):
"""Manager and model for currently running downloads.
Attributes:
downloads: A list of active DownloadItems.
questions: A list of Question objects to not GC them.
_networkmanager: A NetworkManager for generic downloads.
_win_id: The window ID the DownloadManager runs in.
"""
def __init__(self, win_id, parent=None):
super().__init__(parent)
self._win_id = win_id
self.downloads = []
self.questions = []
self._networkmanager = networkmanager.NetworkManager(
win_id, None, self)
def __repr__(self):
return utils.get_repr(self, downloads=len(self.downloads))
def _prepare_question(self):
"""Prepare a Question object to be asked."""
q = usertypes.Question(self)
q.text = "Save file to:"
q.mode = usertypes.PromptMode.text
q.completed.connect(q.deleteLater)
q.destroyed.connect(functools.partial(self.questions.remove, q))
self.questions.append(q)
return q
@pyqtSlot('QUrl', 'QWebPage')
def get(self, url, page=None, fileobj=None, filename=None,
auto_remove=False):
"""Start a download with a link URL.
Args:
url: The URL to get, as QUrl
page: The QWebPage to get the download from.
fileobj: The file object to write the answer to.
filename: A path to write the data to.
auto_remove: Whether to remove the download even if
ui -> remove-finished-downloads is set to false.
Return:
If the download could start immediately, (fileobj/filename given),
the created DownloadItem.
If not, None.
"""
if fileobj is not None and filename is not None:
raise TypeError("Only one of fileobj/filename may be given!")
if not url.isValid():
urlutils.invalid_url_error(self._win_id, url, "start download")
return
req = QNetworkRequest(url)
return self.get_request(req, page, fileobj, filename, auto_remove)
def get_request(self, request, page=None, fileobj=None, filename=None,
auto_remove=False):
"""Start a download with a QNetworkRequest.
Args:
request: The QNetworkRequest to download.
page: The QWebPage to use.
fileobj: The file object to write the answer to.
filename: A path to write the data to.
auto_remove: Whether to remove the download even if
ui -> remove-finished-downloads is set to false.
Return:
If the download could start immediately, (fileobj/filename given),
the created DownloadItem.
If not, None.
"""
if fileobj is not None and filename is not None:
raise TypeError("Only one of fileobj/filename may be given!")
# WORKAROUND for Qt corrupting data loaded from cache:
# https://bugreports.qt-project.org/browse/QTBUG-42757
request.setAttribute(QNetworkRequest.CacheLoadControlAttribute,
QNetworkRequest.AlwaysNetwork)
suggested_fn = urlutils.filename_from_url(request.url())
if fileobj is not None or filename is not None:
return self.fetch_request(request, page, fileobj, filename,
auto_remove, suggested_fn)
if suggested_fn is None:
suggested_fn = 'qutebrowser-download'
else:
encoding = sys.getfilesystemencoding()
suggested_fn = utils.force_encoding(suggested_fn, encoding)
q = self._prepare_question()
q.default = _path_suggestion(suggested_fn)
message_bridge = objreg.get('message-bridge', scope='window',
window=self._win_id)
q.answered.connect(
lambda fn: self.fetch_request(request, page, filename=fn,
auto_remove=auto_remove,
suggested_filename=suggested_fn))
message_bridge.ask(q, blocking=False)
return None
def fetch_request(self, request, page=None, fileobj=None, filename=None,
auto_remove=False, suggested_filename=None):
"""Download a QNetworkRequest to disk.
Args:
request: The QNetworkRequest to download.
page: The QWebPage to use.
fileobj: The file object to write the answer to.
filename: A path to write the data to.
auto_remove: Whether to remove the download even if
ui -> remove-finished-downloads is set to false.
Return:
The created DownloadItem.
"""
if page is None:
nam = self._networkmanager
else:
nam = page.networkAccessManager()
reply = nam.get(request)
return self.fetch(reply, fileobj, filename, auto_remove,
suggested_filename)
@pyqtSlot('QNetworkReply')
def fetch(self, reply, fileobj=None, filename=None, auto_remove=False,
suggested_filename=None):
"""Download a QNetworkReply to disk.
Args:
reply: The QNetworkReply to download.
fileobj: The file object to write the answer to.
filename: A path to write the data to.
auto_remove: Whether to remove the download even if
ui -> remove-finished-downloads is set to false.
Return:
The created DownloadItem.
"""
if fileobj is not None and filename is not None:
raise TypeError("Only one of fileobj/filename may be given!")
if not suggested_filename:
if filename is not None:
suggested_filename = os.path.basename(filename)
elif fileobj is not None and getattr(fileobj, 'name', None):
suggested_filename = fileobj.name
else:
_, suggested_filename = http.parse_content_disposition(reply)
log.downloads.debug("fetch: {} -> {}".format(reply.url(),
suggested_filename))
download = DownloadItem(reply, self._win_id, self)
download.cancelled.connect(
functools.partial(self.remove_item, download))
if config.get('ui', 'remove-finished-downloads') or auto_remove:
download.finished.connect(
functools.partial(self.remove_item, download))
download.data_changed.connect(
functools.partial(self.on_data_changed, download))
download.error.connect(self.on_error)
download.redirected.connect(
functools.partial(self.on_redirect, download))
download.basename = suggested_filename
idx = len(self.downloads) + 1
download.index = idx
self.beginInsertRows(QModelIndex(), idx, idx)
self.downloads.append(download)
self.endInsertRows()
if filename is not None:
download.set_filename(filename)
elif fileobj is not None:
download.set_fileobj(fileobj)
download.autoclose = False
else:
q = self._prepare_question()
q.default = _path_suggestion(suggested_filename)
q.answered.connect(download.set_filename)
q.cancelled.connect(download.cancel)
download.cancelled.connect(q.abort)
download.error.connect(q.abort)
message_bridge = objreg.get('message-bridge', scope='window',
window=self._win_id)
message_bridge.ask(q, blocking=False)
return download
def raise_no_download(self, count):
"""Raise an exception that the download doesn't exist.
Args:
count: The index of the download
"""
if not count:
raise cmdexc.CommandError("There's no download!")
raise cmdexc.CommandError("There's no download {}!".format(count))
@cmdutils.register(instance='download-manager', scope='window',
count='count')
def download_cancel(self, count=0):
"""Cancel the last/[count]th download.
Args:
count: The index of the download to cancel.
"""
try:
download = self.downloads[count - 1]
except IndexError:
self.raise_no_download(count)
if download.done:
if not count:
count = len(self.downloads)
raise cmdexc.CommandError("Download {} is already done!"
.format(count))
download.cancel()
@cmdutils.register(instance='download-manager', scope='window',
count='count')
def download_delete(self, count=0):
"""Delete the last/[count]th download from disk.
Args:
count: The index of the download to cancel.
"""
try:
download = self.downloads[count - 1]
except IndexError:
self.raise_no_download(count)
if not download.successful:
if not count:
count = len(self.downloads)
raise cmdexc.CommandError("Download {} is not done!".format(count))
download.delete()
self.remove_item(download)
@cmdutils.register(instance='download-manager', scope='window',
deprecated="Use :download-cancel instead.",
count='count')
def cancel_download(self, count=1):
"""Cancel the first/[count]th download.
Args:
count: The index of the download to cancel.
"""
self.download_cancel(count)
@cmdutils.register(instance='download-manager', scope='window',
count='count')
def download_open(self, count=0):
"""Open the last/[count]th download.
Args:
count: The index of the download to cancel.
"""
try:
download = self.downloads[count - 1]
except IndexError:
self.raise_no_download(count)
if not download.successful:
if not count:
count = len(self.downloads)
raise cmdexc.CommandError("Download {} is not done!".format(count))
download.open_file()
@pyqtSlot(QNetworkRequest, QNetworkReply)
def on_redirect(self, download, request, reply):
"""Handle a HTTP redirect of a download.
Args:
download: The old DownloadItem.
request: The new QNetworkRequest.
reply: The old QNetworkReply.
"""
log.downloads.debug("redirected: {} -> {}".format(
reply.url(), request.url()))
new_reply = reply.manager().get(request)
download.init_reply(new_reply)
@pyqtSlot(DownloadItem)
def on_data_changed(self, download):
"""Emit data_changed signal when download data changed."""
try:
idx = self.downloads.index(download)
except ValueError:
# download has been deleted in the meantime
return
model_idx = self.index(idx, 0)
qtutils.ensure_valid(model_idx)
self.dataChanged.emit(model_idx, model_idx)
@pyqtSlot(str)
def on_error(self, msg):
"""Display error message on download errors."""
message.error(self._win_id, "Download error: {}".format(msg))
def has_downloads_with_nam(self, nam):
"""Check if the DownloadManager has any downloads with the given QNAM.
Args:
nam: The QNetworkAccessManager to check.
Return:
A boolean.
"""
assert nam.adopted_downloads == 0
for download in self.downloads:
running_download = (download.reply is not None and
download.reply.manager() is nam)
# user could request retry after tab is closed.
failed_download = (download.done and (not download.successful) and
download.retry_info.manager is nam)
if running_download or failed_download:
nam.adopt_download(download)
return nam.adopted_downloads
def can_clear(self):
"""Check if there are finished downloads to clear."""
return any(download.done for download in self.downloads)
@cmdutils.register(instance='download-manager', scope='window',
count='count')
def download_remove(self, all_=False, count=0):
"""Remove the last/[count]th download from the list.
Args:
all_: If given removes all finished downloads.
count: The index of the download to cancel.
"""
if all_:
finished_items = [d for d in self.downloads if d.done]
self.remove_items(finished_items)
else:
try:
download = self.downloads[count - 1]
except IndexError:
self.raise_no_download(count)
if not download.done:
if not count:
count = len(self.downloads)
raise cmdexc.CommandError("Download {} is not done!"
.format(count))
self.remove_item(download)
def last_index(self):
"""Get the last index in the model.
Return:
A (possibly invalid) QModelIndex.
"""
idx = self.index(self.rowCount() - 1)
return idx
def remove_item(self, download):
"""Remove a given download."""
try:
idx = self.downloads.index(download)
except ValueError:
# already removed
return
self.beginRemoveRows(QModelIndex(), idx, idx)
del self.downloads[idx]
self.endRemoveRows()
download.deleteLater()
self.update_indexes()
def remove_items(self, downloads):
"""Remove an iterable of downloads."""
# On the first pass, we only generate the indices so we get the
# first/last one for beginRemoveRows.
indices = []
# We need to iterate over downloads twice, which won't work if it's a
# generator.
downloads = list(downloads)
for download in downloads:
try:
indices.append(self.downloads.index(download))
except ValueError:
# already removed
pass
if not indices:
return
indices.sort()
self.beginRemoveRows(QModelIndex(), indices[0], indices[-1])
for download in downloads:
try:
self.downloads.remove(download)
except ValueError:
# already removed
pass
else:
download.deleteLater()
self.endRemoveRows()
def update_indexes(self):
"""Update indexes of all DownloadItems."""
first_idx = None
for i, d in enumerate(self.downloads, 1):
if first_idx is None and d.index != i:
first_idx = i - 1
d.index = i
if first_idx is not None:
model_idx = self.index(first_idx, 0)
qtutils.ensure_valid(model_idx)
self.dataChanged.emit(model_idx, self.last_index())
def headerData(self, section, orientation, role):
"""Simple constant header."""
if (section == 0 and orientation == Qt.Horizontal and
role == Qt.DisplayRole):
return "Downloads"
else:
return ""
def data(self, index, role):
"""Download data from DownloadManager."""
qtutils.ensure_valid(index)
if index.parent().isValid() or index.column() != 0:
return QVariant()
item = self.downloads[index.row()]
if role == Qt.DisplayRole:
data = str(item)
elif role == Qt.ForegroundRole:
data = config.get('colors', 'downloads.fg')
elif role == Qt.BackgroundRole:
data = item.bg_color()
elif role == ModelRole.item:
data = item
elif role == Qt.ToolTipRole:
if item.error_msg is None:
data = QVariant()
else:
return item.error_msg
else:
data = QVariant()
return data
def flags(self, _index):
"""Override flags so items aren't selectable.
The default would be Qt.ItemIsEnabled | Qt.ItemIsSelectable."""
return Qt.ItemIsEnabled | Qt.ItemNeverHasChildren
def rowCount(self, parent=QModelIndex()):
"""Get count of active downloads."""
if parent.isValid():
# We don't have children
return 0
return len(self.downloads)
| 1 | 13,168 | Is there a reason you're not using `self._die("The file {} ...")` here? | qutebrowser-qutebrowser | py |
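A minimal sketch of what the suggestion above might look like inside `delete()`; the review comment elides the exact error wording, so the message below is only illustrative. One caveat grounded in the code above: `_die()` disconnects signals on `self.reply`, so calling it after the reply is already gone (e.g. post-cancel) would need an extra guard.

```python
def delete(self):
    """Delete the downloaded file."""
    try:
        if self._filename is not None and os.path.exists(self._filename):
            os.remove(self._filename)
    except OSError as e:
        # Hypothetical wording -- the review comment elides the real message.
        self._die("The file {} could not be deleted: {}".format(
            self._filename, e.strerror))
```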
@@ -13,7 +13,7 @@ import (
// LocalDevExecCmd allows users to execute arbitrary bash commands within a container.
var LocalDevExecCmd = &cobra.Command{
- Use: "exec [app_name] [environment_name] '[cmd]'",
+ Use: "exec '[cmd]'",
Short: "run a command in an app container.",
Long: `Execs into container and runs bash commands.`,
Run: func(cmd *cobra.Command, args []string) { | 1 | package cmd
import (
"fmt"
"log"
"path"
"strings"
"github.com/drud/ddev/pkg/plugins/platform"
"github.com/drud/drud-go/utils/dockerutil"
"github.com/spf13/cobra"
)
// LocalDevExecCmd allows users to execute arbitrary bash commands within a container.
var LocalDevExecCmd = &cobra.Command{
Use: "exec [app_name] [environment_name] '[cmd]'",
Short: "run a command in an app container.",
Long: `Execs into container and runs bash commands.`,
Run: func(cmd *cobra.Command, args []string) {
// The command string will be the first argument if using a stored
// appConfig, or the third if passing in app/deploy names.
cmdString := args[0]
if len(args) > 2 {
cmdString = args[2]
}
app := platform.PluginMap[strings.ToLower(plugin)]
opts := platform.AppOptions{
Name: activeApp,
Environment: activeDeploy,
}
app.SetOpts(opts)
nameContainer := fmt.Sprintf("%s-%s", app.ContainerName(), serviceType)
if !dockerutil.IsRunning(nameContainer) {
Failed("App not running locally. Try `ddev add`.")
}
if !platform.ComposeFileExists(app) {
Failed("No docker-compose yaml for this site. Try `ddev add`.")
}
cmdArgs := []string{
"-f", path.Join(app.AbsPath(), "docker-compose.yaml"),
"exec",
"-T", nameContainer,
}
if strings.Contains(cmdString, "drush dl") {
// do we want to add a -y here?
cmdString = strings.Replace(cmdString, "drush dl", "drush --root=/src/docroot dl", 1)
}
cmdSplit := strings.Split(cmdString, " ")
cmdArgs = append(cmdArgs, cmdSplit...)
err := dockerutil.DockerCompose(cmdArgs...)
if err != nil {
log.Println(err)
Failed("Could not execute command.")
}
},
PreRun: func(cmd *cobra.Command, args []string) {
if len(args) == 1 {
return
}
if len(args) == 3 {
return
}
Failed("Invalid arguments detected. Please use a command in the form of: exec [app_name] [environment_name] '[cmd]'")
},
}
func init() {
LocalDevExecCmd.Flags().StringVarP(&serviceType, "service", "s", "web", "Which service to send the command to. [web, db]")
RootCmd.AddCommand(LocalDevExecCmd)
}
| 1 | 10,695 | I think we'll want @rickmanelius (or somebody) to go through all the help and make it more accessible. Probably later in the cycle. But "Run a command in an app container" doesn't do it for me :) | drud-ddev | go |
@@ -8,6 +8,10 @@ import (
. "github.com/weaveworks/weave/common"
)
+var (
+ weaveWaitEntrypoint = []string{"/home/weavewait/weavewait"}
+)
+
func callWeave(args ...string) ([]byte, error) {
args = append([]string{"--local"}, args...)
Debug.Print("Calling weave", args) | 1 | package proxy
import (
"os/exec"
"strings"
"github.com/fsouza/go-dockerclient"
. "github.com/weaveworks/weave/common"
)
func callWeave(args ...string) ([]byte, error) {
args = append([]string{"--local"}, args...)
Debug.Print("Calling weave", args)
cmd := exec.Command("./weave", args...)
cmd.Env = []string{"PROCFS=/hostproc", "PATH=/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}
out, err := cmd.CombinedOutput()
return out, err
}
func weaveCIDRsFromConfig(config *docker.Config) ([]string, bool) {
for _, e := range config.Env {
if strings.HasPrefix(e, "WEAVE_CIDR=") {
result := strings.Trim(e[11:], " ")
return strings.Split(strings.TrimSpace(result), " "), result != ""
}
}
return nil, false
}
| 1 | 8,693 | shouldn't this be a const? | weaveworks-weave | go |
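For context on the question above: Go only allows constants of basic types (booleans, numerics, strings), so a `[]string` cannot be declared `const`. A minimal sketch of the closest alternative, which at least makes the path itself constant:

```go
package proxy

// The entrypoint path is a plain string, so it can be a const.
const weaveWaitPath = "/home/weavewait/weavewait"

// The slice wrapping it must stay a var: Go has no const slices.
var weaveWaitEntrypoint = []string{weaveWaitPath}
```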
@@ -135,10 +135,7 @@ class MongoDBMochaReporter extends mocha.reporters.Spec {
timestamp = timestamp ? timestamp.toISOString().split('.')[0] : '';
output.testSuites.push({
- package:
- suite.file.includes('functional') || suite.file.includes('integration')
- ? 'Functional'
- : 'Unit',
+ package: suite.file.includes('integration') ? 'Functional' : 'Unit',
id,
name: className,
timestamp, | 1 | //@ts-check
'use strict';
const mocha = require('mocha');
const chalk = require('chalk');
chalk.level = 3;
const {
EVENT_RUN_BEGIN,
EVENT_RUN_END,
EVENT_TEST_FAIL,
EVENT_TEST_PASS,
EVENT_SUITE_BEGIN,
EVENT_SUITE_END,
EVENT_TEST_PENDING,
EVENT_TEST_BEGIN,
EVENT_TEST_END
} = mocha.Runner.constants;
const fs = require('fs');
const os = require('os');
/**
* @typedef {object} MongoMochaSuiteExtension
* @property {Date} timestamp - suite start date
* @property {string} stdout - capture of stdout
* @property {string} stderr - capture of stderr
 * @property {MongoMochaTest} test - the suite's currently running test
* @typedef {object} MongoMochaTestExtension
* @property {Date} startTime - test start date
* @property {Date} endTime - test end date
* @property {number} elapsedTime - difference between end and start
* @property {Error} [error] - The possible error from a test
* @property {true} [skipped] - Set if test was skipped
* @typedef {MongoMochaSuiteExtension & Mocha.Suite} MongoMochaSuite
* @typedef {MongoMochaTestExtension & Mocha.Test} MongoMochaTest
*/
// Turn this on if you have to debug this custom reporter!
let REPORT_TO_STDIO = false;
function captureStream(stream) {
var oldWrite = stream.write;
var buf = '';
stream.write = function (chunk) {
buf += chunk.toString(); // chunk is a String or Buffer
oldWrite.apply(stream, arguments);
};
return {
unhook: function unhook() {
stream.write = oldWrite;
return buf;
},
captured: function () {
return buf;
}
};
}
/**
* @param {Mocha.Runner} runner
* @this {any}
*/
class MongoDBMochaReporter extends mocha.reporters.Spec {
constructor(runner) {
super(runner);
/** @type {Map<string, {suite: MongoMochaSuite, stdout?: any, stderr?: any}>} */
this.suites = new Map();
this.xunitWritten = false;
runner.on(EVENT_RUN_BEGIN, () => this.start());
runner.on(EVENT_RUN_END, () => this.end());
runner.on(EVENT_SUITE_BEGIN, suite => this.onSuite(suite));
runner.on(EVENT_TEST_BEGIN, test => this.onTest(test));
runner.on(EVENT_TEST_PASS, test => this.pass(test));
runner.on(EVENT_TEST_FAIL, (test, error) => this.fail(test, error));
runner.on(EVENT_TEST_PENDING, test => this.pending(test));
runner.on(EVENT_SUITE_END, suite => this.suiteEnd(suite));
runner.on(EVENT_TEST_END, test => this.testEnd(test));
process.on('SIGINT', () => this.end(true));
}
start() {}
end(ctrlC) {
try {
if (ctrlC) console.log('emergency exit!');
const output = { testSuites: [] };
for (const [id, [className, { suite }]] of [...this.suites.entries()].entries()) {
let totalSuiteTime = 0;
let testCases = [];
let failureCount = 0;
const tests = /** @type {MongoMochaTest[]}*/ (suite.tests);
for (const test of tests) {
let time = test.elapsedTime / 1000;
time = Number.isNaN(time) ? 0 : time;
totalSuiteTime += time;
failureCount += test.state === 'failed' ? 1 : 0;
/** @type {string | Date | number} */
let startTime = test.startTime;
startTime = startTime ? startTime.toISOString() : 0;
/** @type {string | Date | number} */
let endTime = test.endTime;
endTime = endTime ? endTime.toISOString() : 0;
let error = test.error;
let failure = error
? {
type: error.constructor.name,
message: error.message,
stack: error.stack
}
: undefined;
let skipped = !!test.skipped;
testCases.push({
name: test.title,
className,
time,
startTime,
endTime,
skipped,
failure
});
}
/** @type {string | Date | number} */
let timestamp = suite.timestamp;
timestamp = timestamp ? timestamp.toISOString().split('.')[0] : '';
output.testSuites.push({
package:
suite.file.includes('functional') || suite.file.includes('integration')
? 'Functional'
: 'Unit',
id,
name: className,
timestamp,
hostname: os.hostname(),
tests: suite.tests.length,
failures: failureCount,
errors: '0',
time: totalSuiteTime,
testCases,
stdout: suite.stdout,
stderr: suite.stderr
});
}
if (!this.xunitWritten) {
fs.writeFileSync('xunit.xml', outputToXML(output), { encoding: 'utf8' });
}
this.xunitWritten = true;
console.log(chalk.bold('wrote xunit.xml'));
} catch (error) {
console.error(chalk.red(`Failed to output xunit report! ${error}`));
} finally {
if (ctrlC) process.exit(1);
}
}
/**
* @param {MongoMochaSuite} suite
*/
onSuite(suite) {
if (suite.root) return;
if (!this.suites.has(suite.fullTitle())) {
suite.timestamp = new Date();
this.suites.set(suite.fullTitle(), {
suite,
stdout: captureStream(process.stdout),
stderr: captureStream(process.stderr)
});
} else {
console.warn(`${chalk.yellow('WARNING:')} ${suite.fullTitle()} started twice`);
}
}
/**
* @param {MongoMochaSuite} suite
*/
suiteEnd(suite) {
if (suite.root) return;
const currentSuite = this.suites.get(suite.fullTitle());
if (!currentSuite) {
console.error('Suite never started >:(');
process.exit(1);
}
if (currentSuite.stdout || currentSuite.stderr) {
suite.stdout = currentSuite.stdout.unhook();
suite.stderr = currentSuite.stderr.unhook();
delete currentSuite.stdout;
delete currentSuite.stderr;
}
}
/**
* @param {MongoMochaTest} test
*/
onTest(test) {
test.startTime = new Date();
}
/**
* @param {MongoMochaTest} test
*/
testEnd(test) {
test.endTime = new Date();
test.elapsedTime = Number(test.endTime) - Number(test.startTime);
}
/**
* @param {MongoMochaTest} test
*/
pass(test) {
if (REPORT_TO_STDIO) console.log(chalk.green(`✔ ${test.fullTitle()}`));
}
/**
* @param {MongoMochaTest} test
* @param {Error} error
*/
fail(test, error) {
if (REPORT_TO_STDIO) console.log(chalk.red(`⨯ ${test.fullTitle()} -- ${error.message}`));
test.error = error;
}
/**
* @param {MongoMochaTest & {skipReason?: string}} test
*/
pending(test) {
if (REPORT_TO_STDIO) console.log(chalk.cyan(`↬ ${test.fullTitle()}`));
if (typeof test.skipReason === 'string') {
console.log(chalk.cyan(`${' '.repeat(test.titlePath().length + 1)}↬ ${test.skipReason}`));
}
test.skipped = true;
}
}
module.exports = MongoDBMochaReporter;
function replaceIllegalXMLCharacters(string) {
// prettier-ignore
return String(string)
.split('"').join('"')
.split('<').join('﹤')
.split('>').join('﹥')
.split('&').join('﹠');
}
const ANSI_ESCAPE_REGEX =
// eslint-disable-next-line no-control-regex
/[\u001b\u009b][[()#;?]*(?:[0-9]{1,4}(?:;[0-9]{0,4})*)?[0-9A-ORZcf-nqry=><]/g;
function outputToXML(output) {
function cdata(str) {
return `<![CDATA[${String(str)
.split(ANSI_ESCAPE_REGEX)
.join('')
.split(']]>')
.join('\\]\\]\\>')}]]>`;
}
function makeTag(name, attributes, selfClose, content) {
const attributesString = Object.entries(attributes || {})
.map(([k, v]) => `${k}="${replaceIllegalXMLCharacters(v)}"`)
.join(' ');
let tag = `<${name}${attributesString ? ' ' + attributesString : ''}`;
if (selfClose) return tag + '/>\n';
else tag += '>';
if (content) return tag + content + `</${name}>`;
return tag;
}
let s =
'<?xml version="1.0" encoding="UTF-8"?>\n<?xml-model href="./test/tools/reporter/xunit.xsd" ?>\n<testsuites>\n';
for (const suite of output.testSuites) {
s += makeTag('testsuite', {
package: suite.package,
id: suite.id,
name: suite.name,
timestamp: suite.timestamp,
hostname: suite.hostname,
tests: suite.tests,
failures: suite.failures,
errors: suite.errors,
time: suite.time
});
s += '\n\t' + makeTag('properties') + '</properties>\n'; // can put metadata here?
for (const test of suite.testCases) {
s +=
'\t' +
makeTag(
'testcase',
{
name: test.name,
classname: test.className,
time: test.time,
start: test.startTime,
end: test.endTime
},
!test.failure && !test.skipped
);
if (test.failure) {
s +=
'\n\t\t' +
makeTag('failure', { type: test.failure.type }, false, cdata(test.failure.stack)) +
'\n';
s += `\t</testcase>\n`;
}
if (test.skipped) {
s += makeTag('skipped', {}, true);
s += `\t</testcase>\n`;
}
}
s += '\t' + makeTag('system-out', {}, false, cdata(suite.stdout)) + '\n';
s += '\t' + makeTag('system-err', {}, false, cdata(suite.stderr)) + '\n';
s += `</testsuite>\n`;
}
return s + '</testsuites>\n';
}
| 1 | 21,921 | This is a bit of a throwaway field in the xunit output, it doesn't impact anything on EVG, should we just name it `integration` now? | mongodb-node-mongodb-native | js |
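A small runnable sketch of the rename the comment floats, classifying suites by file path with an `Integration` label instead of the legacy `Functional` one; the helper name is hypothetical:

```js
// Hypothetical helper: derive the xunit `package` attribute from a suite's
// file path, labelling integration suites as such.
function suitePackage(file) {
  return file.includes('integration') ? 'Integration' : 'Unit';
}

console.log(suitePackage('test/integration/crud.test.js')); // Integration
console.log(suitePackage('test/unit/utils.test.js')); // Unit
```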
@@ -29,6 +29,6 @@ import java.lang.annotation.Target;
@Documented
@Retention(SOURCE)
@Target(TYPE)
-public @interface SolrSingleThreaded {
+public @interface SolrThreadUnsafe {
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.common.annotation;
import static java.lang.annotation.ElementType.TYPE;
import static java.lang.annotation.RetentionPolicy.SOURCE;
import java.lang.annotation.Documented;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
/**
* Annotation for classes in Solr that are not thread safe. This provides a clear indication of the thread safety of the class.
*/
@Documented
@Retention(SOURCE)
@Target(TYPE)
public @interface SolrSingleThreaded {
}
| 1 | 36,353 | Please change the name of the file too. | apache-lucene-solr | java |
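For context: Java requires a public top-level type to live in a file matching its name, so renaming the annotation implies renaming the file too. A minimal sketch of the renamed file:

```java
// File: SolrThreadUnsafe.java (renamed from SolrSingleThreaded.java)
package org.apache.solr.common.annotation;

/** Marks classes in Solr that are not thread safe. */
public @interface SolrThreadUnsafe {
}
```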
@@ -286,7 +286,7 @@ func inject(args []string) string {
func init() {
flag := injectCmd.Flags()
- flag.StringVar(&injectCfg.configPath, "injector-config-path", "./tools/actioninjector/gentsfaddrs.yaml", "path of config file of genesis transfer addresses")
+ flag.StringVar(&injectCfg.configPath, "injector-config-path", "./tools/actioninjector.v2/gentsfaddrs.yaml", "path of config file of genesis transfer addresses")
flag.StringVar(&injectCfg.serverAddr, "addr", "127.0.0.1:14004", "target ip:port for grpc connection")
flag.Uint64Var(&injectCfg.transferGasLimit, "transfer-gas-limit", 20000, "transfer gas limit")
flag.Int64Var(&injectCfg.rawTransferGasPrice, "transfer-gas-price", 10, "transfer gas price") | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package cmd
import (
"context"
"encoding/hex"
"fmt"
"io/ioutil"
"math/big"
"math/rand"
"sync"
"time"
"github.com/cenkalti/backoff"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"go.uber.org/zap"
"gopkg.in/yaml.v2"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/address"
"github.com/iotexproject/iotex-core/pkg/keypair"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/unit"
"github.com/iotexproject/iotex-core/tools/actioninjector.v2/internal/client"
)
// KeyPairs indicate the keypair of accounts getting transfers from Creator in genesis block
type KeyPairs struct {
Pairs []KeyPair `yaml:"pkPairs"`
}
// KeyPair contains the public and private key of an address
type KeyPair struct {
PK string `yaml:"pubKey"`
SK string `yaml:"priKey"`
}
// AddressKey contains the encoded address and private key of an account
type AddressKey struct {
EncodedAddr string
PriKey keypair.PrivateKey
}
type injectProcessor struct {
c *client.Client
nonces *sync.Map
accounts []*AddressKey
}
func newInjectionProcessor() (*injectProcessor, error) {
c, err := client.New(injectCfg.serverAddr)
if err != nil {
return nil, err
}
p := &injectProcessor{
c: c,
nonces: &sync.Map{},
}
if err := p.loadAccounts(injectCfg.configPath); err != nil {
return p, err
}
p.syncNonces(context.Background())
return p, nil
}
func (p *injectProcessor) loadAccounts(keypairsPath string) error {
keyPairBytes, err := ioutil.ReadFile(keypairsPath)
if err != nil {
return errors.Wrap(err, "failed to read key pairs file")
}
var keypairs KeyPairs
if err := yaml.Unmarshal(keyPairBytes, &keypairs); err != nil {
return errors.Wrap(err, "failed to unmarshal key pairs bytes")
}
// Construct iotex addresses from loaded key pairs
addrKeys := make([]*AddressKey, 0)
for _, pair := range keypairs.Pairs {
pk, err := keypair.DecodePublicKey(pair.PK)
if err != nil {
return errors.Wrap(err, "failed to decode public key")
}
sk, err := keypair.DecodePrivateKey(pair.SK)
if err != nil {
return errors.Wrap(err, "failed to decode private key")
}
pkHash := keypair.HashPubKey(pk)
addr, err := address.FromBytes(pkHash[:])
if err != nil {
return err
}
addrKeys = append(addrKeys, &AddressKey{EncodedAddr: addr.String(), PriKey: sk})
}
p.accounts = addrKeys
return nil
}
func (p *injectProcessor) syncNoncesProcess(ctx context.Context) {
reset := time.Tick(injectCfg.resetInterval)
for {
select {
case <-ctx.Done():
return
case <-reset:
p.syncNonces(context.Background())
}
}
}
func (p *injectProcessor) syncNonces(ctx context.Context) {
p.nonces.Range(func(key interface{}, value interface{}) bool {
addr := key.(string)
err := backoff.Retry(func() error {
resp, err := p.c.GetAccount(ctx, addr)
if err != nil {
return err
}
p.nonces.Store(addr, resp.GetAccountMeta().GetNonce())
return nil
}, backoff.NewExponentialBackOff())
if err != nil {
log.L().Fatal("Failed to inject actions by APS",
zap.Error(err),
zap.String("addr", addr))
}
return true
})
}
func (p *injectProcessor) injectProcess(ctx context.Context) {
var workers sync.WaitGroup
ticks := make(chan uint64)
for i := uint64(0); i < injectCfg.workers; i++ {
workers.Add(1)
go p.inject(&workers, ticks)
}
defer workers.Wait()
defer close(ticks)
interval := uint64(time.Second.Nanoseconds() / int64(injectCfg.aps))
began, count := time.Now(), uint64(0)
for {
now, next := time.Now(), began.Add(time.Duration(count*interval))
time.Sleep(next.Sub(now))
select {
case <-ctx.Done():
return
case ticks <- count:
count++
default:
workers.Add(1)
go p.inject(&workers, ticks)
}
}
}
func (p *injectProcessor) inject(workers *sync.WaitGroup, ticks <-chan uint64) {
defer workers.Done()
for range ticks {
selp, err := p.pickAction()
if err != nil {
log.L().Error("Failed to create an action", zap.Error(err))
}
bo := backoff.WithMaxRetries(backoff.NewConstantBackOff(injectCfg.retryInterval), injectCfg.retryNum)
if err := backoff.Retry(func() error {
return p.c.SendAction(context.Background(), selp)
}, bo); err != nil {
log.L().Error("Failed to inject.", zap.Error(err))
}
log.L().Debug("Sent out the action.")
}
}
func (p *injectProcessor) pickAction() (action.SealedEnvelope, error) {
var nonce uint64
sender := p.accounts[rand.Intn(len(p.accounts))]
val, ok := p.nonces.Load(sender.EncodedAddr)
if ok {
nonce = val.(uint64)
}
p.nonces.Store(sender.EncodedAddr, nonce+1)
bd := &action.EnvelopeBuilder{}
var elp action.Envelope
switch rand.Intn(2) {
case 0:
amount := int64(0)
for amount == int64(0) {
amount = int64(rand.Intn(5))
}
recipient := p.accounts[rand.Intn(len(p.accounts))]
transfer, err := action.NewTransfer(
nonce, unit.ConvertIotxToRau(amount), recipient.EncodedAddr, injectCfg.transferPayload, injectCfg.transferGasLimit, injectCfg.transferGasPrice)
if err != nil {
return action.SealedEnvelope{}, errors.Wrap(err, "failed to create raw transfer")
}
elp = bd.SetNonce(nonce).
SetGasPrice(injectCfg.transferGasPrice).
SetDestinationAddress(recipient.EncodedAddr).
SetGasLimit(injectCfg.transferGasLimit).
SetAction(transfer).Build()
case 1:
execution, err := action.NewExecution(injectCfg.contract, nonce, injectCfg.executionAmount, injectCfg.executionGasLimit, injectCfg.executionGasPrice, injectCfg.executionData)
if err != nil {
return action.SealedEnvelope{}, errors.Wrap(err, "failed to create raw execution")
}
elp = bd.SetNonce(nonce).
SetGasPrice(injectCfg.executionGasPrice).
SetDestinationAddress(injectCfg.contract).
SetGasLimit(injectCfg.executionGasLimit).
SetAction(execution).Build()
}
selp, err := action.Sign(elp, sender.PriKey)
if err != nil {
return action.SealedEnvelope{}, errors.Wrapf(err, "failed to sign transfer %v", elp)
}
return selp, nil
}
// injectCmd represents the inject command
var injectCmd = &cobra.Command{
Use: "random",
Short: "inject random actions",
Long: `inject random actions.`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Println(inject(args))
},
}
var injectCfg = struct {
configPath string
serverAddr string
transferGasLimit uint64
rawTransferGasPrice int64
transferGasPrice *big.Int
rawTransferPayload string
transferPayload []byte
contract string
rawExecutionAmount int64
executionAmount *big.Int
executionGasLimit uint64
rawExecutionGasPrice int64
executionGasPrice *big.Int
rawExecutionData string
executionData []byte
retryNum uint64
retryInterval time.Duration
duration time.Duration
resetInterval time.Duration
aps int
workers uint64
}{}
func inject(args []string) string {
var err error
injectCfg.transferPayload, err = hex.DecodeString(injectCfg.rawTransferPayload)
if err != nil {
return fmt.Sprintf("failed to decode payload %s: %v.", injectCfg.transferPayload, err)
}
injectCfg.executionData, err = hex.DecodeString(injectCfg.rawExecutionData)
if err != nil {
return fmt.Sprintf("failed to decode data %s: %v", injectCfg.rawExecutionData, err)
}
injectCfg.transferGasPrice = big.NewInt(injectCfg.rawTransferGasPrice)
injectCfg.executionGasPrice = big.NewInt(injectCfg.rawExecutionGasPrice)
injectCfg.executionAmount = big.NewInt(injectCfg.rawExecutionAmount)
p, err := newInjectionProcessor()
if err != nil {
return fmt.Sprintf("failed to create injector processor: %v.", err)
}
ctx, cancel := context.WithTimeout(context.Background(), injectCfg.duration)
defer cancel()
go p.injectProcess(ctx)
go p.syncNoncesProcess(ctx)
<-ctx.Done()
return ""
}
func init() {
flag := injectCmd.Flags()
flag.StringVar(&injectCfg.configPath, "injector-config-path", "./tools/actioninjector/gentsfaddrs.yaml", "path of config file of genesis transfer addresses")
flag.StringVar(&injectCfg.serverAddr, "addr", "127.0.0.1:14004", "target ip:port for grpc connection")
flag.Uint64Var(&injectCfg.transferGasLimit, "transfer-gas-limit", 20000, "transfer gas limit")
flag.Int64Var(&injectCfg.rawTransferGasPrice, "transfer-gas-price", 10, "transfer gas price")
flag.StringVar(&injectCfg.rawTransferPayload, "transfer-payload", "", "transfer payload")
flag.StringVar(&injectCfg.contract, "contract", "io1pmjhyksxmz2xpxn2qmz4gx9qq2kn2gdr8un4xq", "smart contract address")
flag.Int64Var(&injectCfg.rawExecutionAmount, "execution-amount", 50, "execution amount")
flag.Uint64Var(&injectCfg.executionGasLimit, "execution-gas-limit", 20000, "execution gas limit")
flag.Int64Var(&injectCfg.rawExecutionGasPrice, "execution-gas-price", 10, "execution gas price")
flag.StringVar(&injectCfg.rawExecutionData, "execution-data", "2885ad2c", "execution data")
flag.Uint64Var(&injectCfg.retryNum, "retry-num", 5, "maximum number of rpc retries")
flag.DurationVar(&injectCfg.retryInterval, "retry-interval", 1*time.Second, "sleep interval between two consecutive rpc retries")
flag.DurationVar(&injectCfg.duration, "duration", 60*time.Hour, "duration when the injection will run")
flag.DurationVar(&injectCfg.resetInterval, "reset-interval", 10*time.Second, "time interval to reset nonce counter")
flag.IntVar(&injectCfg.aps, "aps", 30, "actions to be injected per second")
flag.Uint64Var(&injectCfg.workers, "workers", 10, "number of workers")
rootCmd.AddCommand(injectCmd)
}
| 1 | 15,590 | line is 161 characters (from `lll`) | iotexproject-iotex-core | go |
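A sketch of one way to wrap a registration like the flagged `flag.StringVar` call so no single line approaches `lll`'s limit; the `main` scaffolding is only there to keep the snippet runnable:

```go
package main

import "flag"

var configPath string

func main() {
	// Same call, split across lines to satisfy the lll linter.
	flag.StringVar(
		&configPath,
		"injector-config-path",
		"./tools/actioninjector.v2/gentsfaddrs.yaml",
		"path of config file of genesis transfer addresses",
	)
	flag.Parse()
}
```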
@@ -195,8 +195,10 @@ module Bolt
@outputter.stop_spin
# Automatically generate types after installing modules
- @outputter.print_action_step("Generating type references")
- @pal.generate_types(cache: true)
+ if ok
+ @outputter.print_action_step("Generating type references")
+ @pal.generate_types(cache: true)
+ end
@outputter.print_puppetfile_result(ok, path, moduledir)
| 1 | # frozen_string_literal: true
require 'bolt/error'
require 'bolt/logger'
require 'bolt/module_installer/installer'
require 'bolt/module_installer/puppetfile'
require 'bolt/module_installer/resolver'
require 'bolt/module_installer/specs'
module Bolt
class ModuleInstaller
def initialize(outputter, pal)
@outputter = outputter
@pal = pal
@logger = Bolt::Logger.logger(self)
end
# Adds a single module to the project.
#
def add(name, specs, puppetfile_path, moduledir, project_file, config)
project_specs = Specs.new(specs)
# Exit early if project config already includes a spec with this name.
if project_specs.include?(name)
@outputter.print_message(
"Project configuration file #{project_file} already includes specification "\
"with name #{name}. Nothing to do."
)
return true
end
@outputter.print_message("Adding module #{name} to project\n\n")
# Generate the specs to resolve from. If a Puppetfile exists, parse it and
# convert the modules to specs. Otherwise, use the project specs.
resolve_specs = if puppetfile_path.exist?
existing_puppetfile = Puppetfile.parse(puppetfile_path)
existing_puppetfile.assert_satisfies(project_specs)
Specs.from_puppetfile(existing_puppetfile)
else
project_specs
end
# Resolve module dependencies. Attempt to first resolve with resolve
      # specs. If that fails, fall back to resolving from project specs.
# This prevents Bolt from modifying installed modules unless there is
# a version conflict.
@outputter.print_action_step("Resolving module dependencies, this may take a moment")
@outputter.start_spin
begin
resolve_specs.add_specs('name' => name)
puppetfile = Resolver.new.resolve(resolve_specs, config)
rescue Bolt::Error
project_specs.add_specs('name' => name)
puppetfile = Resolver.new.resolve(project_specs, config)
end
@outputter.stop_spin
# Display the diff between the existing Puppetfile and the new Puppetfile.
print_puppetfile_diff(existing_puppetfile, puppetfile)
# Add the module to the project configuration.
@outputter.print_action_step("Updating project configuration file at #{project_file}")
data = Bolt::Util.read_yaml_hash(project_file, 'project')
data['modules'] ||= []
data['modules'] << name.tr('-', '/')
begin
File.write(project_file, data.to_yaml)
rescue SystemCallError => e
raise Bolt::FileError.new(
"Unable to update project configuration file: #{e.message}",
project_file
)
end
# Write the Puppetfile.
@outputter.print_action_step("Writing Puppetfile at #{puppetfile_path}")
puppetfile.write(puppetfile_path, moduledir)
# Install the modules.
install_puppetfile(puppetfile_path, moduledir, config)
end
# Outputs a diff of an old Puppetfile and a new Puppetfile.
#
def print_puppetfile_diff(old, new)
# Build hashes mapping the module name to the module object. This makes it
# a little easier to determine which modules have been added, removed, or
# modified.
old = (old&.modules || []).each_with_object({}) do |mod, acc|
next unless mod.type == :forge
acc[mod.full_name] = mod
end
new = new.modules.each_with_object({}) do |mod, acc|
next unless mod.type == :forge
acc[mod.full_name] = mod
end
# New modules are those present in new but not in old.
added = new.reject { |full_name, _mod| old.include?(full_name) }.values
if added.any?
diff = "Adding the following modules:\n"
added.each { |mod| diff += "#{mod.full_name} #{mod.version}\n" }
@outputter.print_action_step(diff)
end
# Upgraded modules are those that have a newer version in new than old.
upgraded = new.select do |full_name, mod|
if old.include?(full_name)
mod.version > old[full_name].version
end
end.keys
if upgraded.any?
diff = "Upgrading the following modules:\n"
upgraded.each { |full_name| diff += "#{full_name} #{old[full_name].version} to #{new[full_name].version}\n" }
@outputter.print_action_step(diff)
end
# Downgraded modules are those that have an older version in new than old.
downgraded = new.select do |full_name, mod|
if old.include?(full_name)
mod.version < old[full_name].version
end
end.keys
if downgraded.any?
diff = "Downgrading the following modules: \n"
downgraded.each { |full_name| diff += "#{full_name} #{old[full_name].version} to #{new[full_name].version}\n" }
@outputter.print_action_step(diff)
end
# Removed modules are those present in old but not in new.
removed = old.reject { |full_name, _mod| new.include?(full_name) }.values
if removed.any?
diff = "Removing the following modules:\n"
removed.each { |mod| diff += "#{mod.full_name} #{mod.version}\n" }
@outputter.print_action_step(diff)
end
end
# Installs a project's module dependencies.
#
def install(specs, path, moduledir, config = {}, force: false, resolve: true)
@outputter.print_message("Installing project modules\n\n")
if resolve != false
specs = Specs.new(specs)
# If forcibly installing or if there is no Puppetfile, resolve
# and write a Puppetfile.
if force || !path.exist?
@outputter.print_action_step("Resolving module dependencies, this may take a moment")
# This doesn't use the block as it's more testable to just mock *_spin
@outputter.start_spin
puppetfile = Resolver.new.resolve(specs, config)
@outputter.stop_spin
# We get here either through 'bolt module install' which uses the
# managed modulepath (which isn't configurable) or through bolt
# project init --modules, which uses the default modulepath. This
# should be safe to assume that if `.modules/` is the moduledir the
# user is using the new workflow
@outputter.print_action_step("Writing Puppetfile at #{path}")
if moduledir.basename.to_s == '.modules'
puppetfile.write(path, moduledir)
else
puppetfile.write(path)
end
# If not forcibly installing and there is a Puppetfile, assert
# that it satisfies the specs.
else
puppetfile = Puppetfile.parse(path)
puppetfile.assert_satisfies(specs)
end
end
# Install the modules.
install_puppetfile(path, moduledir, config)
end
# Installs the Puppetfile and generates types.
#
def install_puppetfile(path, moduledir, config = {})
@outputter.print_action_step("Syncing modules from #{path} to #{moduledir}")
@outputter.start_spin
ok = Installer.new(config).install(path, moduledir)
@outputter.stop_spin
# Automatically generate types after installing modules
@outputter.print_action_step("Generating type references")
@pal.generate_types(cache: true)
@outputter.print_puppetfile_result(ok, path, moduledir)
ok
end
end
end
| 1 | 17,487 | When is this false? | puppetlabs-bolt | rb |
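For reference, `ok` is the boolean returned by `Installer.new(config).install(path, moduledir)` a few lines above, so it is false when syncing the Puppetfile fails. A minimal sketch of the guarded flow, with the installer stubbed out:

```ruby
# Stubbed stand-in for Installer#install, which reports success as a boolean.
def install_puppetfile_stub(succeed)
  succeed
end

ok = install_puppetfile_stub(false)
puts 'Generating type references' if ok # skipped when the sync failed
puts "install ok? #{ok}"
```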
@@ -540,7 +540,6 @@ Dispose()
[InlineData("${counter}", DbType.Int16, (short)1)]
[InlineData("${counter}", DbType.Int32, 1)]
[InlineData("${counter}", DbType.Int64, (long)1)]
- [InlineData("${counter}", DbType.Int16, (short)1)]
[InlineData("${counter:norawvalue=true}", DbType.Int16, (short)1)] //fallback
[InlineData("${counter}", DbType.VarNumeric, 1, true)]
[InlineData("${counter}", DbType.AnsiString, "1")] | 1 | //
// Copyright (c) 2004-2019 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
namespace NLog.UnitTests.Targets
{
using System;
using System.Collections;
using System.Collections.Generic;
#if !NETSTANDARD
using System.Configuration;
#endif
using System.Data;
using System.Data.Common;
using System.Globalization;
using System.IO;
using System.Linq;
using NLog.Common;
using NLog.Config;
using NLog.Targets;
using Xunit;
using Xunit.Extensions;
using System.Data.SqlClient;
#if MONO
using Mono.Data.Sqlite;
#elif NETSTANDARD
using Microsoft.Data.Sqlite;
#else
using System.Data.SQLite;
#endif
public class DatabaseTargetTests : NLogTestBase
{
#if !MONO && !NETSTANDARD
static DatabaseTargetTests()
{
var data = (DataSet)ConfigurationManager.GetSection("system.data");
var providerFactories = data.Tables["DBProviderFactories"];
providerFactories.Rows.Add("MockDb Provider", "MockDb Provider", "MockDb",
typeof(MockDbFactory).AssemblyQualifiedName);
providerFactories.AcceptChanges();
}
#endif
[Fact]
public void SimpleDatabaseTest()
{
MockDbConnection.ClearLog();
DatabaseTarget dt = new DatabaseTarget()
{
CommandText = "INSERT INTO FooBar VALUES('${message}')",
ConnectionString = "FooBar",
DBProvider = typeof(MockDbConnection).AssemblyQualifiedName,
};
dt.Initialize(null);
Assert.Same(typeof(MockDbConnection), dt.ConnectionType);
List<Exception> exceptions = new List<Exception>();
dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add));
dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger", "msg2").WithContinuation(exceptions.Add));
dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger", "msg3").WithContinuation(exceptions.Add));
foreach (var ex in exceptions)
{
Assert.Null(ex);
}
string expectedLog = @"Open('FooBar').
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg1')
Close()
Dispose()
Open('FooBar').
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg2')
Close()
Dispose()
Open('FooBar').
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg3')
Close()
Dispose()
";
AssertLog(expectedLog);
}
[Fact]
public void SimpleBatchedDatabaseTest()
{
MockDbConnection.ClearLog();
DatabaseTarget dt = new DatabaseTarget()
{
CommandText = "INSERT INTO FooBar VALUES('${message}')",
ConnectionString = "FooBar",
DBProvider = typeof(MockDbConnection).AssemblyQualifiedName,
};
dt.Initialize(null);
Assert.Same(typeof(MockDbConnection), dt.ConnectionType);
List<Exception> exceptions = new List<Exception>();
var events = new[]
{
new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add),
new LogEventInfo(LogLevel.Info, "MyLogger", "msg2").WithContinuation(exceptions.Add),
new LogEventInfo(LogLevel.Info, "MyLogger", "msg3").WithContinuation(exceptions.Add),
};
dt.WriteAsyncLogEvents(events);
foreach (var ex in exceptions)
{
Assert.Null(ex);
}
string expectedLog = @"Open('FooBar').
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg1')
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg2')
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg3')
Close()
Dispose()
";
AssertLog(expectedLog);
}
[Fact]
public void KeepConnectionOpenTest()
{
MockDbConnection.ClearLog();
DatabaseTarget dt = new DatabaseTarget()
{
CommandText = "INSERT INTO FooBar VALUES('${message}')",
ConnectionString = "FooBar",
DBProvider = typeof(MockDbConnection).AssemblyQualifiedName,
KeepConnection = true,
};
dt.Initialize(null);
Assert.Same(typeof(MockDbConnection), dt.ConnectionType);
List<Exception> exceptions = new List<Exception>();
dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add));
dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger", "msg2").WithContinuation(exceptions.Add));
dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger", "msg3").WithContinuation(exceptions.Add));
foreach (var ex in exceptions)
{
Assert.Null(ex);
}
string expectedLog = @"Open('FooBar').
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg1')
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg2')
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg3')
";
AssertLog(expectedLog);
MockDbConnection.ClearLog();
dt.Close();
expectedLog = @"Close()
Dispose()
";
AssertLog(expectedLog);
}
[Fact]
public void KeepConnectionOpenBatchedTest()
{
MockDbConnection.ClearLog();
DatabaseTarget dt = new DatabaseTarget()
{
CommandText = "INSERT INTO FooBar VALUES('${message}')",
ConnectionString = "FooBar",
DBProvider = typeof(MockDbConnection).AssemblyQualifiedName,
KeepConnection = true,
};
dt.Initialize(null);
Assert.Same(typeof(MockDbConnection), dt.ConnectionType);
var exceptions = new List<Exception>();
var events = new[]
{
new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add),
new LogEventInfo(LogLevel.Info, "MyLogger", "msg2").WithContinuation(exceptions.Add),
new LogEventInfo(LogLevel.Info, "MyLogger", "msg3").WithContinuation(exceptions.Add),
};
dt.WriteAsyncLogEvents(events);
foreach (var ex in exceptions)
{
Assert.Null(ex);
}
string expectedLog = @"Open('FooBar').
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg1')
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg2')
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg3')
";
AssertLog(expectedLog);
MockDbConnection.ClearLog();
dt.Close();
expectedLog = @"Close()
Dispose()
";
AssertLog(expectedLog);
}
[Fact]
public void KeepConnectionOpenTest2()
{
MockDbConnection.ClearLog();
DatabaseTarget dt = new DatabaseTarget()
{
CommandText = "INSERT INTO FooBar VALUES('${message}')",
ConnectionString = "Database=${logger}",
DBProvider = typeof(MockDbConnection).AssemblyQualifiedName,
KeepConnection = true,
};
dt.Initialize(null);
Assert.Same(typeof(MockDbConnection), dt.ConnectionType);
List<Exception> exceptions = new List<Exception>();
dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add));
dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger", "msg2").WithContinuation(exceptions.Add));
dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger2", "msg3").WithContinuation(exceptions.Add));
dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "MyLogger", "msg4").WithContinuation(exceptions.Add));
foreach (var ex in exceptions)
{
Assert.Null(ex);
}
string expectedLog = @"Open('Database=MyLogger').
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg1')
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg2')
Close()
Dispose()
Open('Database=MyLogger2').
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg3')
Close()
Dispose()
Open('Database=MyLogger').
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg4')
";
AssertLog(expectedLog);
MockDbConnection.ClearLog();
dt.Close();
expectedLog = @"Close()
Dispose()
";
AssertLog(expectedLog);
}
[Fact]
public void KeepConnectionOpenBatchedTest2()
{
MockDbConnection.ClearLog();
DatabaseTarget dt = new DatabaseTarget()
{
CommandText = "INSERT INTO FooBar VALUES('${message}')",
ConnectionString = "Database=${logger}",
DBProvider = typeof(MockDbConnection).AssemblyQualifiedName,
KeepConnection = true,
};
dt.Initialize(null);
Assert.Same(typeof(MockDbConnection), dt.ConnectionType);
// when we pass multiple log events in an array, the target will bucket-sort them by
// connection string and group all commands for the same connection string together
// to minimize the number of db open/close operations
// in this case msg1, msg2 and msg4 will be written together to MyLogger database
// and msg3 will be written to MyLogger2 database
List<Exception> exceptions = new List<Exception>();
var events = new[]
{
new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add),
new LogEventInfo(LogLevel.Info, "MyLogger", "msg2").WithContinuation(exceptions.Add),
new LogEventInfo(LogLevel.Info, "MyLogger2", "msg3").WithContinuation(exceptions.Add),
new LogEventInfo(LogLevel.Info, "MyLogger", "msg4").WithContinuation(exceptions.Add),
};
dt.WriteAsyncLogEvents(events);
foreach (var ex in exceptions)
{
Assert.Null(ex);
}
string expectedLog = @"Open('Database=MyLogger').
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg1')
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg2')
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg4')
Close()
Dispose()
Open('Database=MyLogger2').
ExecuteNonQuery: INSERT INTO FooBar VALUES('msg3')
";
AssertLog(expectedLog);
MockDbConnection.ClearLog();
dt.Close();
expectedLog = @"Close()
Dispose()
";
AssertLog(expectedLog);
}
[Fact]
public void InstallParameterTest()
{
MockDbConnection.ClearLog();
DatabaseCommandInfo installDbCommand = new DatabaseCommandInfo
{
Text = $"INSERT INTO dbo.SomeTable(SomeColumn) SELECT @paramOne WHERE NOT EXISTS(SELECT 1 FROM dbo.SomeOtherTable WHERE SomeColumn = @paramOne);"
};
installDbCommand.Parameters.Add(new DatabaseParameterInfo("paramOne", "SomeValue"));
DatabaseTarget dt = new DatabaseTarget()
{
DBProvider = typeof(MockDbConnection).AssemblyQualifiedName,
KeepConnection = true,
CommandText = "not_important"
};
dt.InstallDdlCommands.Add(installDbCommand);
dt.Initialize(null);
Assert.Same(typeof(MockDbConnection), dt.ConnectionType);
dt.Install(new InstallationContext());
string expectedLog = @"Open('Server=.;Trusted_Connection=SSPI;').
CreateParameter(0)
Parameter #0 Direction=Input
Parameter #0 Name=paramOne
Parameter #0 Value=""SomeValue""
Add Parameter Parameter #0
ExecuteNonQuery: INSERT INTO dbo.SomeTable(SomeColumn) SELECT @paramOne WHERE NOT EXISTS(SELECT 1 FROM dbo.SomeOtherTable WHERE SomeColumn = @paramOne);
Close()
Dispose()
";
AssertLog(expectedLog);
}
[Fact]
public void ParameterTest()
{
MockDbConnection.ClearLog();
DatabaseTarget dt = new DatabaseTarget()
{
CommandText = "INSERT INTO FooBar VALUES(@msg, @lvl, @lg)",
DBProvider = typeof(MockDbConnection).AssemblyQualifiedName,
KeepConnection = true,
Parameters =
{
new DatabaseParameterInfo("msg", "${message}"),
new DatabaseParameterInfo("lvl", "${level}"),
new DatabaseParameterInfo("lg", "${logger}")
}
};
dt.Initialize(null);
Assert.Same(typeof(MockDbConnection), dt.ConnectionType);
// when we pass multiple log events in an array, the target will bucket-sort them by
// connection string and group all commands for the same connection string together
// to minimize the number of db open/close operations.
// In this test both events use the default connection string, so they are written
// in a single open/close cycle.
List<Exception> exceptions = new List<Exception>();
var events = new[]
{
new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add),
new LogEventInfo(LogLevel.Debug, "MyLogger2", "msg3").WithContinuation(exceptions.Add),
};
dt.WriteAsyncLogEvents(events);
foreach (var ex in exceptions)
{
Assert.Null(ex);
}
string expectedLog = @"Open('Server=.;Trusted_Connection=SSPI;').
CreateParameter(0)
Parameter #0 Direction=Input
Parameter #0 Name=msg
Parameter #0 Value=""msg1""
Add Parameter Parameter #0
CreateParameter(1)
Parameter #1 Direction=Input
Parameter #1 Name=lvl
Parameter #1 Value=""Info""
Add Parameter Parameter #1
CreateParameter(2)
Parameter #2 Direction=Input
Parameter #2 Name=lg
Parameter #2 Value=""MyLogger""
Add Parameter Parameter #2
ExecuteNonQuery: INSERT INTO FooBar VALUES(@msg, @lvl, @lg)
CreateParameter(0)
Parameter #0 Direction=Input
Parameter #0 Name=msg
Parameter #0 Value=""msg3""
Add Parameter Parameter #0
CreateParameter(1)
Parameter #1 Direction=Input
Parameter #1 Name=lvl
Parameter #1 Value=""Debug""
Add Parameter Parameter #1
CreateParameter(2)
Parameter #2 Direction=Input
Parameter #2 Name=lg
Parameter #2 Value=""MyLogger2""
Add Parameter Parameter #2
ExecuteNonQuery: INSERT INTO FooBar VALUES(@msg, @lvl, @lg)
";
AssertLog(expectedLog);
MockDbConnection.ClearLog();
dt.Close();
expectedLog = @"Close()
Dispose()
";
AssertLog(expectedLog);
}
[Theory]
[InlineData(null, true, @"""2""")]
[InlineData(null, false, @"""2""")]
[InlineData(DbType.Int32, true, "2")]
[InlineData(DbType.Int32, false, "2")]
[InlineData(DbType.Object, true, @"""2""")]
[InlineData(DbType.Object, false, "Info")]
public void LevelParameterTest(DbType? dbType, bool noRawValue, string expectedValue)
{
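// norawvalue=true forces the layout to render its string output instead of exposing
// the raw value, so the target must convert from text for the typed DbType cases.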
string lvlLayout = noRawValue ? "${level:format=Ordinal:norawvalue=true}" : "${level:format=Ordinal}";
MockDbConnection.ClearLog();
DatabaseTarget dt = new DatabaseTarget()
{
CommandText = "INSERT INTO FooBar VALUES(@lvl, @msg)",
DBProvider = typeof(MockDbConnection).AssemblyQualifiedName,
KeepConnection = true,
Parameters =
{
new DatabaseParameterInfo("lvl", lvlLayout) { DbType = dbType?.ToString() },
new DatabaseParameterInfo("msg", "${message}")
}
};
dt.Initialize(null);
Assert.Same(typeof(MockDbConnection), dt.ConnectionType);
List<Exception> exceptions = new List<Exception>();
var events = new[]
{
new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add),
};
dt.WriteAsyncLogEvents(events);
foreach (var ex in exceptions)
{
Assert.Null(ex);
}
string expectedLog = string.Format(@"Open('Server=.;Trusted_Connection=SSPI;').
CreateParameter(0)
Parameter #0 Direction=Input
Parameter #0 Name=lvl{0}
Parameter #0 Value={1}
Add Parameter Parameter #0
CreateParameter(1)
Parameter #1 Direction=Input
Parameter #1 Name=msg
Parameter #1 Value=""msg1""
Add Parameter Parameter #1
ExecuteNonQuery: INSERT INTO FooBar VALUES(@lvl, @msg)
", dbType.HasValue ? $"\r\nParameter #0 DbType={dbType.Value}" : "", expectedValue);
AssertLog(expectedLog);
MockDbConnection.ClearLog();
dt.Close();
expectedLog = @"Close()
Dispose()
";
AssertLog(expectedLog);
}
[Theory]
[InlineData("${counter}", DbType.Int16, (short)1)]
[InlineData("${counter}", DbType.Int32, 1)]
[InlineData("${counter}", DbType.Int64, (long)1)]
[InlineData("${counter}", DbType.Int16, (short)1)]
[InlineData("${counter:norawvalue=true}", DbType.Int16, (short)1)] //fallback
[InlineData("${counter}", DbType.VarNumeric, 1, true)]
[InlineData("${counter}", DbType.AnsiString, "1")]
[InlineData("${level}", DbType.AnsiString, "Debug")]
[InlineData("${level}", DbType.Int32, 1)]
[InlineData("${level}", DbType.UInt16, (ushort) 1)]
[InlineData("${event-properties:boolprop}", DbType.Boolean, true)]
[InlineData("${event-properties:intprop}", DbType.Int32, 123)]
[InlineData("${event-properties:intprop}", DbType.AnsiString, "123")]
[InlineData("${event-properties:intprop}", DbType.AnsiStringFixedLength, "123")]
[InlineData("${event-properties:intprop}", DbType.String, "123")]
[InlineData("${event-properties:intprop}", DbType.StringFixedLength, "123")]
[InlineData("${event-properties:almostAsIntProp}", DbType.Int16, (short)124)]
[InlineData("${event-properties:almostAsIntProp:norawvalue=true}", DbType.Int16, (short)124)]
[InlineData("${event-properties:almostAsIntProp}", DbType.Int32, 124)]
[InlineData("${event-properties:almostAsIntProp}", DbType.Int64, (long)124)]
[InlineData("${event-properties:almostAsIntProp}", DbType.AnsiString, " 124 ")]
public void GetParameterValueTest(string layout, DbType dbtype, object expected, bool convertToDecimal = false)
{
// Arrange
var logEventInfo = new LogEventInfo(LogLevel.Debug, "logger1", "message 2");
logEventInfo.Properties["intprop"] = 123;
logEventInfo.Properties["boolprop"] = true;
logEventInfo.Properties["almostAsIntProp"] = " 124 ";
logEventInfo.Properties["dateprop"] = new DateTime(2018, 12, 30, 13, 34, 56);
var parameterName = "@param1";
var databaseParameterInfo = new DatabaseParameterInfo
{
DbType = dbtype.ToString(),
Layout = layout,
Name = parameterName,
};
databaseParameterInfo.SetDbType(new MockDbConnection().CreateCommand().CreateParameter());
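// SetDbType applies the configured DbType string to a concrete IDbDataParameter,
// which determines the target type for the raw-value conversion exercised below.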
// Act
var result = new DatabaseTarget().GetDatabaseParameterValue(logEventInfo, databaseParameterInfo);
//Assert
if (convertToDecimal)
{
// work around the fact that decimal literals cannot be used in attributes (InlineData)
expected = (decimal) (int) expected;
}
Assert.Equal(expected, result);
}
[Theory]
[MemberData(nameof(ConvertFromStringTestCases))]
public void GetParameterValueFromStringTest(string value, DbType dbType, object expected, string format = null, CultureInfo cultureInfo = null)
{
var culture = System.Threading.Thread.CurrentThread.CurrentCulture;
try
{
System.Threading.Thread.CurrentThread.CurrentCulture = new CultureInfo("NL-nl");
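// Dutch culture uses ',' as the decimal separator, which the "1,2" cases below
// rely on unless an explicit Culture is supplied.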
// Arrange
var databaseParameterInfo = new DatabaseParameterInfo("@test", value)
{
Format = format,
DbType = dbType.ToString(),
Culture = cultureInfo,
};
databaseParameterInfo.SetDbType(new MockDbConnection().CreateCommand().CreateParameter());
// Act
var result = new DatabaseTarget().GetDatabaseParameterValue(LogEventInfo.CreateNullEvent(), databaseParameterInfo);
// Assert
Assert.Equal(expected, result);
}
finally
{
// Restore
System.Threading.Thread.CurrentThread.CurrentCulture = culture;
}
}
public static IEnumerable<object[]> ConvertFromStringTestCases()
{
yield return new object[] { "true", DbType.Boolean, true };
yield return new object[] { "True", DbType.Boolean, true };
yield return new object[] { "1,2", DbType.VarNumeric, (decimal)1.2 };
yield return new object[] { "1,2", DbType.Currency, (decimal)1.2 };
yield return new object[] { "1,2", DbType.Decimal, (decimal)1.2 };
yield return new object[] { "1,2", DbType.Double, (double)1.2 };
yield return new object[] { "1,2", DbType.Single, (Single)1.2 };
yield return new object[] { "2:30", DbType.Time, new TimeSpan(0, 2, 30, 0), };
yield return new object[] { "2018-12-23 22:56", DbType.DateTime, new DateTime(2018, 12, 23, 22, 56, 0), };
yield return new object[] { "2018-12-23 22:56", DbType.DateTime2, new DateTime(2018, 12, 23, 22, 56, 0), };
yield return new object[] { "23-12-2018 22:56", DbType.DateTime, new DateTime(2018, 12, 23, 22, 56, 0), "dd-MM-yyyy HH:mm" };
yield return new object[] { new DateTime(2018, 12, 23, 22, 56, 0).ToString(CultureInfo.InvariantCulture), DbType.DateTime, new DateTime(2018, 12, 23, 22, 56, 0), null, CultureInfo.InvariantCulture };
yield return new object[] { "2018-12-23", DbType.Date, new DateTime(2018, 12, 23, 0, 0, 0), };
yield return new object[] { "2018-12-23 +2:30", DbType.DateTimeOffset, new DateTimeOffset(2018, 12, 23, 0, 0, 0, new TimeSpan(2, 30, 0)) };
yield return new object[] { "23-12-2018 22:56 +2:30", DbType.DateTimeOffset, new DateTimeOffset(2018, 12, 23, 22, 56, 0, new TimeSpan(2, 30, 0)), "dd-MM-yyyy HH:mm zzz" };
yield return new object[] { "3888CCA3-D11D-45C9-89A5-E6B72185D287", DbType.Guid, Guid.Parse("3888CCA3-D11D-45C9-89A5-E6B72185D287") };
yield return new object[] { "3888CCA3D11D45C989A5E6B72185D287", DbType.Guid, Guid.Parse("3888CCA3-D11D-45C9-89A5-E6B72185D287") };
yield return new object[] { "3888CCA3D11D45C989A5E6B72185D287", DbType.Guid, Guid.Parse("3888CCA3-D11D-45C9-89A5-E6B72185D287"), "N" };
yield return new object[] { "3", DbType.Byte, (byte)3 };
yield return new object[] { "3", DbType.SByte, (sbyte)3 };
yield return new object[] { "3", DbType.Int16, (short)3 };
yield return new object[] { " 3 ", DbType.Int16, (short)3 };
yield return new object[] { "3", DbType.Int32, 3 };
yield return new object[] { "3", DbType.Int64, (long)3 };
yield return new object[] { "3", DbType.UInt16, (ushort)3 };
yield return new object[] { "3", DbType.UInt32, (uint)3 };
yield return new object[] { "3", DbType.UInt64, (ulong)3 };
yield return new object[] { "3", DbType.AnsiString, "3" };
yield return new object[] { "${db-null}", DbType.DateTime, DBNull.Value };
yield return new object[] { "${event-properties:userid}", DbType.Int32, 0 };
yield return new object[] { "${date:universalTime=true:format=yyyy-MM:norawvalue=true}", DbType.DateTime, DateTime.SpecifyKind(DateTime.UtcNow.Date.AddDays(-DateTime.UtcNow.Day + 1), DateTimeKind.Unspecified)};
}
[Fact]
public void ParameterFacetTest()
{
MockDbConnection.ClearLog();
DatabaseTarget dt = new DatabaseTarget()
{
CommandText = "INSERT INTO FooBar VALUES(@msg, @lvl, @lg)",
DBProvider = typeof(MockDbConnection).AssemblyQualifiedName,
KeepConnection = true,
Parameters =
{
new DatabaseParameterInfo("msg", "${message}")
{
Precision = 3,
Scale = 7,
Size = 9,
},
new DatabaseParameterInfo("lvl", "${level}")
{
Scale = 7
},
new DatabaseParameterInfo("lg", "${logger}")
{
Precision = 0
},
}
};
dt.Initialize(null);
Assert.Same(typeof(MockDbConnection), dt.ConnectionType);
// when we pass multiple log events in an array, the target will bucket-sort them by
// connection string and group all commands for the same connection string together
// to minimize the number of db open/close operations.
// In this test both events use the default connection string, so they are written
// in a single open/close cycle.
var exceptions = new List<Exception>();
var events = new[]
{
new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add),
new LogEventInfo(LogLevel.Debug, "MyLogger2", "msg3").WithContinuation(exceptions.Add),
};
dt.WriteAsyncLogEvents(events);
dt.Close();
foreach (var ex in exceptions)
{
Assert.Null(ex);
}
string expectedLog = @"Open('Server=.;Trusted_Connection=SSPI;').
CreateParameter(0)
Parameter #0 Direction=Input
Parameter #0 Name=msg
Parameter #0 Size=9
Parameter #0 Precision=3
Parameter #0 Scale=7
Parameter #0 Value=""msg1""
Add Parameter Parameter #0
CreateParameter(1)
Parameter #1 Direction=Input
Parameter #1 Name=lvl
Parameter #1 Scale=7
Parameter #1 Value=""Info""
Add Parameter Parameter #1
CreateParameter(2)
Parameter #2 Direction=Input
Parameter #2 Name=lg
Parameter #2 Value=""MyLogger""
Add Parameter Parameter #2
ExecuteNonQuery: INSERT INTO FooBar VALUES(@msg, @lvl, @lg)
CreateParameter(0)
Parameter #0 Direction=Input
Parameter #0 Name=msg
Parameter #0 Size=9
Parameter #0 Precision=3
Parameter #0 Scale=7
Parameter #0 Value=""msg3""
Add Parameter Parameter #0
CreateParameter(1)
Parameter #1 Direction=Input
Parameter #1 Name=lvl
Parameter #1 Scale=7
Parameter #1 Value=""Debug""
Add Parameter Parameter #1
CreateParameter(2)
Parameter #2 Direction=Input
Parameter #2 Name=lg
Parameter #2 Value=""MyLogger2""
Add Parameter Parameter #2
ExecuteNonQuery: INSERT INTO FooBar VALUES(@msg, @lvl, @lg)
Close()
Dispose()
";
AssertLog(expectedLog);
}
[Fact]
public void ParameterDbTypePropertyNameTest()
{
MockDbConnection.ClearLog();
LoggingConfiguration c = XmlLoggingConfiguration.CreateFromXmlString(@"
<nlog>
<targets>
<target name='dt' type='Database'>
<DBProvider>MockDb</DBProvider>
<ConnectionString>FooBar</ConnectionString>
<CommandText>INSERT INTO FooBar VALUES(@message,@level,@date)</CommandText>
<parameter name='@message' layout='${message}'/>
<parameter name='@level' dbType='MockDbType.Int32' layout='${level:format=Ordinal}'/>
<parameter name='@date' dbType='MockDbType.DateTime' format='yyyy-MM-dd HH:mm:ss.fff' layout='${date:format=yyyy-MM-dd HH\:mm\:ss.fff}'/>
</target>
</targets>
</nlog>");
DatabaseTarget dt = c.FindTargetByName("dt") as DatabaseTarget;
Assert.NotNull(dt);
dt.DBProvider = typeof(MockDbConnection).AssemblyQualifiedName;
dt.Initialize(c);
List<Exception> exceptions = new List<Exception>();
var alogEvent = new LogEventInfo(LogLevel.Info, "MyLogger", "msg1").WithContinuation(exceptions.Add);
dt.WriteAsyncLogEvent(alogEvent);
dt.WriteAsyncLogEvent(alogEvent);
foreach (var ex in exceptions)
{
Assert.Null(ex);
}
string expectedLog = @"Open('FooBar').
CreateParameter(0)
Parameter #0 Direction=Input
Parameter #0 Name=@message
Parameter #0 Value=""msg1""
Add Parameter Parameter #0
CreateParameter(1)
Parameter #1 Direction=Input
Parameter #1 Name=@level
Parameter #1 MockDbType=Int32
Parameter #1 Value=""{0}""
Add Parameter Parameter #1
CreateParameter(2)
Parameter #2 Direction=Input
Parameter #2 Name=@date
Parameter #2 MockDbType=DateTime
Parameter #2 Value={1}
Add Parameter Parameter #2
ExecuteNonQuery: INSERT INTO FooBar VALUES(@message,@level,@date)
Close()
Dispose()
";
expectedLog = string.Format(expectedLog + expectedLog, LogLevel.Info.Ordinal, alogEvent.LogEvent.TimeStamp.ToString(CultureInfo.InvariantCulture));
AssertLog(expectedLog);
}
[Fact]
public void ConnectionStringBuilderTest1()
{
DatabaseTarget dt;
dt = new DatabaseTarget();
Assert.Equal("Server=.;Trusted_Connection=SSPI;", GetConnectionString(dt));
dt = new DatabaseTarget();
dt.DBHost = "${logger}";
Assert.Equal("Server=Logger1;Trusted_Connection=SSPI;", GetConnectionString(dt));
dt = new DatabaseTarget();
dt.DBHost = "HOST1";
dt.DBDatabase = "${logger}";
Assert.Equal("Server=HOST1;Trusted_Connection=SSPI;Database=Logger1", GetConnectionString(dt));
dt = new DatabaseTarget();
dt.DBHost = "HOST1";
dt.DBDatabase = "${logger}";
dt.DBUserName = "user1";
dt.DBPassword = "password1";
Assert.Equal("Server=HOST1;User id=user1;Password=password1;Database=Logger1", GetConnectionString(dt));
dt = new DatabaseTarget();
dt.ConnectionString = "customConnectionString42";
dt.DBHost = "HOST1";
dt.DBDatabase = "${logger}";
dt.DBUserName = "user1";
dt.DBPassword = "password1";
Assert.Equal("customConnectionString42", GetConnectionString(dt));
}
[Fact]
public void DatabaseExceptionTest1()
{
MockDbConnection.ClearLog();
var exceptions = new List<Exception>();
using (new NoThrowNLogExceptions())
{
var db = new DatabaseTarget();
db.CommandText = "not important";
db.ConnectionString = "cannotconnect";
db.DBProvider = typeof(MockDbConnection).AssemblyQualifiedName;
db.Initialize(null);
db.WriteAsyncLogEvent(LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add));
db.Close();
}
Assert.Single(exceptions);
Assert.NotNull(exceptions[0]);
Assert.Equal("Cannot open fake database.", exceptions[0].Message);
Assert.Equal("Open('cannotconnect').\r\n", MockDbConnection.Log);
}
[Fact]
public void DatabaseExceptionTest2()
{
MockDbConnection.ClearLog();
var exceptions = new List<Exception>();
using (new NoThrowNLogExceptions())
{
var db = new DatabaseTarget();
db.CommandText = "not important";
db.ConnectionString = "cannotexecute";
db.KeepConnection = true;
db.DBProvider = typeof(MockDbConnection).AssemblyQualifiedName;
db.Initialize(null);
db.WriteAsyncLogEvent(LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add));
db.WriteAsyncLogEvent(LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add));
db.WriteAsyncLogEvent(LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add));
db.Close();
}
Assert.Equal(3, exceptions.Count);
Assert.NotNull(exceptions[0]);
Assert.NotNull(exceptions[1]);
Assert.NotNull(exceptions[2]);
Assert.Equal("Failure during ExecuteNonQuery", exceptions[0].Message);
Assert.Equal("Failure during ExecuteNonQuery", exceptions[1].Message);
Assert.Equal("Failure during ExecuteNonQuery", exceptions[2].Message);
string expectedLog = @"Open('cannotexecute').
ExecuteNonQuery: not important
Close()
Dispose()
Open('cannotexecute').
ExecuteNonQuery: not important
Close()
Dispose()
Open('cannotexecute').
ExecuteNonQuery: not important
Close()
Dispose()
";
AssertLog(expectedLog);
}
[Fact]
public void DatabaseExceptionTest3()
{
MockDbConnection.ClearLog();
var exceptions = new List<Exception>();
using (new NoThrowNLogExceptions())
{
var db = new DatabaseTarget();
db.CommandText = "not important";
db.ConnectionString = "cannotexecute";
db.KeepConnection = true;
db.DBProvider = typeof(MockDbConnection).AssemblyQualifiedName;
db.Initialize(null);
db.WriteAsyncLogEvents(
LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add),
LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add),
LogEventInfo.CreateNullEvent().WithContinuation(exceptions.Add));
db.Close();
}
Assert.Equal(3, exceptions.Count);
Assert.NotNull(exceptions[0]);
Assert.NotNull(exceptions[1]);
Assert.NotNull(exceptions[2]);
Assert.Equal("Failure during ExecuteNonQuery", exceptions[0].Message);
Assert.Equal("Failure during ExecuteNonQuery", exceptions[1].Message);
Assert.Equal("Failure during ExecuteNonQuery", exceptions[2].Message);
string expectedLog = @"Open('cannotexecute').
ExecuteNonQuery: not important
Close()
Dispose()
Open('cannotexecute').
ExecuteNonQuery: not important
Close()
Dispose()
Open('cannotexecute').
ExecuteNonQuery: not important
Close()
Dispose()
";
AssertLog(expectedLog);
}
#if !MONO && !NETSTANDARD
[Fact]
public void ConnectionStringNameInitTest()
{
var dt = new DatabaseTarget
{
ConnectionStringName = "MyConnectionString",
CommandText = "notimportant",
};
Assert.Same(ConfigurationManager.ConnectionStrings, dt.ConnectionStringsSettings);
dt.ConnectionStringsSettings = new ConnectionStringSettingsCollection()
{
new ConnectionStringSettings("MyConnectionString", "cs1", "MockDb"),
};
dt.Initialize(null);
Assert.Same(MockDbFactory.Instance, dt.ProviderFactory);
Assert.Equal("cs1", dt.ConnectionString.Render(LogEventInfo.CreateNullEvent()));
}
[Fact]
public void ConnectionStringNameNegativeTest_if_ThrowConfigExceptions()
{
LogManager.ThrowConfigExceptions = true;
var dt = new DatabaseTarget
{
ConnectionStringName = "MyConnectionString",
CommandText = "notimportant",
ConnectionStringsSettings = new ConnectionStringSettingsCollection(),
};
try
{
dt.Initialize(null);
Assert.True(false, "Exception expected.");
}
catch (NLogConfigurationException configurationException)
{
Assert.Equal(
"Connection string 'MyConnectionString' is not declared in <connectionStrings /> section.",
configurationException.Message);
}
}
[Fact]
public void ProviderFactoryInitTest()
{
var dt = new DatabaseTarget();
dt.DBProvider = "MockDb";
dt.CommandText = "Notimportant";
dt.Initialize(null);
Assert.Same(MockDbFactory.Instance, dt.ProviderFactory);
dt.OpenConnection("myConnectionString");
Assert.Equal(1, MockDbConnection2.OpenCount);
Assert.Equal("myConnectionString", MockDbConnection2.LastOpenConnectionString);
}
#endif
[Fact]
public void SqlServerShorthandNotationTest()
{
foreach (string provName in new[] { "microsoft", "msde", "mssql", "sqlserver" })
{
var dt = new DatabaseTarget()
{
Name = "myTarget",
DBProvider = provName,
ConnectionString = "notimportant",
CommandText = "notimportant",
};
dt.Initialize(null);
#if !NETSTANDARD
Assert.Equal(typeof(SqlConnection), dt.ConnectionType);
#else
Assert.NotNull(dt.ConnectionType);
#endif
}
}
#if !NETSTANDARD
[Fact]
public void OleDbShorthandNotationTest()
{
var dt = new DatabaseTarget()
{
Name = "myTarget",
DBProvider = "oledb",
ConnectionString = "notimportant",
CommandText = "notimportant",
};
dt.Initialize(null);
Assert.Equal(typeof(System.Data.OleDb.OleDbConnection), dt.ConnectionType);
}
[Fact]
public void OdbcShorthandNotationTest()
{
var dt = new DatabaseTarget()
{
Name = "myTarget",
DBProvider = "odbc",
ConnectionString = "notimportant",
CommandText = "notimportant",
};
dt.Initialize(null);
Assert.Equal(typeof(System.Data.Odbc.OdbcConnection), dt.ConnectionType);
}
#endif
[Fact]
public void SQLite_InstallAndLogMessageProgrammatically()
{
SQLiteTest sqlLite = new SQLiteTest("TestLogProgram.sqlite");
// delete the database if for some reason it already exists
sqlLite.TryDropDatabase();
LogManager.ThrowExceptions = true;
try
{
sqlLite.CreateDatabase();
var connectionString = sqlLite.GetConnectionString();
DatabaseTarget testTarget = new DatabaseTarget("TestSqliteTarget");
testTarget.ConnectionString = connectionString;
testTarget.DBProvider = GetSQLiteDbProvider();
testTarget.InstallDdlCommands.Add(new DatabaseCommandInfo()
{
CommandType = CommandType.Text,
Text = $@"
CREATE TABLE NLogTestTable (
Id int PRIMARY KEY,
Message varchar(100) NULL)"
});
using (var context = new InstallationContext())
{
testTarget.Install(context);
}
// check that the table was created
var tableName = sqlLite.IssueScalarQuery("SELECT name FROM sqlite_master WHERE type = 'table' AND name = 'NLogTestTable'");
Assert.Equal("NLogTestTable", tableName);
testTarget.CommandText = "INSERT INTO NLogTestTable (Message) VALUES (@message)";
testTarget.Parameters.Add(new DatabaseParameterInfo("@message", new NLog.Layouts.SimpleLayout("${message}")));
// setup logging
var config = new LoggingConfiguration();
config.AddTarget("dbTarget", testTarget);
var rule = new LoggingRule("*", LogLevel.Debug, testTarget);
config.LoggingRules.Add(rule);
// try to log
LogManager.Configuration = config;
var logger = LogManager.GetLogger("testLog");
logger.Debug("Test debug message");
logger.Error("Test error message");
// count(1) returns a long in SQLite
var logcount = sqlLite.IssueScalarQuery("SELECT count(1) FROM NLogTestTable");
Assert.Equal((long)2, logcount);
}
finally
{
sqlLite.TryDropDatabase();
}
}
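// Picks the SQLite ADO.NET provider assembly matching the current target framework.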
private string GetSQLiteDbProvider()
{
#if MONO
return "Mono.Data.Sqlite.SqliteConnection, Mono.Data.Sqlite";
#elif NETSTANDARD
return "Microsoft.Data.Sqlite.SqliteConnection, Microsoft.Data.Sqlite";
#else
return "System.Data.SQLite.SQLiteConnection, System.Data.SQLite";
#endif
}
[Fact]
public void SQLite_InstallAndLogMessage()
{
SQLiteTest sqlLite = new SQLiteTest("TestLogXml.sqlite");
// delete database just in case
sqlLite.TryDropDatabase();
LogManager.ThrowExceptions = true;
try
{
sqlLite.CreateDatabase();
var connectionString = sqlLite.GetConnectionString();
string dbProvider = GetSQLiteDbProvider();
// Create log with xml config
LogManager.Configuration = XmlLoggingConfiguration.CreateFromXmlString(@"
<nlog xmlns='http://www.nlog-project.org/schemas/NLog.xsd'
xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' throwExceptions='true'>
<targets>
<target name='database' xsi:type='Database' dbProvider=""" + dbProvider + @""" connectionstring=""" + connectionString + @"""
commandText='insert into NLogSqlLiteTest (Message) values (@message);'>
<parameter name='@message' layout='${message}' />
<install-command ignoreFailures=""false""
text=""CREATE TABLE NLogSqlLiteTest (
Id int PRIMARY KEY,
Message varchar(100) NULL
);""/>
</target>
</targets>
<rules>
<logger name='*' writeTo='database' />
</rules>
</nlog>");
// install
InstallationContext context = new InstallationContext();
LogManager.Configuration.Install(context);
// check that the table was created
var tableName = sqlLite.IssueScalarQuery("SELECT name FROM sqlite_master WHERE type = 'table' AND name = 'NLogSqlLiteTest'");
Assert.Equal("NLogSqlLiteTest", tableName);
// start to log
var logger = LogManager.GetLogger("SQLite");
logger.Debug("Test");
logger.Error("Test2");
logger.Info("Final test row");
// count(1) returns a long in SQLite
var logcount = sqlLite.IssueScalarQuery("SELECT count(1) FROM NLogSqlLiteTest");
Assert.Equal((long)3, logcount);
}
finally
{
sqlLite.TryDropDatabase();
}
}
[Fact]
public void SQLite_InstallTest()
{
SQLiteTest sqlLite = new SQLiteTest("TestInstallXml.sqlite");
// delete database just in case
sqlLite.TryDropDatabase();
LogManager.ThrowExceptions = true;
try
{
sqlLite.CreateDatabase();
var connectionString = sqlLite.GetConnectionString();
string dbProvider = GetSQLiteDbProvider();
// Create log with xml config
LogManager.Configuration = XmlLoggingConfiguration.CreateFromXmlString(@"
<nlog xmlns='http://www.nlog-project.org/schemas/NLog.xsd'
xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' throwExceptions='true'>
<targets>
<target name='database' xsi:type='Database' dbProvider=""" + dbProvider + @""" connectionstring=""" + connectionString + @"""
commandText='not_important'>
<install-command ignoreFailures=""false""
text=""CREATE TABLE NLogSqlLiteTestAppNames (
Id int PRIMARY KEY,
Name varchar(100) NULL
);
INSERT INTO NLogSqlLiteTestAppNames(Id, Name) VALUES (1, @appName);"">
<parameter name='@appName' layout='MyApp' />
</install-command>
</target>
</targets>
<rules>
<logger name='*' writeTo='database' />
</rules>
</nlog>");
// install
InstallationContext context = new InstallationContext();
LogManager.Configuration.Install(context);
// check that the table was created
var tableName = sqlLite.IssueScalarQuery("SELECT name FROM sqlite_master WHERE type = 'table' AND name = 'NLogSqlLiteTestAppNames'");
Assert.Equal("NLogSqlLiteTestAppNames", tableName);
// count(*) returns a long in SQLite
var logcount = sqlLite.IssueScalarQuery("SELECT count(*) FROM NLogSqlLiteTestAppNames");
Assert.Equal((long)1, logcount);
// check that the inserted entry is correct
var entryValue = sqlLite.IssueScalarQuery("SELECT Name FROM NLogSqlLiteTestAppNames WHERE ID = 1");
Assert.Equal("MyApp", entryValue);
}
finally
{
sqlLite.TryDropDatabase();
}
}
[Fact]
public void SQLite_InstallProgrammaticallyTest()
{
SQLiteTest sqlLite = new SQLiteTest("TestInstallProgram.sqlite");
// delete database just in case
sqlLite.TryDropDatabase();
LogManager.ThrowExceptions = true;
try
{
sqlLite.CreateDatabase();
var connectionString = sqlLite.GetConnectionString();
string dbProvider = GetSQLiteDbProvider();
DatabaseTarget testTarget = new DatabaseTarget("TestSqliteTargetInstallProgram");
testTarget.ConnectionString = connectionString;
testTarget.DBProvider = dbProvider;
DatabaseCommandInfo installDbCommand = new DatabaseCommandInfo
{
Text = "CREATE TABLE NLogSqlLiteTestAppNames (Id int PRIMARY KEY, Name varchar(100) NULL); " +
"INSERT INTO NLogSqlLiteTestAppNames(Id, Name) SELECT 1, @paramOne WHERE NOT EXISTS(SELECT 1 FROM NLogSqlLiteTestAppNames WHERE Name = @paramOne);"
};
installDbCommand.Parameters.Add(new DatabaseParameterInfo("@paramOne", "MyApp"));
testTarget.InstallDdlCommands.Add(installDbCommand);
// install
InstallationContext context = new InstallationContext();
testTarget.Install(context);
// check that the table was created
var tableName = sqlLite.IssueScalarQuery("SELECT name FROM sqlite_master WHERE type = 'table' AND name = 'NLogSqlLiteTestAppNames'");
Assert.Equal("NLogSqlLiteTestAppNames", tableName);
// count(*) returns a long in SQLite
var logcount = sqlLite.IssueScalarQuery("SELECT count(*) FROM NLogSqlLiteTestAppNames");
Assert.Equal((long)1, logcount);
// check that the inserted entry is correct
var entryValue = sqlLite.IssueScalarQuery("SELECT Name FROM NLogSqlLiteTestAppNames WHERE ID = 1");
Assert.Equal("MyApp", entryValue);
}
finally
{
sqlLite.TryDropDatabase();
}
}
private void SetupSqliteConfigWithInvalidInstallCommand(string databaseName)
{
var nlogXmlConfig = @"
<nlog xmlns='http://www.nlog-project.org/schemas/NLog.xsd'
xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' throwExceptions='false'>
<targets>
<target name='database' xsi:type='Database' dbProvider='{0}' connectionstring='{1}'
commandText='insert into RethrowingInstallExceptionsTable (Message) values (@message);'>
<parameter name='@message' layout='${{message}}' />
<install-command text='THIS IS NOT VALID SQL;' />
</target>
</targets>
<rules>
<logger name='*' writeTo='database' />
</rules>
</nlog>";
// Use an in-memory SQLite database
// See https://www.sqlite.org/inmemorydb.html
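// Note: every new connection to ':memory:' opens its own private, empty database.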
#if NETSTANDARD
var connectionString = "Data Source=:memory:";
#else
var connectionString = "Uri=file::memory:;Version=3";
#endif
LogManager.Configuration = XmlLoggingConfiguration.CreateFromXmlString(String.Format(nlogXmlConfig, GetSQLiteDbProvider(), connectionString));
}
[Fact]
public void NotRethrowingInstallExceptions()
{
using (new NoThrowNLogExceptions())
{
SetupSqliteConfigWithInvalidInstallCommand("not_rethrowing_install_exceptions");
// Default InstallationContext should not rethrow exceptions
InstallationContext context = new InstallationContext();
Assert.False(context.IgnoreFailures, "Failures should not be ignored by default");
Assert.False(context.ThrowExceptions, "Exceptions should not be thrown by default");
var exRecorded = Record.Exception(() => LogManager.Configuration.Install(context));
Assert.Null(exRecorded);
}
}
[Fact]
public void RethrowingInstallExceptions()
{
using (new NoThrowNLogExceptions())
{
SetupSqliteConfigWithInvalidInstallCommand("rethrowing_install_exceptions");
InstallationContext context = new InstallationContext()
{
ThrowExceptions = true
};
Assert.True(context.ThrowExceptions); // Sanity check
#if MONO || NETSTANDARD
Assert.Throws<SqliteException>(() => LogManager.Configuration.Install(context));
#else
Assert.Throws<SQLiteException>(() => LogManager.Configuration.Install(context));
#endif
}
}
[Fact]
public void SqlServer_NoTargetInstallException()
{
if (IsTravis())
{
Console.WriteLine("skipping test SqlServer_NoTargetInstallException because we are running in Travis");
return;
}
bool isAppVeyor = IsAppVeyor();
SqlServerTest.TryDropDatabase(isAppVeyor);
try
{
SqlServerTest.CreateDatabase(isAppVeyor);
var connectionString = SqlServerTest.GetConnectionString(isAppVeyor);
DatabaseTarget testTarget = new DatabaseTarget("TestDbTarget");
testTarget.ConnectionString = connectionString;
testTarget.InstallDdlCommands.Add(new DatabaseCommandInfo()
{
CommandType = CommandType.Text,
Text = $@"
IF EXISTS (SELECT * FROM INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA = 'dbo' AND TABLE_NAME = 'NLogTestTable')
RETURN
CREATE TABLE [Dbo].[NLogTestTable] (
[ID] [int] IDENTITY(1,1) NOT NULL,
[MachineName] [nvarchar](200) NULL)"
});
using (var context = new InstallationContext())
{
testTarget.Install(context);
}
var tableCatalog = SqlServerTest.IssueScalarQuery(isAppVeyor, @"SELECT TABLE_NAME FROM NLogTest.INFORMATION_SCHEMA.TABLES
WHERE TABLE_TYPE = 'BASE TABLE'
AND TABLE_NAME = 'NLogTestTable'
");
// check that the table exists
Assert.Equal("NLogTestTable", tableCatalog);
}
finally
{
SqlServerTest.TryDropDatabase(isAppVeyor);
}
}
[Fact]
public void SqlServer_InstallAndLogMessage()
{
if (IsTravis())
{
Console.WriteLine("skipping test SqlServer_InstallAndLogMessage because we are running in Travis");
return;
}
bool isAppVeyor = IsAppVeyor();
SqlServerTest.TryDropDatabase(isAppVeyor);
try
{
SqlServerTest.CreateDatabase(isAppVeyor);
var connectionString = SqlServerTest.GetConnectionString(IsAppVeyor());
LogManager.Configuration = XmlLoggingConfiguration.CreateFromXmlString(@"
<nlog xmlns='http://www.nlog-project.org/schemas/NLog.xsd'
xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' throwExceptions='true'>
<targets>
<target name='database' xsi:type='Database' connectionstring=""" + connectionString + @"""
commandText='insert into dbo.NLogSqlServerTest (Uid, LogDate) values (@uid, @logdate);'>
<parameter name='@uid' layout='${event-properties:uid}' />
<parameter name='@logdate' layout='${date}' />
<install-command ignoreFailures=""false""
text=""CREATE TABLE dbo.NLogSqlServerTest (
Id int NOT NULL IDENTITY(1,1) PRIMARY KEY CLUSTERED,
Uid uniqueidentifier NULL,
LogDate date NULL
);""/>
</target>
</targets>
<rules>
<logger name='*' writeTo='database' />
</rules>
</nlog>");
// install
InstallationContext context = new InstallationContext();
LogManager.Configuration.Install(context);
var tableCatalog = SqlServerTest.IssueScalarQuery(isAppVeyor, @"SELECT TABLE_CATALOG FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA = 'Dbo'
AND TABLE_NAME = 'NLogSqlServerTest'");
// check that the table exists
Assert.Equal("NLogTest", tableCatalog);
var logger = LogManager.GetLogger("A");
var target = LogManager.Configuration.FindTargetByName<DatabaseTarget>("database");
var uid = new Guid("e7c648b4-3508-4df2-b001-753148659d6d");
var logEvent = new LogEventInfo(LogLevel.Info, null, null);
logEvent.Properties["uid"] = uid;
logger.Log(logEvent);
var count = SqlServerTest.IssueScalarQuery(isAppVeyor, "SELECT count(1) FROM dbo.NLogSqlServerTest");
Assert.Equal(1, count);
var result = SqlServerTest.IssueScalarQuery(isAppVeyor, "SELECT Uid FROM dbo.NLogSqlServerTest");
Assert.Equal(uid, result);
var result2 = SqlServerTest.IssueScalarQuery(isAppVeyor, "SELECT LogDate FROM dbo.NLogSqlServerTest");
Assert.Equal(DateTime.Today, result2);
}
finally
{
SqlServerTest.TryDropDatabase(isAppVeyor);
}
}
#if !NETSTANDARD
[Fact]
public void GetProviderNameFromAppConfig()
{
LogManager.ThrowExceptions = true;
var databaseTarget = new DatabaseTarget()
{
Name = "myTarget",
ConnectionStringName = "test_connectionstring_with_providerName",
CommandText = "notimportant",
};
databaseTarget.ConnectionStringsSettings = new ConnectionStringSettingsCollection()
{
new ConnectionStringSettings("test_connectionstring_without_providerName", "some connectionstring"),
new ConnectionStringSettings("test_connectionstring_with_providerName", "some connectionstring",
"System.Data.SqlClient"),
};
databaseTarget.Initialize(null);
Assert.NotNull(databaseTarget.ProviderFactory);
Assert.Equal(typeof(SqlClientFactory), databaseTarget.ProviderFactory.GetType());
}
[Fact]
public void DontRequireProviderNameInAppConfig()
{
LogManager.ThrowExceptions = true;
var databaseTarget = new DatabaseTarget()
{
Name = "myTarget",
ConnectionStringName = "test_connectionstring_without_providerName",
CommandText = "notimportant",
DBProvider = "System.Data.SqlClient"
};
databaseTarget.ConnectionStringsSettings = new ConnectionStringSettingsCollection()
{
new ConnectionStringSettings("test_connectionstring_without_providerName", "some connectionstring"),
new ConnectionStringSettings("test_connectionstring_with_providerName", "some connectionstring",
"System.Data.SqlClient"),
};
databaseTarget.Initialize(null);
Assert.NotNull(databaseTarget.ProviderFactory);
Assert.Equal(typeof(SqlClientFactory), databaseTarget.ProviderFactory.GetType());
}
[Fact]
public void GetProviderNameFromConnectionString()
{
LogManager.ThrowExceptions = true;
var databaseTarget = new DatabaseTarget()
{
Name = "myTarget",
ConnectionStringName = "test_connectionstring_with_providerName",
CommandText = "notimportant",
};
databaseTarget.ConnectionStringsSettings = new ConnectionStringSettingsCollection()
{
new ConnectionStringSettings("test_connectionstring_with_providerName",
"metadata=res://*/Model.csdl|res://*/Model.ssdl|res://*/Model.msl;provider=System.Data.SqlClient;provider connection string=\"data source=192.168.0.100;initial catalog=TEST_DB;user id=myUser;password=SecretPassword;multipleactiveresultsets=True;application name=EntityFramework\"",
"System.Data.EntityClient"),
};
databaseTarget.Initialize(null);
Assert.NotNull(databaseTarget.ProviderFactory);
Assert.Equal(typeof(SqlClientFactory), databaseTarget.ProviderFactory.GetType());
Assert.Equal("data source=192.168.0.100;initial catalog=TEST_DB;user id=myUser;password=SecretPassword;multipleactiveresultsets=True;application name=EntityFramework", ((NLog.Layouts.SimpleLayout)databaseTarget.ConnectionString).FixedText);
}
#endif
[Theory]
[InlineData("usetransactions='false'", true)]
[InlineData("usetransactions='true'", true)]
[InlineData("", false)]
public void WarningForObsoleteUseTransactions(string property, bool printWarning)
{
LoggingConfiguration c = XmlLoggingConfiguration.CreateFromXmlString($@"
<nlog ThrowExceptions='true' internalLogLevel='Info'>
<targets>
<target type='database' {property} name='t1' commandtext='fake sql' connectionstring='somewhere' />
</targets>
<rules>
<logger name='*' writeTo='t1'>
</logger>
</rules>
</nlog>");
StringWriter writer1 = new StringWriter()
{
NewLine = "\n"
};
InternalLogger.LogWriter = writer1;
var t = c.FindTargetByName<DatabaseTarget>("t1");
t.Initialize(null);
var internalLog = writer1.ToString();
if (printWarning)
{
Assert.Contains("obsolete", internalLog, StringComparison.OrdinalIgnoreCase);
Assert.Contains("usetransactions", internalLog, StringComparison.OrdinalIgnoreCase);
}
else
{
Assert.DoesNotContain("obsolete", internalLog, StringComparison.OrdinalIgnoreCase);
Assert.DoesNotContain("usetransactions", internalLog, StringComparison.OrdinalIgnoreCase);
}
}
private static void AssertLog(string expectedLog)
{
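// Strip carriage returns so the comparison is independent of platform line endings.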
Assert.Equal(expectedLog.Replace("\r", ""), MockDbConnection.Log.Replace("\r", ""));
}
private string GetConnectionString(DatabaseTarget dt)
{
MockDbConnection.ClearLog();
dt.DBProvider = typeof(MockDbConnection).AssemblyQualifiedName;
dt.CommandText = "NotImportant";
var exceptions = new List<Exception>();
dt.Initialize(null);
dt.WriteAsyncLogEvent(new LogEventInfo(LogLevel.Info, "Logger1", "msg1").WithContinuation(exceptions.Add));
dt.Close();
return MockDbConnection.LastConnectionString;
}
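// Minimal IDbConnection stub: every interaction is appended to a static Log string
// that the tests above assert against.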
public class MockDbConnection : IDbConnection
{
public static string Log { get; private set; }
public static string LastConnectionString { get; private set; }
public MockDbConnection()
{
}
public MockDbConnection(string connectionString)
{
ConnectionString = connectionString;
}
public IDbTransaction BeginTransaction(IsolationLevel il)
{
throw new NotImplementedException();
}
public IDbTransaction BeginTransaction()
{
throw new NotImplementedException();
}
public void ChangeDatabase(string databaseName)
{
throw new NotImplementedException();
}
public void Close()
{
AddToLog("Close()");
}
public string ConnectionString { get; set; }
public int ConnectionTimeout => throw new NotImplementedException();
public IDbCommand CreateCommand()
{
return new MockDbCommand() { Connection = this };
}
public string Database => throw new NotImplementedException();
public void Open()
{
LastConnectionString = ConnectionString;
AddToLog("Open('{0}').", ConnectionString);
if (ConnectionString == "cannotconnect")
{
throw new ApplicationException("Cannot open fake database.");
}
}
public ConnectionState State => throw new NotImplementedException();
public void Dispose()
{
AddToLog("Dispose()");
}
public static void ClearLog()
{
Log = string.Empty;
}
public void AddToLog(string message, params object[] args)
{
if (args.Length > 0)
{
message = string.Format(CultureInfo.InvariantCulture, message, args);
}
Log += message + "\r\n";
}
}
private class MockDbCommand : IDbCommand
{
private int paramCount;
private IDataParameterCollection parameters;
public MockDbCommand()
{
parameters = new MockParameterCollection(this);
}
public void Cancel()
{
throw new NotImplementedException();
}
public string CommandText { get; set; }
public int CommandTimeout { get; set; }
public CommandType CommandType { get; set; }
public IDbConnection Connection { get; set; }
public IDbDataParameter CreateParameter()
{
((MockDbConnection)Connection).AddToLog("CreateParameter({0})", paramCount);
return new MockDbParameter(this, paramCount++);
}
public int ExecuteNonQuery()
{
((MockDbConnection)Connection).AddToLog("ExecuteNonQuery: {0}", CommandText);
if (Connection.ConnectionString == "cannotexecute")
{
throw new ApplicationException("Failure during ExecuteNonQuery");
}
return 0;
}
public IDataReader ExecuteReader(CommandBehavior behavior)
{
throw new NotImplementedException();
}
public IDataReader ExecuteReader()
{
throw new NotImplementedException();
}
public object ExecuteScalar()
{
throw new NotImplementedException();
}
public IDataParameterCollection Parameters => parameters;
public void Prepare()
{
throw new NotImplementedException();
}
public IDbTransaction Transaction { get; set; }
public UpdateRowSource UpdatedRowSource
{
get => throw new NotImplementedException();
set => throw new NotImplementedException();
}
public void Dispose()
{
Transaction = null;
Connection = null;
}
}
private class MockDbParameter : IDbDataParameter
{
private readonly MockDbCommand mockDbCommand;
private readonly int paramId;
private string parameterName;
private object parameterValue;
private DbType parameterType;
public MockDbParameter(MockDbCommand mockDbCommand, int paramId)
{
this.mockDbCommand = mockDbCommand;
this.paramId = paramId;
}
public DbType DbType
{
get { return parameterType; }
set
{
((MockDbConnection)mockDbCommand.Connection).AddToLog("Parameter #{0} DbType={1}", paramId, value);
parameterType = value;
}
}
public DbType MockDbType
{
get { return parameterType; }
set
{
((MockDbConnection)mockDbCommand.Connection).AddToLog("Parameter #{0} MockDbType={1}", paramId, value);
parameterType = value;
}
}
public ParameterDirection Direction
{
get => throw new NotImplementedException();
set => ((MockDbConnection)mockDbCommand.Connection).AddToLog("Parameter #{0} Direction={1}", paramId,
value);
}
public bool IsNullable => throw new NotImplementedException();
public string ParameterName
{
get => parameterName;
set
{
((MockDbConnection)mockDbCommand.Connection).AddToLog("Parameter #{0} Name={1}", paramId, value);
parameterName = value;
}
}
public string SourceColumn
{
get => throw new NotImplementedException();
set => throw new NotImplementedException();
}
public DataRowVersion SourceVersion
{
get => throw new NotImplementedException();
set => throw new NotImplementedException();
}
public object Value
{
get => parameterValue;
set
{
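// Quote string values in the log so they can be told apart from raw (typed) values.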
object valueOutput = value is string valueString ? $"\"{valueString}\"" : value;
((MockDbConnection)mockDbCommand.Connection).AddToLog("Parameter #{0} Value={1}", paramId, valueOutput);
parameterValue = value;
}
}
public byte Precision
{
get => throw new NotImplementedException();
set => ((MockDbConnection)mockDbCommand.Connection).AddToLog("Parameter #{0} Precision={1}", paramId,
value);
}
public byte Scale
{
get => throw new NotImplementedException();
set => ((MockDbConnection)mockDbCommand.Connection).AddToLog("Parameter #{0} Scale={1}", paramId, value);
}
public int Size
{
get => throw new NotImplementedException();
set => ((MockDbConnection)mockDbCommand.Connection).AddToLog("Parameter #{0} Size={1}", paramId, value);
}
public override string ToString()
{
return "Parameter #" + paramId;
}
}
private class MockParameterCollection : IDataParameterCollection
{
private readonly MockDbCommand command;
public MockParameterCollection(MockDbCommand command)
{
this.command = command;
}
public IEnumerator GetEnumerator()
{
throw new NotImplementedException();
}
public void CopyTo(Array array, int index)
{
throw new NotImplementedException();
}
public int Count => throw new NotImplementedException();
public object SyncRoot => throw new NotImplementedException();
public bool IsSynchronized => throw new NotImplementedException();
public int Add(object value)
{
((MockDbConnection)command.Connection).AddToLog("Add Parameter {0}", value);
return 0;
}
public bool Contains(object value)
{
throw new NotImplementedException();
}
public void Clear()
{
throw new NotImplementedException();
}
public int IndexOf(object value)
{
throw new NotImplementedException();
}
public void Insert(int index, object value)
{
throw new NotImplementedException();
}
public void Remove(object value)
{
throw new NotImplementedException();
}
public void RemoveAt(int index)
{
throw new NotImplementedException();
}
object IList.this[int index]
{
get => throw new NotImplementedException();
set => throw new NotImplementedException();
}
public bool IsReadOnly => throw new NotImplementedException();
public bool IsFixedSize => throw new NotImplementedException();
public bool Contains(string parameterName)
{
throw new NotImplementedException();
}
public int IndexOf(string parameterName)
{
throw new NotImplementedException();
}
public void RemoveAt(string parameterName)
{
throw new NotImplementedException();
}
object IDataParameterCollection.this[string parameterName]
{
get => throw new NotImplementedException();
set => throw new NotImplementedException();
}
}
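// DbProviderFactory registered under the "MockDb" invariant name (see the static constructor).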
public class MockDbFactory : DbProviderFactory
{
public static readonly MockDbFactory Instance = new MockDbFactory();
public override DbConnection CreateConnection()
{
return new MockDbConnection2();
}
}
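// DbConnection handed out by MockDbFactory; tracks how often Open() was called
// and which connection string was used last.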
public class MockDbConnection2 : DbConnection
{
public static int OpenCount { get; private set; }
public static string LastOpenConnectionString { get; private set; }
protected override DbTransaction BeginDbTransaction(IsolationLevel isolationLevel)
{
throw new NotImplementedException();
}
public override void ChangeDatabase(string databaseName)
{
throw new NotImplementedException();
}
public override void Close()
{
throw new NotImplementedException();
}
public override string ConnectionString { get; set; }
protected override DbCommand CreateDbCommand()
{
throw new NotImplementedException();
}
public override string DataSource => throw new NotImplementedException();
public override string Database => throw new NotImplementedException();
public override void Open()
{
LastOpenConnectionString = ConnectionString;
OpenCount++;
}
public override string ServerVersion => throw new NotImplementedException();
public override ConnectionState State => throw new NotImplementedException();
}
private class SQLiteTest
{
private string dbName = "NLogTest.sqlite";
private string connectionString;
public SQLiteTest(string dbName)
{
this.dbName = dbName;
#if NETSTANDARD
connectionString = "Data Source=" + this.dbName;
#else
connectionString = "Data Source=" + this.dbName + ";Version=3;";
#endif
}
public string GetConnectionString()
{
return connectionString;
}
public void CreateDatabase()
{
if (DatabaseExists())
{
TryDropDatabase();
}
SQLiteHandler.CreateDatabase(dbName);
}
public bool DatabaseExists()
{
return File.Exists(dbName);
}
public void TryDropDatabase()
{
try
{
if (DatabaseExists())
{
File.Delete(dbName);
}
}
catch
{
}
}
public void IssueCommand(string commandString)
{
using (DbConnection connection = SQLiteHandler.GetConnection(connectionString))
{
connection.Open();
using (DbCommand command = SQLiteHandler.CreateCommand(commandString, connection))
{
command.ExecuteNonQuery();
}
}
}
public object IssueScalarQuery(string commandString)
{
using (DbConnection connection = SQLiteHandler.GetConnection(connectionString))
{
connection.Open();
using (DbCommand command = SQLiteHandler.CreateCommand(commandString, connection))
{
var scalar = command.ExecuteScalar();
return scalar;
}
}
}
}
private static class SQLiteHandler
{
public static void CreateDatabase(string dbName)
{
#if NETSTANDARD
// Using ConnectionString Mode=ReadWriteCreate
#elif MONO
SqliteConnection.CreateFile(dbName);
#else
SQLiteConnection.CreateFile(dbName);
#endif
}
public static DbConnection GetConnection(string connectionString)
{
#if NETSTANDARD
return new SqliteConnection(connectionString + ";Mode=ReadWriteCreate;");
#elif MONO
return new SqliteConnection(connectionString);
#else
return new SQLiteConnection(connectionString);
#endif
}
public static DbCommand CreateCommand(string commandString, DbConnection connection)
{
#if MONO || NETSTANDARD
return new SqliteCommand(commandString, (SqliteConnection)connection);
#else
return new SQLiteCommand(commandString, (SQLiteConnection)connection);
#endif
}
}
private static class SqlServerTest
{
static SqlServerTest()
{
}
public static string GetConnectionString(bool isAppVeyor)
{
string connectionString = string.Empty;
#if !NETSTANDARD
connectionString = ConfigurationManager.AppSettings["SqlServerTestConnectionString"];
#endif
if (String.IsNullOrWhiteSpace(connectionString))
{
connectionString = isAppVeyor ? AppVeyorConnectionStringNLogTest : LocalConnectionStringNLogTest;
}
return connectionString;
}
/// <summary>
/// AppVeyor connection string for SQL 2016, see https://www.appveyor.com/docs/services-databases/
/// </summary>
private const string AppVeyorConnectionStringMaster =
@"Server=(local)\SQL2016;Database=master;User ID=sa;Password=Password12!";
private const string AppVeyorConnectionStringNLogTest =
@"Server=(local)\SQL2016;Database=NLogTest;User ID=sa;Password=Password12!";
private const string LocalConnectionStringMaster =
@"Data Source=(localdb)\MSSQLLocalDB; Database=master; Integrated Security=True;";
private const string LocalConnectionStringNLogTest =
@"Data Source=(localdb)\MSSQLLocalDB; Database=NLogTest; Integrated Security=True;";
public static void CreateDatabase(bool isAppVeyor)
{
var connectionString = GetMasterConnectionString(isAppVeyor);
IssueCommand(IsAppVeyor(), "CREATE DATABASE NLogTest", connectionString);
}
public static bool NLogTestDatabaseExists(bool isAppVeyor)
{
var connectionString = GetMasterConnectionString(isAppVeyor);
var dbId = IssueScalarQuery(isAppVeyor, "select db_id('NLogTest')", connectionString);
return dbId != null && dbId != DBNull.Value;
}
private static string GetMasterConnectionString(bool isAppVeyor)
{
return isAppVeyor ? AppVeyorConnectionStringMaster : LocalConnectionStringMaster;
}
public static void IssueCommand(bool isAppVeyor, string commandString, string connectionString = null)
{
using (var connection = new SqlConnection(connectionString ?? GetConnectionString(isAppVeyor)))
{
connection.Open();
if (connectionString == null)
connection.ChangeDatabase("NLogTest");
using (var command = new SqlCommand(commandString, connection))
{
command.ExecuteNonQuery();
}
}
}
public static object IssueScalarQuery(bool isAppVeyor, string commandString, string connectionString = null)
{
using (var connection = new SqlConnection(connectionString ?? GetConnectionString(isAppVeyor)))
{
connection.Open();
if (connectionString == null)
connection.ChangeDatabase("NLogTest");
using (var command = new SqlCommand(commandString, connection))
{
var scalar = command.ExecuteScalar();
return scalar;
}
}
}
/// <summary>
/// Tries to drop the NLogTest database; failures are swallowed instead of thrown.
/// </summary>
public static bool TryDropDatabase(bool isAppVeyor)
{
try
{
if (NLogTestDatabaseExists(isAppVeyor))
{
var connectionString = GetMasterConnectionString(isAppVeyor);
IssueCommand(isAppVeyor,
"ALTER DATABASE [NLogTest] SET SINGLE_USER WITH ROLLBACK IMMEDIATE; DROP DATABASE NLogTest;",
connectionString);
return true;
}
return false;
}
catch (Exception)
{
//ignore
return false;
}
}
}
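// Illustrative lifecycle sketch (invented for this write-up; not part of the
// original fixture): how the SqlServerTest helpers compose in a test.
// 'isAppVeyor' would come from the fixture's environment detection.
private static void SqlServerLifecycleSketch(bool isAppVeyor)
{
    SqlServerTest.TryDropDatabase(isAppVeyor);   // start from a clean slate; failures are ignored
    SqlServerTest.CreateDatabase(isAppVeyor);    // issues CREATE DATABASE NLogTest against master
    if (SqlServerTest.NLogTestDatabaseExists(isAppVeyor))
    {
        SqlServerTest.IssueCommand(isAppVeyor, "CREATE TABLE Logs (Message NVARCHAR(100))");
    }
    SqlServerTest.TryDropDatabase(isAppVeyor);   // clean up after the test
}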
}
}
| 1 | 18,947 | note: also here ;) | NLog-NLog | .cs |
@@ -145,6 +145,10 @@ public class TemporaryFilesystem {
}
public boolean deleteBaseDir() {
- return baseDir.delete();
+ boolean wasDeleted = baseDir.delete();
+ if (wasDeleted) {
+ Runtime.getRuntime().removeShutdownHook(shutdownHook);
+ }
+ return wasDeleted;
}
} | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.io;
import org.openqa.selenium.WebDriverException;
import java.io.File;
import java.io.IOException;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
/**
* A wrapper around temporary filesystem behaviour.
*/
public class TemporaryFilesystem {
private final Set<File> temporaryFiles = new CopyOnWriteArraySet<File>();
private final File baseDir;
private final Thread shutdownHook = new Thread() { // Thread safety reviewed
@Override
public void run() {
deleteTemporaryFiles();
}
};
private static File sysTemp = new File(System.getProperty("java.io.tmpdir"));
private static TemporaryFilesystem instance = new TemporaryFilesystem(sysTemp);
public static TemporaryFilesystem getDefaultTmpFS() {
return instance;
}
public static void setTemporaryDirectory(File directory) {
synchronized (TemporaryFilesystem.class) {
instance = new TemporaryFilesystem(directory);
}
}
public static TemporaryFilesystem getTmpFsBasedOn(File directory) {
return new TemporaryFilesystem(directory);
}
private TemporaryFilesystem(File baseDir) {
this.baseDir = baseDir;
Runtime.getRuntime().addShutdownHook(shutdownHook);
if (!baseDir.exists()) {
throw new WebDriverException("Unable to find tmp dir: " + baseDir.getAbsolutePath());
}
if (!baseDir.canWrite()) {
throw new WebDriverException("Unable to write to tmp dir: " + baseDir.getAbsolutePath());
}
}
/**
* Create a temporary directory, and track it for deletion.
*
* @param prefix the prefix to use when creating the temporary directory
* @param suffix the suffix to use when creating the temporary directory
* @return the newly created temporary directory
*/
public File createTempDir(String prefix, String suffix) {
try {
// Create a tempfile, and delete it.
File file = File.createTempFile(prefix, suffix, baseDir);
file.delete();
// Create it as a directory.
File dir = new File(file.getAbsolutePath());
if (!dir.mkdirs()) {
throw new WebDriverException("Cannot create profile directory at " + dir.getAbsolutePath());
}
// Create the directory and mark it writable.
FileHandler.createDir(dir);
temporaryFiles.add(dir);
return dir;
} catch (IOException e) {
throw new WebDriverException(
"Unable to create temporary file at " + baseDir.getAbsolutePath(), e);
}
}
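// Illustrative usage sketch (added for this write-up; not part of the
// original class): create a tracked directory, use it, then reap it
// explicitly instead of waiting for the shutdown hook.
private static void usageSketch() {
  TemporaryFilesystem tmpFs = TemporaryFilesystem.getDefaultTmpFS();
  File profileDir = tmpFs.createTempDir("profile", "copy");
  // ... populate profileDir ...
  tmpFs.deleteTempDir(profileDir);
}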
/**
* Delete a temporary directory that we were responsible for creating.
*
* @param file the file to delete
* @throws WebDriverException if interrupted
*/
public void deleteTempDir(File file) {
if (!shouldReap()) {
return;
}
// If the tempfile can be removed, delete it. If not, it wasn't created by us.
if (temporaryFiles.remove(file)) {
FileHandler.delete(file);
}
}
/**
* Perform the operation that a shutdown hook would have.
*/
public void deleteTemporaryFiles() {
if (!shouldReap()) {
return;
}
for (File file : temporaryFiles) {
try {
FileHandler.delete(file);
} catch (WebDriverException e) {
// ignore; an interrupt will already have been logged.
}
}
}
/**
* Returns true if we should be reaping profiles. Used to control tempfile deletion.
*
* @return true if reaping is enabled.
*/
boolean shouldReap() {
String reap = System.getProperty("webdriver.reap_profile", "true");
return Boolean.valueOf(reap);
}
public boolean deleteBaseDir() {
return baseDir.delete();
}
}
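// Sketch of the simplification suggested in the review note below (not
// committed code): deregister the shutdown hook unconditionally instead of
// gating it on the delete() result, since no caller inspects the returned
// flag. Note that removeShutdownHook throws IllegalStateException if the JVM
// is already shutting down, which a hardened version would have to catch.
//
//   public boolean deleteBaseDir() {
//     Runtime.getRuntime().removeShutdownHook(shutdownHook);
//     return baseDir.delete();
//   }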
| 1 | 13,021 | I don't think we need to necessarily check if that returned true or not, we should just remove the shutdown hook. Since nothing would check or do anything with this flag anyways. | SeleniumHQ-selenium | js |
@@ -38,10 +38,15 @@ const kytheExtractionConfigFile = ".kythe-extraction-config"
// Repo is a container of input/output parameters for doing extraction on remote
// repositories.
type Repo struct {
- // Clone extracts a copy of the repo to the specified output directory.
- Clone func(ctx context.Context, outputDir string) error
+ // Either Git or Local should be set, not both.
+ // A remote git repo, e.g. https://github.com/google/kythe.
+ Git string
+ // A local copy of a repository.
+ Local string
+
// Where to write the extraction output.
OutputPath string
+
// An optional path to a file containing a
// kythe.proto.ExtractionConfiguration encoded as JSON that details how
// to perform extraction. If this is unset, the extractor will first try | 1 | /*
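// Hedged sketch (not part of the change above): one way the new, mutually
// exclusive Git/Local fields could be validated before extraction starts.
// The method name is invented for illustration.
//
//	func (r Repo) validate() error {
//		if (r.Git == "") == (r.Local == "") {
//			return fmt.Errorf("exactly one of Git or Local must be set")
//		}
//		return nil
//	}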
* Copyright 2018 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package config
import (
"context"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"kythe.io/kythe/go/extractors/config/default/mvn"
ecpb "kythe.io/kythe/proto/extraction_config_go_proto"
)
// kytheExtractionConfigFile is the name of the Kythe extraction config file.
const kytheExtractionConfigFile = ".kythe-extraction-config"
// Repo is a container of input/output parameters for doing extraction on remote
// repositories.
type Repo struct {
// Clone extracts a copy of the repo to the specified output directory.
Clone func(ctx context.Context, outputDir string) error
// Where to write the extraction output.
OutputPath string
// An optional path to a file containing a
// kythe.proto.ExtractionConfiguration encoded as JSON that details how
// to perform extraction. If this is unset, the extractor will first try
// to find a config defined in the repo, or finally use a hard coded
// default config.
ConfigPath string
}
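// Illustrative construction (values invented): under this pre-change API the
// clone step is injected as a closure rather than selected from Git/Local
// fields, e.g.:
//
//	repo := Repo{
//		Clone:      GitCopier("https://github.com/google/kythe"),
//		OutputPath: "/tmp/kythe-out",
//	}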
// GitCopier returns a function that clones a repository via git command line.
func GitCopier(repoURI string) func(ctx context.Context, outputDir string) error {
return func(ctx context.Context, outputDir string) error {
// TODO(danielmoy): strongly consider go-git instead of os.exec
return exec.CommandContext(ctx, "git", "clone", repoURI, outputDir).Run()
}
}
// LocalCopier returns a function that copies a local repository.
// This function assumes the eventual output directory is already created.
func LocalCopier(repoPath string) func(ctx context.Context, outputDir string) error {
return func(ctx context.Context, outputDir string) error {
gitDir := filepath.Join(repoPath, ".git")
// TODO(danielmoy): consider extracting all or part of this
// to a more common place.
return filepath.Walk(repoPath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if repoPath == path {
// Intentionally do nothing for base dir.
return nil
}
if filepath.HasPrefix(path, gitDir) {
return filepath.SkipDir
}
rel, err := filepath.Rel(repoPath, path)
if err != nil {
return err
}
outPath := filepath.Join(outputDir, rel)
if info.Mode().IsRegular() {
if err := os.MkdirAll(filepath.Dir(outPath), 0755); err != nil {
return fmt.Errorf("failed to make dir: %v", err)
}
inf, err := os.Open(path)
if err != nil {
return fmt.Errorf("failed to open input file from repo: %v", err)
}
defer inf.Close()
of, err := os.Create(outPath)
if err != nil {
return fmt.Errorf("failed to open output file for repo copy: %v", err)
}
if _, err := io.Copy(of, inf); err != nil {
of.Close()
return fmt.Errorf("failed to copy repo file: %v", err)
}
return of.Close()
} else if !info.IsDir() {
// Notably in here are any links or other odd things.
log.Printf("Unsupported file %s with mode %s\n", path, info.Mode())
}
return nil
})
}
}
// Extractor is the interface for handling kindex generation on repos.
//
// ExtractRepo takes an input repo, output path to a directory, and optional
// kythe.proto.ExtractionConfiguration file path, and performs kythe extraction
// on the repo, depositing results in the output directory path.
type Extractor func(ctx context.Context, repo Repo) error
// ExtractRepo extracts a given code repository and outputs kindex files.
//
// It makes a local clone of the repository. It optionally uses a passed
// extraction config, otherwise it attempts to find a Kythe config named
// ".kythe-extraction-config".
//
// It builds a one-off customized Docker image for extraction, and then runs it,
// generating kindex files (format defined here:
// http://kythe.io/docs/kythe-index-pack.html).
//
// This function requires both Git and Docker to be in $PATH during execution.
func ExtractRepo(ctx context.Context, repo Repo) error {
if err := verifyRequiredTools(); err != nil {
return fmt.Errorf("ExtractRepo requires git and docker to be in $PATH: %v", err)
}
// create a temporary directory for the repo clone
repoDir, err := ioutil.TempDir("", "repoDir")
if err != nil {
return fmt.Errorf("creating tmp repo dir: %v", err)
}
defer os.RemoveAll(repoDir)
// create a temporary directory for the extraction output
tmpOutDir, err := ioutil.TempDir("", "tmpOutDir")
if err != nil {
return fmt.Errorf("creating tmp out dir: %v", err)
}
defer os.RemoveAll(tmpOutDir)
// copy the repo into our temp directory, so we can mutate its
// build config without affecting the original source.
if err := repo.Clone(ctx, repoDir); err != nil {
return fmt.Errorf("copying repo: %v", err)
}
log.Printf("Using configuration file: %q", repo.ConfigPath)
extractionDockerFile, err := ioutil.TempFile(tmpOutDir, "extractionDockerFile")
if err != nil {
return fmt.Errorf("creating tmp Dockerfile: %v", err)
}
// generate an extraction image from the config
extractionConfig, err := findConfig(repo.ConfigPath, repoDir)
if err != nil {
return fmt.Errorf("reading config file: %v", err)
}
err = CreateImage(extractionDockerFile.Name(), extractionConfig)
if err != nil {
return fmt.Errorf("creating extraction image: %v", err)
}
// use Docker to build the extraction image
imageTag := strings.ToLower(filepath.Base(extractionDockerFile.Name()))
output, err := exec.CommandContext(ctx, "docker", "build", "-f", extractionDockerFile.Name(), "-t", imageTag, tmpOutDir).CombinedOutput()
defer mustCleanUpImage(ctx, imageTag)
if err != nil {
return fmt.Errorf("building docker image: %v\nCommand output %s", err, string(output))
}
// run the extraction
output, err = exec.CommandContext(ctx, "docker", "run", "--rm", "-v", fmt.Sprintf("%s:%s", repoDir, DefaultRepoVolume), "-v", fmt.Sprintf("%s:%s", repo.OutputPath, DefaultOutputVolume), "-t", imageTag).CombinedOutput()
if err != nil {
return fmt.Errorf("extracting repo: %v\nCommand output: %s", err, string(output))
}
return nil
}
func verifyRequiredTools() error {
if _, err := exec.LookPath("git"); err != nil {
return err
}
if _, err := exec.LookPath("docker"); err != nil {
return err
}
return nil
}
func findConfig(configPath, repoDir string) (*ecpb.ExtractionConfiguration, error) {
// if a config was passed in, use the specified config, otherwise go
// hunt for one in the repository.
if configPath == "" {
// otherwise, use a Kythe config within the repo (if it exists)
configPath = filepath.Join(repoDir, kytheExtractionConfigFile)
}
f, err := os.Open(configPath)
if os.IsNotExist(err) {
// TODO(danielmoy): This needs to be configurable by builder, language, etc.
return Load(mvn.DefaultConfig())
} else if err != nil {
return nil, fmt.Errorf("opening config file: %v", err)
}
defer f.Close()
return Load(f)
}
func mustCleanUpImage(ctx context.Context, tmpImageTag string) {
cmd := exec.CommandContext(ctx, "docker", "image", "rm", tmpImageTag)
err := cmd.Run()
if err != nil {
log.Printf("Failed to clean up docker image: %v", err)
}
}
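// End-to-end sketch (invented for illustration; paths are placeholders):
// wiring a Repo through ExtractRepo with a local checkout as the source.
func extractRepoSketch() {
	ctx := context.Background()
	repo := Repo{
		Clone:      LocalCopier("/path/to/checkout"),
		OutputPath: "/tmp/kindex-out",
	}
	if err := ExtractRepo(ctx, repo); err != nil {
		log.Printf("extraction failed: %v", err)
	}
}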
| 1 | 8,478 | nit: blank between, for consistency. | kythe-kythe | go |
@@ -21,8 +21,10 @@ import java.util.regex.Matcher;
/** Utility class for formatting source comments to follow RDoc style. */
public class RDocCommentFixer {
- /** Returns a Sphinx-formatted comment string. */
+ /** Returns an RDoc-formatted comment string. */
public static String rdocify(String comment) {
+ // escape '$' first so the regex-based rewrites below treat it as a literal
+ comment = comment.replaceAll("\\$", "\\\\\\$");
comment = CommentPatterns.BACK_QUOTE_PATTERN.matcher(comment).replaceAll("+");
comment = rdocifyProtoMarkdownLinks(comment);
comment = rdocifyCloudMarkdownLinks(comment); | 1 | /* Copyright 2016 Google Inc
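// Why the new '$' escape matters (explanatory sketch, not project code):
// java.util.regex.Matcher treats '$' in a replacement string as a group
// reference, so an unescaped '$' carried from the source comment into a
// later appendReplacement/replaceAll pass can throw IllegalArgumentException
// or substitute the wrong text. Pre-escaping turns it into a literal:
//
//   String in = "costs $5";
//   String escaped = in.replaceAll("\\$", "\\\\\\$"); // -> "costs \$5"
//   // a later replacement pass seeing "\$5" now emits a literal "$5"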
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.ruby;
import com.google.api.codegen.CommentPatterns;
import com.google.common.base.Splitter;
import java.util.regex.Matcher;
/** Utility class for formatting source comments to follow RDoc style. */
public class RDocCommentFixer {
/** Returns a Sphinx-formatted comment string. */
public static String rdocify(String comment) {
comment = CommentPatterns.BACK_QUOTE_PATTERN.matcher(comment).replaceAll("+");
comment = rdocifyProtoMarkdownLinks(comment);
comment = rdocifyCloudMarkdownLinks(comment);
comment = rdocifyAbsoluteMarkdownLinks(comment);
comment = rdocifyHeadline(comment);
return cleanupTrailingWhitespaces(comment);
}
private static String protoToRubyDoc(String comment) {
boolean messageFound = false;
boolean isFirstSegment = true;
String result = "";
for (String name : Splitter.on(".").splitToList(comment)) {
char firstChar = name.charAt(0);
if (Character.isUpperCase(firstChar)) {
messageFound = true;
result += (isFirstSegment ? "" : "::") + name;
} else if (messageFound) {
// A lowercase segment that follows a message name is a field.
// In Ruby docs, it is referred to as the "Message#field" format.
result += "#" + name;
} else {
result +=
(isFirstSegment ? "" : "::") + Character.toUpperCase(firstChar) + name.substring(1);
}
isFirstSegment = false;
}
return result;
}
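// Worked examples of the mapping above (illustrative):
//   protoToRubyDoc("google.example.Message.field") -> "Google::Example::Message#field"
//   protoToRubyDoc("Status.code")                  -> "Status#code"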
/** Returns a string with all proto markdown links formatted to RDoc style. */
private static String rdocifyProtoMarkdownLinks(String comment) {
StringBuffer sb = new StringBuffer();
Matcher m = CommentPatterns.PROTO_LINK_PATTERN.matcher(comment);
if (!m.find()) {
return comment;
}
do {
m.appendReplacement(sb, String.format("%s", protoToRubyDoc(m.group(1))));
} while (m.find());
m.appendTail(sb);
return sb.toString();
}
/** Returns a string with all cloud markdown links formatted to RDoc style. */
private static String rdocifyCloudMarkdownLinks(String comment) {
StringBuffer sb = new StringBuffer();
Matcher m = CommentPatterns.CLOUD_LINK_PATTERN.matcher(comment);
if (!m.find()) {
return comment;
}
do {
String url = "https://cloud.google.com" + m.group(2);
m.appendReplacement(sb, String.format("{%s}[%s]", m.group(1), url));
} while (m.find());
m.appendTail(sb);
return sb.toString();
}
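// Example transformation (illustrative, assuming CLOUD_LINK_PATTERN captures
// the link text as group 1 and the site-relative path as group 2):
//   [quota](/docs/quota)  ->  {quota}[https://cloud.google.com/docs/quota]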
/** Returns a string with all absolute markdown links formatted to RDoc style. */
private static String rdocifyAbsoluteMarkdownLinks(String comment) {
StringBuffer sb = new StringBuffer();
Matcher m = CommentPatterns.ABSOLUTE_LINK_PATTERN.matcher(comment);
if (!m.find()) {
return comment;
}
do {
m.appendReplacement(sb, String.format("{%s}[%s]", m.group(1), m.group(2)));
} while (m.find());
m.appendTail(sb);
return sb.toString();
}
private static String rdocifyHeadline(String comment) {
StringBuffer sb = new StringBuffer();
Matcher m = CommentPatterns.HEADLINE_PATTERN.matcher(comment);
if (!m.find()) {
return comment;
}
do {
m.appendReplacement(sb, m.group().replace("#", "="));
} while (m.find());
m.appendTail(sb);
return sb.toString();
}
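// Example transformation (illustrative, assuming HEADLINE_PATTERN matches
// markdown-style '#' headings):
//   "## Overview"  ->  "== Overview"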
private static String cleanupTrailingWhitespaces(String comment) {
return comment.trim();
}
}
| 1 | 19,424 | ditto for RDoc or YARD link | googleapis-gapic-generator | java |