Dataset schema (per-column statistics):

| column | dtype | stats |
|--------|-------|-------|
| patch | string | lengths 17 to 31.2k |
| y | int64 | 1 to 1 |
| oldf | string | lengths 0 to 2.21M |
| idx | int64 | 1 to 1 |
| id | int64 | 4.29k to 68.4k |
| msg | string | lengths 8 to 843 |
| proj | string (categorical) | 212 values |
| lang | string (categorical) | 9 values |
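Each record below pairs a code-review comment (`msg`) with the diff it was left on (`patch`), the pre-patch file contents (`oldf`), and the project and language it came from. As a hypothetical sketch of how the rows could be skimmed (the `rows.jsonl` filename and JSON-Lines export are assumptions, not given by the source):

```python
import json

# Hypothetical: assumes the records below are exported one JSON object
# per line, with the field names from the schema table above.
with open("rows.jsonl") as f:
    for line in f:
        row = json.loads(line)
        print(row["proj"], row["lang"], row["id"], row["msg"][:60])
```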
patch:
```diff
@@ -4,9 +4,10 @@
 import java.io.File;
 import java.io.IOException;
 
 public class FileTypeDetection {
-
-    public static String determineFileType(File file) throws IOException {
-        return FileUtil.determineFileType(file, file.getName());
+    // Question: why do we need this utility? - as opposed to just calling the
+    // static method in FileUtil directly? - L.A.
+    public static String determineFileType(File file, String fileName) throws IOException {
+        return FileUtil.determineFileType(file, fileName);
     }
 }
```
y: 1
oldf:
```java
package edu.harvard.iq.dataverse.util; import java.io.File; import java.io.IOException; public class FileTypeDetection { public static String determineFileType(File file) throws IOException { return FileUtil.determineFileType(file, file.getName()); } }
```
idx: 1
id: 43947
msg: good question - I vote we just remove; up to you if you want to handle it in this PR, though (as it would require changing the places you call it; maybe moving the test? Though my guess is if we don't now, we never will).
proj: IQSS-dataverse
lang: java
patch:
```diff
@@ -15,6 +15,10 @@ module Travis
         super << "--gemfile-" << config[:gemfile].to_s
       end
 
+      def use_directory_cache?
+        super or data.cache?(:bundler)
+      end
+
       def setup
         super
         setup_bundler
```
y: 1
oldf:
```ruby
module Travis module Build class Script class Ruby < Script DEFAULTS = { rvm: 'default', gemfile: 'Gemfile' } include Jdk include RVM def cache_slug # ruby version is added by RVM] super << "--gemfile-" << config[:gemfile].to_s end def setup super setup_bundler end def announce super cmd 'gem --version' end def install gemfile? do |sh| sh.if "-f #{config[:gemfile]}.lock" do |sub| directory_cache.add(sub, bundler_path) if data.cache? :bundler sub.cmd bundler_command("--deployment"), fold: 'install', retry: true end sh.else do |sub| # cache bundler if it has been explicitely enabled directory_cache.add(sub, bundler_path) if data.cache? :bundler, false path_arg = "--path=#{bundler_path}" if bundler_path sub.cmd bundler_command(path_arg), fold: 'install', retry: true end end end def script gemfile? then: 'bundle exec rake', else: 'rake' end def prepare_cache "bundle clean" if bundler_path end private def bundler_path if bundler_args Array(bundler_args).join(" ")[/--path[= ](\S+)/, 1] else "${BUNDLE_PATH:-vendor/bundle}" end end def bundler_command(args = nil) args = bundler_args if bundler_args ["bundle install", args].compact.join(" ") end def bundler_args config[:bundler_args] end def setup_bundler gemfile? do |sh| set 'BUNDLE_GEMFILE', "$PWD/#{config[:gemfile]}" cmd 'gem query --local | grep bundler >/dev/null || gem install bundler' end end def gemfile?(*args, &block) self.if "-f #{config[:gemfile]}", *args, &block end def uses_java? config[:rvm] =~ /jruby/i end def uses_jdk? uses_java? && super end end end end end
```
idx: 1
id: 10874
msg: This is so that if we turn on bundler caching globally it still won't affect python etc. Same inheritance logic as for the cache slug.
proj: travis-ci-travis-build
lang: rb
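The comment describes a subclass widening a predicate so a global cache switch stays scoped to Ruby builds. A rough Python rendering of that inheritance pattern (the class and config names here are illustrative, not travis-build's):

```python
class Script:
    def __init__(self, config):
        self.config = config

    def use_directory_cache(self):
        # The global switch every language honours.
        return self.config.get("cache") == "directories"

class Ruby(Script):
    def use_directory_cache(self):
        # Mirrors `super or data.cache?(:bundler)`: enabling bundler
        # caching turns on the directory cache for Ruby builds only,
        # without affecting Python etc.
        return super().use_directory_cache() or self.config.get("cache") == "bundler"

print(Ruby({"cache": "bundler"}).use_directory_cache())    # True
print(Script({"cache": "bundler"}).use_directory_cache())  # False
```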
patch:
```diff
@@ -58,7 +58,8 @@ namespace Nethermind.Synchronization.Peers
         public bool CanBeAllocated(AllocationContexts contexts)
         {
             return !IsAsleep(contexts) &&
-                   !IsAllocated(contexts);
+                   !IsAllocated(contexts) &&
+                   this.SupportsAllocation(contexts);
         }
 
         [MethodImpl(MethodImplOptions.Synchronized)]
```
y: 1
oldf:
```csharp
// Copyright (c) 2021 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. using System; using System.Collections.Concurrent; using System.Collections.Generic; using System.Linq; using System.Runtime.CompilerServices; using System.Threading; using Nethermind.Blockchain.Synchronization; using Nethermind.Core.Crypto; using Nethermind.Int256; using Nethermind.Stats.Model; [assembly: InternalsVisibleTo("Nethermind.Synchronization.Test")] namespace Nethermind.Synchronization.Peers { public class PeerInfo { public PeerInfo(ISyncPeer syncPeer) { SyncPeer = syncPeer; } public NodeClientType PeerClientType => SyncPeer?.ClientType ?? NodeClientType.Unknown; public AllocationContexts AllocatedContexts { get; private set; } public AllocationContexts SleepingContexts { get; private set; } private ConcurrentDictionary<AllocationContexts, DateTime?> SleepingSince { get; } = new(); public ISyncPeer SyncPeer { get; } public bool IsInitialized => SyncPeer.IsInitialized; public UInt256 TotalDifficulty => SyncPeer.TotalDifficulty; public long HeadNumber => SyncPeer.HeadNumber; public Keccak HeadHash => SyncPeer.HeadHash; [MethodImpl(MethodImplOptions.Synchronized)] public bool CanBeAllocated(AllocationContexts contexts) { return !IsAsleep(contexts) && !IsAllocated(contexts); } [MethodImpl(MethodImplOptions.Synchronized)] public bool IsAsleep(AllocationContexts contexts) { return (contexts & SleepingContexts) != AllocationContexts.None; } [MethodImpl(MethodImplOptions.Synchronized)] public bool IsAllocated(AllocationContexts contexts) { return (contexts & AllocatedContexts) != AllocationContexts.None; } [MethodImpl(MethodImplOptions.Synchronized)] public bool TryAllocate(AllocationContexts contexts) { if (CanBeAllocated(contexts)) { AllocatedContexts |= contexts; return true; } return false; } [MethodImpl(MethodImplOptions.Synchronized)] public void Free(AllocationContexts contexts) { AllocatedContexts ^= contexts; } [MethodImpl(MethodImplOptions.Synchronized)] public void PutToSleep(AllocationContexts contexts, DateTime dateTime) { SleepingContexts |= contexts; SleepingSince[contexts] = dateTime; } [MethodImpl(MethodImplOptions.Synchronized)] public void TryToWakeUp(DateTime dateTime, TimeSpan wakeUpIfSleepsMoreThanThis) { foreach (KeyValuePair<AllocationContexts, DateTime?> keyValuePair in SleepingSince) { if (IsAsleep(keyValuePair.Key)) { if (dateTime - keyValuePair.Value >= wakeUpIfSleepsMoreThanThis) { WakeUp(keyValuePair.Key); } } } } [MethodImpl(MethodImplOptions.Synchronized)] private void WakeUp(AllocationContexts allocationContexts) { SleepingContexts ^= allocationContexts; foreach (KeyValuePair<AllocationContexts, int> allocationIndex in AllocationIndexes) { if ((allocationContexts & allocationIndex.Key) == allocationIndex.Key) { _weaknesses[allocationIndex.Value] = 0; } } 
SleepingSince.TryRemove(allocationContexts, out _); } // map from AllocationContexts single flag to index in array of _weaknesses private static readonly IDictionary<AllocationContexts, int> AllocationIndexes = ((AllocationContexts[])Enum.GetValues(typeof(AllocationContexts))) .Where(c => c != AllocationContexts.All && c != AllocationContexts.None) .Select((a, i) => (a, i)) .ToDictionary(v => v.a, v => v.i); private readonly int[] _weaknesses = new int[AllocationIndexes.Count]; public const int SleepThreshold = 2; public AllocationContexts IncreaseWeakness(AllocationContexts allocationContexts) { AllocationContexts sleeps = AllocationContexts.None; foreach (KeyValuePair<AllocationContexts, int> allocationIndex in AllocationIndexes) { if ((allocationContexts & allocationIndex.Key) == allocationIndex.Key) { ResolveWeaknessChecks(ref _weaknesses[allocationIndex.Value], allocationIndex.Key, ref sleeps); } } return sleeps; } private void ResolveWeaknessChecks(ref int weakness, AllocationContexts singleContext, ref AllocationContexts sleeps) { int level = Interlocked.Increment(ref weakness); if (level >= SleepThreshold) { sleeps |= singleContext; } } private static string BuildContextString(AllocationContexts contexts) { return $"{((contexts & AllocationContexts.Headers) == AllocationContexts.Headers ? "H" : " ")}{((contexts & AllocationContexts.Bodies) == AllocationContexts.Bodies ? "B" : " ")}{((contexts & AllocationContexts.Receipts) == AllocationContexts.Receipts ? "R" : " ")}{((contexts & AllocationContexts.State) == AllocationContexts.State ? "S" : " ")}{((contexts & AllocationContexts.Witness) == AllocationContexts.Witness ? "W" : " ")}"; } public override string ToString() => $"[{BuildContextString(AllocatedContexts)}][{BuildContextString(SleepingContexts)}]{SyncPeer}"; } }
```
idx: 1
id: 26404
msg: [You can ignore it] Maybe a better method name would be IsSupported or HasSupportForAllocation?
proj: NethermindEth-nethermind
lang: cs
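The patch adds a third conjunct to `CanBeAllocated`. A Python rendering of the same flag arithmetic (the names follow the C# source, but the logic here, including what "supports" means, is an illustrative translation, not Nethermind's code):

```python
from enum import Flag, auto

class AllocationContexts(Flag):
    NONE = 0
    HEADERS = auto()
    BODIES = auto()
    RECEIPTS = auto()

def can_be_allocated(requested, sleeping, allocated, supported):
    # The patched predicate: awake for the requested contexts, not
    # already allocated to them, and actually supporting them (the
    # new SupportsAllocation check).
    return (not (requested & sleeping)
            and not (requested & allocated)
            and bool(requested & supported))

C = AllocationContexts
print(can_be_allocated(C.HEADERS, C.NONE, C.NONE, C.HEADERS | C.BODIES))  # True
print(can_be_allocated(C.RECEIPTS, C.NONE, C.NONE, C.HEADERS))            # False
```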
patch:
```diff
@@ -32,7 +32,7 @@ def escape_and_globify(pattern_string):
         pattern_string (str): The pattern string of which to make a regex.
 
     Returns:
-        str: The pattern string, escaped except for the "*", which is
+        str: The pattern string, escaped except for the "\*", which is
             transformed into ".+" (match on one or more characters).
     """
     if pattern_string is None:
```
y: 1
oldf:
```python
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Regex utility module.""" import re # pylint: disable=anomalous-backslash-in-string def escape_and_globify(pattern_string): """Given a pattern string with a glob, create actual regex pattern. To require > 0 length glob, change the "*" to ".+". This is to handle strings like "\*@company.com". (The actual regex would probably be ".\*@company.com", except that we don't want to match zero-length usernames before the "@".) Special case the pattern '*' to match 0 or more characters. Args: pattern_string (str): The pattern string of which to make a regex. Returns: str: The pattern string, escaped except for the "*", which is transformed into ".+" (match on one or more characters). """ if pattern_string is None: return None # pylint: enable=anomalous-backslash-in-string if pattern_string == '*': return '^.*$' return '^{}$'.format(re.escape(pattern_string).replace('\\*', '.+?'))
```
idx: 1
id: 32367
msg: Why is this changed to `\*`? The behavior of the code on line 42 shows that `*` is what's handled?
proj: forseti-security-forseti-security
lang: py
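The docstring edit makes sense once you see what `re.escape` does first: `*` becomes `\*`, and it is that escaped form which the `.replace('\\*', '.+?')` call rewrites. A quick check using the function from `oldf` above (output shown for Python 3.7+):

```python
import re

def escape_and_globify(pattern_string):
    # Same logic as the module above: escape everything, then turn the
    # escaped glob "\*" into ".+?" (one or more characters, non-greedy).
    if pattern_string is None:
        return None
    if pattern_string == '*':
        return '^.*$'
    return '^{}$'.format(re.escape(pattern_string).replace('\\*', '.+?'))

print(re.escape('*@company.com'))           # \*@company\.com -- the "*" is escaped
print(escape_and_globify('*@company.com'))  # ^.+?@company\.com$

pat = escape_and_globify('*@company.com')
print(bool(re.match(pat, 'alice@company.com')))  # True
print(bool(re.match(pat, '@company.com')))       # False: the glob requires >0 chars
```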
patch:
```diff
@@ -117,6 +117,16 @@ module.exports = BaseTest.extend({
         TestCase.assertEqual(Realm.defaultPath, newPath, "defaultPath should have been updated");
     },
 
+    testRealmSchemaVersion: function() {
+        TestCase.assertEqual(Realm.schemaVersion(Realm.defaultPath), 0xFFFFFFFFFFFFFFFF);
+
+        var realm = new Realm({schema: []});
+        TestCase.assertEqual(Realm.schemaVersion(Realm.defaultPath), 0);
+
+        realm = new Realm({schema: [], schemaVersion: 2, path: 'another.realm'});
+        TestCase.assertEqual(Realm.schemaVersion('another.realm'), 2);
+    },
+
     testRealmCreate: function() {
         var realm = new Realm({schema: [schemas.IntPrimary, schemas.AllTypes, schemas.TestObject]});
```
y: 1
oldf:
```js
//////////////////////////////////////////////////////////////////////////// // // Copyright 2016 Realm Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //////////////////////////////////////////////////////////////////////////// 'use strict'; var Realm = require('realm'); var BaseTest = require('./base-test'); var TestCase = require('./asserts'); var schemas = require('./schemas'); module.exports = BaseTest.extend({ testRealmConstructor: function() { var realm = new Realm({schema: []}); TestCase.assertTrue(realm instanceof Realm); }, testRealmConstructorPath: function() { TestCase.assertThrows(function() { new Realm('/invalidpath'); }, 'Realm cannot be created with an invalid path'); TestCase.assertThrows(function() { new Realm('test1.realm', 'invalidArgument'); }, 'Realm constructor can only have 0 or 1 argument(s)'); var defaultRealm = new Realm({schema: []}); TestCase.assertEqual(defaultRealm.path, Realm.defaultPath); var defaultRealm2 = new Realm(); TestCase.assertEqual(defaultRealm2.path, Realm.defaultPath); var defaultDir = Realm.defaultPath.substring(0, Realm.defaultPath.lastIndexOf("/") + 1) var testPath = 'test1.realm'; var realm = new Realm({schema: [], path: testPath}); //TestCase.assertTrue(realm instanceof Realm); TestCase.assertEqual(realm.path, defaultDir + testPath); var testPath2 = 'test2.realm'; var realm2 = new Realm({schema: [], path: testPath2}); //TestCase.assertTrue(realm2 instanceof Realm); TestCase.assertEqual(realm2.path, defaultDir + testPath2); }, testRealmConstructorSchemaVersion: function() { var defaultRealm = new Realm({schema: []}); TestCase.assertEqual(defaultRealm.schemaVersion, 0); TestCase.assertThrows(function() { new Realm({schemaVersion: 1}); }, "Realm already opened at a different schema version"); TestCase.assertEqual(new Realm().schemaVersion, 0); TestCase.assertEqual(new Realm({schemaVersion: 0}).schemaVersion, 0); var realm = new Realm({path: 'test1.realm', schema: [], schemaVersion: 1}); TestCase.assertEqual(realm.schemaVersion, 1); // FIXME - enable once Realm exposes a schema object //TestCase.assertEqual(realm.schema.length, 0); realm.close(); // FIXME - enable once realm initialization supports schema comparison // TestCase.assertThrows(function() { // realm = new Realm({path: testPath, schema: [schemas.TestObject], schemaVersion: 1}); // }, "schema changes require updating the schema version"); realm = new Realm({path: 'test1.realm', schema: [schemas.TestObject], schemaVersion: 2}); realm.write(function() { realm.create('TestObject', {doubleCol: 1}); }); TestCase.assertEqual(realm.objects('TestObject')[0].doubleCol, 1) }, testRealmConstructorSchemaValidation: function() { TestCase.assertThrows(function() { new Realm({schema: schemas.AllTypes}); }, 'The schema should be an array'); TestCase.assertThrows(function() { new Realm({schema: ['SomeType']}); }, 'The schema should be an array of objects'); TestCase.assertThrows(function() { new Realm({schema: [{}]}); }, 'The schema should be an array of ObjectSchema objects'); 
TestCase.assertThrows(function() { new Realm({schema: [{name: 'SomeObject'}]}); }, 'The schema should be an array of ObjectSchema objects'); TestCase.assertThrows(function() { new Realm({schema: [{properties: {intCol: Realm.Types.INT}}]}); }, 'The schema should be an array of ObjectSchema objects'); }, testDefaultPath: function() { var defaultRealm = new Realm({schema: []}); TestCase.assertEqual(defaultRealm.path, Realm.defaultPath); var newPath = Realm.defaultPath.substring(0, Realm.defaultPath.lastIndexOf("/") + 1) + 'default2.realm'; Realm.defaultPath = newPath; defaultRealm = new Realm({schema: []}); TestCase.assertEqual(defaultRealm.path, newPath, "should use updated default realm path"); TestCase.assertEqual(Realm.defaultPath, newPath, "defaultPath should have been updated"); }, testRealmCreate: function() { var realm = new Realm({schema: [schemas.IntPrimary, schemas.AllTypes, schemas.TestObject]}); TestCase.assertThrows(function() { realm.create('TestObject', {doubleCol: 1}); }, 'can only create inside a write transaction'); realm.write(function() { realm.create('TestObject', {doubleCol: 1}); realm.create('TestObject', {doubleCol: 2}); }); var objects = realm.objects('TestObject'); TestCase.assertEqual(objects.length, 2, 'wrong object count'); TestCase.assertEqual(objects[0].doubleCol, 1, 'wrong object property value'); TestCase.assertEqual(objects[1].doubleCol, 2, 'wrong object property value'); // test int primary object realm.write(function() { var obj0 = realm.create('IntPrimaryObject', { primaryCol: 0, valueCol: 'val0', }); TestCase.assertThrows(function() { realm.create('IntPrimaryObject', { primaryCol: 0, valueCol: 'val0', }); }, 'cannot create object with conflicting primary key'); realm.create('IntPrimaryObject', { primaryCol: 1, valueCol: 'val1', }, true); var objects = realm.objects('IntPrimaryObject'); TestCase.assertEqual(objects.length, 2); realm.create('IntPrimaryObject', { primaryCol: 0, valueCol: 'newVal0', }, true); TestCase.assertEqual(obj0.valueCol, 'newVal0'); TestCase.assertEqual(objects.length, 2); realm.create('IntPrimaryObject', {primaryCol: 0}, true); TestCase.assertEqual(obj0.valueCol, 'newVal0'); }); // test upsert with all type and string primary object realm.write(function() { var values = { primaryCol: '0', boolCol: true, intCol: 1, floatCol: 1.1, doubleCol: 1.11, stringCol: '1', dateCol: new Date(1), dataCol: new ArrayBuffer(1), objectCol: {doubleCol: 1}, arrayCol: [], }; var obj0 = realm.create('AllTypesObject', values); TestCase.assertThrows(function() { realm.create('AllTypesObject', values); }, 'cannot create object with conflicting primary key'); var obj1 = realm.create('AllTypesObject', { primaryCol: '1', boolCol: false, intCol: 2, floatCol: 2.2, doubleCol: 2.22, stringCol: '2', dateCol: new Date(2), dataCol: new ArrayBuffer(2), objectCol: {doubleCol: 0}, arrayCol: [{doubleCol: 2}], }, true); var objects = realm.objects('AllTypesObject'); TestCase.assertEqual(objects.length, 2); realm.create('AllTypesObject', { primaryCol: '0', boolCol: false, intCol: 2, floatCol: 2.2, doubleCol: 2.22, stringCol: '2', dateCol: new Date(2), dataCol: new ArrayBuffer(2), objectCol: null, arrayCol: [{doubleCol: 2}], }, true); TestCase.assertEqual(objects.length, 2); TestCase.assertEqual(obj0.stringCol, '2'); TestCase.assertEqual(obj0.boolCol, false); TestCase.assertEqual(obj0.intCol, 2); TestCase.assertEqualWithTolerance(obj0.floatCol, 2.2, 0.000001); TestCase.assertEqualWithTolerance(obj0.doubleCol, 2.22, 0.000001); TestCase.assertEqual(obj0.dateCol.getTime(), 2); 
TestCase.assertEqual(obj0.dataCol.byteLength, 2); TestCase.assertEqual(obj0.objectCol, null); TestCase.assertEqual(obj0.arrayCol.length, 1); realm.create('AllTypesObject', {primaryCol: '0'}, true); realm.create('AllTypesObject', {primaryCol: '1'}, true); TestCase.assertEqual(obj0.stringCol, '2'); TestCase.assertEqual(obj0.objectCol, null); TestCase.assertEqual(obj1.objectCol.doubleCol, 0); realm.create('AllTypesObject', { primaryCol: '0', stringCol: '3', objectCol: {doubleCol: 0}, }, true); TestCase.assertEqual(obj0.stringCol, '3'); TestCase.assertEqual(obj0.boolCol, false); TestCase.assertEqual(obj0.intCol, 2); TestCase.assertEqualWithTolerance(obj0.floatCol, 2.2, 0.000001); TestCase.assertEqualWithTolerance(obj0.doubleCol, 2.22, 0.000001); TestCase.assertEqual(obj0.dateCol.getTime(), 2); TestCase.assertEqual(obj0.dataCol.byteLength, 2); TestCase.assertEqual(obj0.objectCol.doubleCol, 0); TestCase.assertEqual(obj0.arrayCol.length, 1); realm.create('AllTypesObject', {primaryCol: '0', objectCol: undefined}, true); realm.create('AllTypesObject', {primaryCol: '1', objectCol: null}, true); TestCase.assertEqual(obj0.objectCol, null); TestCase.assertEqual(obj1.objectCol, null); }); }, testRealmWithIndexedProperties: function() { var IndexedTypes = { name: 'IndexedTypesObject', properties: { boolCol: {type: 'bool', indexed: true}, intCol: {type: 'int', indexed: true}, stringCol: {type: 'string', indexed: true}, dateCol: {type: 'date', indexed: true}, } }; var realm = new Realm({schema: [IndexedTypes]}); realm.write(function() { realm.create('IndexedTypesObject', {boolCol: true, intCol: 1, stringCol: '1', dateCol: new Date(1)}); }); var NotIndexed = { name: 'NotIndexedObject', properties: { floatCol: {type: 'float', indexed: false} } }; new Realm({schema: [NotIndexed], path: '1'}); TestCase.assertThrows(function() { IndexedTypes.properties = { floatCol: {type: 'float', indexed: true} } new Realm({schema: [IndexedTypes], path: '2'}); }); TestCase.assertThrows(function() { IndexedTypes.properties = { doubleCol: {type: 'double', indexed: true} } new Realm({schema: [IndexedTypes], path: '3'}); }); TestCase.assertThrows(function() { IndexedTypes.properties = { dataCol: {type: 'data', indexed: true} } new Realm({schema: [IndexedTypes], path: '4'}); }); // primary key IndexedTypes.primaryKey = 'boolCol'; IndexedTypes.properties = { boolCol: {type: 'bool', indexed: true} } // Test this doesn't throw new Realm({schema: [IndexedTypes], path: '5'}); }, testRealmCreateWithDefaults: function() { var realm = new Realm({schema: [schemas.DefaultValues, schemas.TestObject]}); realm.write(function() { var obj = realm.create('DefaultValuesObject', {}); var properties = schemas.DefaultValues.properties; TestCase.assertEqual(obj.boolCol, properties.boolCol.default); TestCase.assertEqual(obj.intCol, properties.intCol.default); TestCase.assertEqualWithTolerance(obj.floatCol, properties.floatCol.default, 0.000001); TestCase.assertEqualWithTolerance(obj.doubleCol, properties.doubleCol.default, 0.000001); TestCase.assertEqual(obj.stringCol, properties.stringCol.default); TestCase.assertEqual(obj.dateCol.getTime(), properties.dateCol.default.getTime()); TestCase.assertEqual(obj.dataCol.byteLength, properties.dataCol.default.byteLength); TestCase.assertEqual(obj.objectCol.doubleCol, properties.objectCol.default.doubleCol); TestCase.assertEqual(obj.nullObjectCol, null); TestCase.assertEqual(obj.arrayCol.length, properties.arrayCol.default.length); TestCase.assertEqual(obj.arrayCol[0].doubleCol, 
properties.arrayCol.default[0].doubleCol); }); }, testRealmCreateWithConstructor: function() { var customCreated = 0; function CustomObject() { customCreated++; this.intCol *= 100; } CustomObject.schema = { name: 'CustomObject', properties: { intCol: 'int' } } function InvalidObject() { return {}; } TestCase.assertThrows(function() { new Realm({schema: [InvalidObject]}); }); InvalidObject.schema = { name: 'InvalidObject', properties: { intCol: 'int' } } var realm = new Realm({schema: [CustomObject, InvalidObject]}); realm.write(function() { var object = realm.create('CustomObject', {intCol: 1}); TestCase.assertTrue(object instanceof CustomObject); TestCase.assertTrue(Object.getPrototypeOf(object) == CustomObject.prototype); TestCase.assertEqual(customCreated, 1); // Should have been multiplied by 100 in the constructor. TestCase.assertEqual(object.intCol, 100); // Should be able to create object by passing in constructor. object = realm.create(CustomObject, {intCol: 2}); TestCase.assertTrue(object instanceof CustomObject); TestCase.assertTrue(Object.getPrototypeOf(object) == CustomObject.prototype); TestCase.assertEqual(customCreated, 2); TestCase.assertEqual(object.intCol, 200); }); TestCase.assertThrows(function() { realm.write(function() { realm.create('InvalidObject', {intCol: 1}); }); }); // Only the original constructor should be valid. function InvalidCustomObject() {} InvalidCustomObject.schema = CustomObject.schema; TestCase.assertThrows(function() { realm.write(function() { realm.create(InvalidCustomObject, {intCol: 1}); }); }); }, testRealmDelete: function() { var realm = new Realm({schema: [schemas.TestObject]}); realm.write(function() { for (var i = 0; i < 10; i++) { realm.create('TestObject', {doubleCol: i}); } }); var objects = realm.objects('TestObject'); TestCase.assertThrows(function() { realm.delete(objects[0]); }, 'can only delete in a write transaction'); realm.write(function() { TestCase.assertThrows(function() { realm.delete(); }); realm.delete(objects[0]); TestCase.assertEqual(objects.length, 9, 'wrong object count'); TestCase.assertEqual(objects[0].doubleCol, 9, "wrong property value"); TestCase.assertEqual(objects[1].doubleCol, 1, "wrong property value"); realm.delete([objects[0], objects[1]]); TestCase.assertEqual(objects.length, 7, 'wrong object count'); TestCase.assertEqual(objects[0].doubleCol, 7, "wrong property value"); TestCase.assertEqual(objects[1].doubleCol, 8, "wrong property value"); var threeObjects = realm.objects('TestObject').filtered("doubleCol < 5"); TestCase.assertEqual(threeObjects.length, 3, "wrong results count"); realm.delete(threeObjects); TestCase.assertEqual(objects.length, 4, 'wrong object count'); TestCase.assertEqual(threeObjects.length, 0, 'threeObject should have been deleted'); }); }, testDeleteAll: function() { var realm = new Realm({schema: [schemas.TestObject, schemas.IntPrimary]}); realm.write(function() { realm.create('TestObject', {doubleCol: 1}); realm.create('TestObject', {doubleCol: 2}); realm.create('IntPrimaryObject', {primaryCol: 2, valueCol: 'value'}); }); TestCase.assertEqual(realm.objects('TestObject').length, 2); TestCase.assertEqual(realm.objects('IntPrimaryObject').length, 1); TestCase.assertThrows(function() { realm.deleteAll(); }, 'can only deleteAll in a write transaction'); realm.write(function() { realm.deleteAll(); }); TestCase.assertEqual(realm.objects('TestObject').length, 0); TestCase.assertEqual(realm.objects('IntPrimaryObject').length, 0); }, testRealmObjects: function() { var realm = new Realm({schema: 
[schemas.PersonObject, schemas.DefaultValues, schemas.TestObject]}); realm.write(function() { realm.create('PersonObject', {name: 'Ari', age: 10}); realm.create('PersonObject', {name: 'Tim', age: 11}); realm.create('PersonObject', {name: 'Bjarne', age: 12}); realm.create('PersonObject', {name: 'Alex', age: 12, married: true}); }); // Should be able to pass constructor for getting objects. var objects = realm.objects(schemas.PersonObject); TestCase.assertTrue(objects[0] instanceof schemas.PersonObject); function InvalidPerson() {} InvalidPerson.schema = schemas.PersonObject.schema; TestCase.assertThrows(function() { realm.objects(); }); TestCase.assertThrows(function() { realm.objects([]); }); TestCase.assertThrows(function() { realm.objects('InvalidClass'); }); TestCase.assertThrows(function() { realm.objects('PersonObject', 'truepredicate'); }); TestCase.assertThrows(function() { realm.objects(InvalidPerson); }); }, testNotifications: function() { var realm = new Realm({schema: []}); var notificationCount = 0; var notificationName; realm.addListener('change', function(realm, name) { notificationCount++; notificationName = name; }); TestCase.assertEqual(notificationCount, 0); realm.write(function() {}); TestCase.assertEqual(notificationCount, 1); TestCase.assertEqual(notificationName, 'change'); var secondNotificationCount = 0; function secondNotification(realm, name) { secondNotificationCount++; } // The listener should only be added once. realm.addListener('change', secondNotification); realm.addListener('change', secondNotification); realm.write(function() {}); TestCase.assertEqual(notificationCount, 2); TestCase.assertEqual(secondNotificationCount, 1); realm.removeListener('change', secondNotification); realm.write(function() {}); TestCase.assertEqual(notificationCount, 3); TestCase.assertEqual(secondNotificationCount, 1); realm.removeAllListeners(); realm.write(function() {}); TestCase.assertEqual(notificationCount, 3); TestCase.assertEqual(secondNotificationCount, 1); TestCase.assertThrows(function() { realm.addListener('invalid', function() {}); }); realm.addListener('change', function() { throw new Error('error'); }); TestCase.assertThrows(function() { realm.write(function() {}); }); }, });
```
idx: 1
id: 15155
msg: I think this should either return `-1` or throw; my vote is on the former.
proj: realm-realm-js
lang: js
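One concrete argument for the reviewer's `-1`-or-throw suggestion: JavaScript numbers are IEEE-754 doubles, and `0xFFFFFFFFFFFFFFFF` is not exactly representable as one. Python floats use the same format, so the rounding is easy to demonstrate:

```python
sentinel = 0xFFFFFFFFFFFFFFFF       # 2**64 - 1, the current "no such file" value
print(float(sentinel) == 2**64)     # True: the sentinel rounds up to 2**64
print(float(sentinel) == sentinel)  # False: exact comparison is already lost
# A small sentinel such as -1 (or throwing) survives the int-to-double trip.
```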
patch:
```diff
@@ -54,6 +54,7 @@ DEFAULT_SETTINGS = {
         'kinto.core.events.setup_transaction_hook',
     ),
     'event_listeners': '',
+    'heartbeat_timeout_seconds': 5,
     'logging_renderer': 'kinto.core.logs.ClassicLogRenderer',
     'newrelic_config': None,
     'newrelic_env': 'dev',
```
y: 1
oldf:
```python
"""Main entry point """ import pkg_resources from cornice import Service as CorniceService from pyramid.settings import aslist from kinto.core import authentication from kinto.core import errors from kinto.core import events from kinto.core.initialization import ( # NOQA initialize, install_middlewares, load_default_settings) from kinto.core.utils import ( follow_subrequest, current_service, current_resource_name) from kinto.core.logs import logger # Module version, as defined in PEP-0396. __version__ = pkg_resources.get_distribution('kinto').version # FIXME? DEFAULT_SETTINGS = { 'backoff': None, 'batch_max_requests': 25, 'cache_backend': '', 'cache_url': '', 'cache_pool_size': 25, 'cache_prefix': '', 'cors_origins': '*', 'cors_max_age_seconds': 3600, 'eos': None, 'eos_message': None, 'eos_url': None, 'error_info_link': 'https://github.com/Kinto/kinto/issues/', 'http_host': None, 'http_scheme': None, 'id_generator': 'kinto.core.storage.generators.UUID4', 'includes': '', 'initialization_sequence': ( 'kinto.core.initialization.setup_request_bound_data', 'kinto.core.initialization.setup_json_serializer', 'kinto.core.initialization.setup_logging', 'kinto.core.initialization.setup_storage', 'kinto.core.initialization.setup_permission', 'kinto.core.initialization.setup_cache', 'kinto.core.initialization.setup_requests_scheme', 'kinto.core.initialization.setup_version_redirection', 'kinto.core.initialization.setup_deprecation', 'kinto.core.initialization.setup_authentication', 'kinto.core.initialization.setup_backoff', 'kinto.core.initialization.setup_statsd', 'kinto.core.initialization.setup_listeners', 'kinto.core.events.setup_transaction_hook', ), 'event_listeners': '', 'logging_renderer': 'kinto.core.logs.ClassicLogRenderer', 'newrelic_config': None, 'newrelic_env': 'dev', 'paginate_by': None, 'permission_backend': '', 'permission_url': '', 'permission_pool_size': 25, 'profiler_dir': '/tmp', 'profiler_enabled': False, 'project_docs': '', 'project_name': '', 'project_version': '', 'readonly': False, 'retry_after_seconds': 30, 'statsd_prefix': 'kinto.core', 'statsd_url': None, 'storage_backend': '', 'storage_url': '', 'storage_max_fetch_size': 10000, 'storage_pool_size': 25, 'tm.annotate_user': False, # Do annotate transactions with the user-id. 'transaction_per_request': True, 'userid_hmac_secret': '', 'version_prefix_redirect_enabled': True, 'trailing_slash_redirect_enabled': True, 'multiauth.groupfinder': 'kinto.core.authorization.groupfinder', 'multiauth.policies': 'basicauth', 'multiauth.policy.basicauth.use': ('kinto.core.authentication.' 'BasicAuthAuthenticationPolicy'), 'multiauth.authorization_policy': ('kinto.core.authorization.' 'AuthorizationPolicy') } class Service(CorniceService): """Subclass of the default cornice service. This is useful in order to attach specific behaviours without monkey patching the default cornice service (which would impact other uses of it) """ default_cors_headers = ('Backoff', 'Retry-After', 'Alert', 'Content-Length') def error_handler(self, error): return errors.json_error_handler(error) @classmethod def init_from_settings(cls, settings): cls.cors_origins = tuple(aslist(settings['cors_origins'])) cors_max_age = settings['cors_max_age_seconds'] cls.cors_max_age = int(cors_max_age) if cors_max_age else None def includeme(config): settings = config.get_settings() # Heartbeat registry. config.registry.heartbeats = {} # Public settings registry. 
config.registry.public_settings = {'batch_max_requests', 'readonly'} # Directive to declare arbitrary API capabilities. def add_api_capability(config, identifier, description="", url="", **kw): existing = config.registry.api_capabilities.get(identifier) if existing: error_msg = "The '%s' API capability was already registered (%s)." raise ValueError(error_msg % (identifier, existing)) capability = dict(description=description, url=url, **kw) config.registry.api_capabilities[identifier] = capability config.add_directive('add_api_capability', add_api_capability) config.registry.api_capabilities = {} # Resource events helpers. config.add_request_method(events.get_resource_events, name='get_resource_events') config.add_request_method(events.notify_resource_event, name='notify_resource_event') # Setup cornice. config.include("cornice") # Per-request transaction. config.include("pyramid_tm") # Add CORS settings to the base kinto.core Service class. Service.init_from_settings(settings) # Setup components. for step in aslist(settings['initialization_sequence']): step_func = config.maybe_dotted(step) step_func(config) # Custom helpers. config.add_request_method(follow_subrequest) config.add_request_method(authentication.prefixed_userid, property=True) config.add_request_method(lambda r: {'id': r.prefixed_userid}, name='get_user_info') config.add_request_method(current_resource_name, reify=True) config.add_request_method(current_service, reify=True) config.commit() # Include plugins after init, unlike pyramid includes. includes = aslist(settings['includes']) for app in includes: config.include(app) # # Show settings to output. # for key, value in settings.items(): # logger.info('Using %s = %s' % (key, value)) # Scan views. config.scan("kinto.core.views") # Give sign of life. msg = "%(project_name)s %(project_version)s starting." logger.info(msg % settings)
```
idx: 1
id: 9294
msg: I am confused; I've read 2, 3, and 5 seconds in various places.
proj: Kinto-kinto
lang: py
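A minimal sketch of what a per-check heartbeat timeout buys, assuming an asyncio-style check runner (illustrative only, not kinto's actual heartbeat code):

```python
import asyncio

HEARTBEAT_TIMEOUT_SECONDS = 5  # the default this patch adds to DEFAULT_SETTINGS

async def run_heartbeat(check):
    # Treat a backend check that does not answer within the configured
    # timeout as failed, instead of letting it hang the whole endpoint.
    try:
        return await asyncio.wait_for(check(), timeout=HEARTBEAT_TIMEOUT_SECONDS)
    except asyncio.TimeoutError:
        return False
```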
patch:
```diff
@@ -91,7 +91,14 @@ func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error)
 			fcgiBackend.SetSendTimeout(rule.SendTimeout)
 
 			var resp *http.Response
-			contentLength, _ := strconv.Atoi(r.Header.Get("Content-Length"))
+
+			var contentLength int64
+			// if ContentLength is already set
+			if r.ContentLength > 0 {
+				contentLength = r.ContentLength
+			} else {
+				contentLength, _ = strconv.ParseInt(r.Header.Get("Content-Length"), 10, 64)
+			}
 			switch r.Method {
 			case "HEAD":
 				resp, err = fcgiBackend.Head(env)
```
y: 1
oldf:
```go
// Package fastcgi has middleware that acts as a FastCGI client. Requests // that get forwarded to FastCGI stop the middleware execution chain. // The most common use for this package is to serve PHP websites via php-fpm. package fastcgi import ( "errors" "io" "net" "net/http" "os" "path" "path/filepath" "strconv" "strings" "time" "github.com/mholt/caddy" "github.com/mholt/caddy/caddyhttp/httpserver" ) // Handler is a middleware type that can handle requests as a FastCGI client. type Handler struct { Next httpserver.Handler Rules []Rule Root string FileSys http.FileSystem // These are sent to CGI scripts in env variables SoftwareName string SoftwareVersion string ServerName string ServerPort string } // When a rewrite is performed, a header field of this name // is added to the request // It contains the original request URI before the rewrite. const internalRewriteFieldName = "Caddy-Rewrite-Original-URI" // ServeHTTP satisfies the httpserver.Handler interface. func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { for _, rule := range h.Rules { // First requirement: Base path must match and the path must be allowed. if !httpserver.Path(r.URL.Path).Matches(rule.Path) || !rule.AllowedPath(r.URL.Path) { continue } // In addition to matching the path, a request must meet some // other criteria before being proxied as FastCGI. For example, // we probably want to exclude static assets (CSS, JS, images...) // but we also want to be flexible for the script we proxy to. fpath := r.URL.Path if idx, ok := httpserver.IndexFile(h.FileSys, fpath, rule.IndexFiles); ok { fpath = idx // Index file present. // If request path cannot be split, return error. if !rule.canSplit(fpath) { return http.StatusInternalServerError, ErrIndexMissingSplit } } else { // No index file present. // If request path cannot be split, ignore request. if !rule.canSplit(fpath) { continue } } // These criteria work well in this order for PHP sites if !h.exists(fpath) || fpath[len(fpath)-1] == '/' || strings.HasSuffix(fpath, rule.Ext) { // Create environment for CGI script env, err := h.buildEnv(r, rule, fpath) if err != nil { return http.StatusInternalServerError, err } // Connect to FastCGI gateway fcgiBackend, err := rule.dialer.Dial() if err != nil { if err, ok := err.(net.Error); ok && err.Timeout() { return http.StatusGatewayTimeout, err } return http.StatusBadGateway, err } defer fcgiBackend.Close() fcgiBackend.SetReadTimeout(rule.ReadTimeout) fcgiBackend.SetSendTimeout(rule.SendTimeout) var resp *http.Response contentLength, _ := strconv.Atoi(r.Header.Get("Content-Length")) switch r.Method { case "HEAD": resp, err = fcgiBackend.Head(env) case "GET": resp, err = fcgiBackend.Get(env) case "OPTIONS": resp, err = fcgiBackend.Options(env) default: resp, err = fcgiBackend.Post(env, r.Method, r.Header.Get("Content-Type"), r.Body, contentLength) } if err != nil { if err, ok := err.(net.Error); ok && err.Timeout() { return http.StatusGatewayTimeout, err } else if err != io.EOF { return http.StatusBadGateway, err } } // Write response header writeHeader(w, resp) // Write the response body _, err = io.Copy(w, resp.Body) if err != nil { return http.StatusBadGateway, err } // Log any stderr output from upstream if stderr := fcgiBackend.StdErr(); stderr.Len() != 0 { // Remove trailing newline, error logger already does this. 
err = LogError(strings.TrimSuffix(stderr.String(), "\n")) } // Normally we would return the status code if it is an error status (>= 400), // however, upstream FastCGI apps don't know about our contract and have // probably already written an error page. So we just return 0, indicating // that the response body is already written. However, we do return any // error value so it can be logged. // Note that the proxy middleware works the same way, returning status=0. return 0, err } } return h.Next.ServeHTTP(w, r) } // parseAddress returns the network and address of fcgiAddress. // The first string is the network, "tcp" or "unix", implied from the scheme and address. // The second string is fcgiAddress, with scheme prefixes removed. // The two returned strings can be used as parameters to the Dial() function. func parseAddress(fcgiAddress string) (string, string) { // check if address has tcp scheme explicitly set if strings.HasPrefix(fcgiAddress, "tcp://") { return "tcp", fcgiAddress[len("tcp://"):] } // check if address has fastcgi scheme explicitly set if strings.HasPrefix(fcgiAddress, "fastcgi://") { return "tcp", fcgiAddress[len("fastcgi://"):] } // check if unix socket if trim := strings.HasPrefix(fcgiAddress, "unix"); strings.HasPrefix(fcgiAddress, "/") || trim { if trim { return "unix", fcgiAddress[len("unix:"):] } return "unix", fcgiAddress } // default case, a plain tcp address with no scheme return "tcp", fcgiAddress } func writeHeader(w http.ResponseWriter, r *http.Response) { for key, vals := range r.Header { for _, val := range vals { w.Header().Add(key, val) } } w.WriteHeader(r.StatusCode) } func (h Handler) exists(path string) bool { if _, err := os.Stat(h.Root + path); err == nil { return true } return false } // buildEnv returns a set of CGI environment variables for the request. func (h Handler) buildEnv(r *http.Request, rule Rule, fpath string) (map[string]string, error) { var env map[string]string // Get absolute path of requested resource absPath := filepath.Join(rule.Root, fpath) // Separate remote IP and port; more lenient than net.SplitHostPort var ip, port string if idx := strings.LastIndex(r.RemoteAddr, ":"); idx > -1 { ip = r.RemoteAddr[:idx] port = r.RemoteAddr[idx+1:] } else { ip = r.RemoteAddr } // Remove [] from IPv6 addresses ip = strings.Replace(ip, "[", "", 1) ip = strings.Replace(ip, "]", "", 1) // Split path in preparation for env variables. // Previous rule.canSplit checks ensure this can never be -1. splitPos := rule.splitPos(fpath) // Request has the extension; path was split successfully docURI := fpath[:splitPos+len(rule.SplitPath)] pathInfo := fpath[splitPos+len(rule.SplitPath):] scriptName := fpath scriptFilename := absPath // Strip PATH_INFO from SCRIPT_NAME scriptName = strings.TrimSuffix(scriptName, pathInfo) // Get the request URI. The request URI might be as it came in over the wire, // or it might have been rewritten internally by the rewrite middleware (see issue #256). // If it was rewritten, there will be a header indicating the original URL, // which is needed to get the correct RequestURI value for PHP apps. reqURI := r.URL.RequestURI() if origURI := r.Header.Get(internalRewriteFieldName); origURI != "" { reqURI = origURI } // Retrieve name of remote user that was set by some downstream middleware, // possibly basicauth. remoteUser, _ := r.Context().Value(caddy.CtxKey("remote_user")).(string) // Blank if not set // Some variables are unused but cleared explicitly to prevent // the parent environment from interfering. 
env = map[string]string{ // Variables defined in CGI 1.1 spec "AUTH_TYPE": "", // Not used "CONTENT_LENGTH": r.Header.Get("Content-Length"), "CONTENT_TYPE": r.Header.Get("Content-Type"), "GATEWAY_INTERFACE": "CGI/1.1", "PATH_INFO": pathInfo, "QUERY_STRING": r.URL.RawQuery, "REMOTE_ADDR": ip, "REMOTE_HOST": ip, // For speed, remote host lookups disabled "REMOTE_PORT": port, "REMOTE_IDENT": "", // Not used "REMOTE_USER": remoteUser, "REQUEST_METHOD": r.Method, "SERVER_NAME": h.ServerName, "SERVER_PORT": h.ServerPort, "SERVER_PROTOCOL": r.Proto, "SERVER_SOFTWARE": h.SoftwareName + "/" + h.SoftwareVersion, // Other variables "DOCUMENT_ROOT": rule.Root, "DOCUMENT_URI": docURI, "HTTP_HOST": r.Host, // added here, since not always part of headers "REQUEST_URI": reqURI, "SCRIPT_FILENAME": scriptFilename, "SCRIPT_NAME": scriptName, } // compliance with the CGI specification that PATH_TRANSLATED // should only exist if PATH_INFO is defined. // Info: https://www.ietf.org/rfc/rfc3875 Page 14 if env["PATH_INFO"] != "" { env["PATH_TRANSLATED"] = filepath.Join(rule.Root, pathInfo) // Info: http://www.oreilly.com/openbook/cgi/ch02_04.html } // Some web apps rely on knowing HTTPS or not if r.TLS != nil { env["HTTPS"] = "on" } replacer := httpserver.NewReplacer(r, nil, "") // Add env variables from config for _, envVar := range rule.EnvVars { // replace request placeholders in environment variables env[envVar[0]] = replacer.Replace(envVar[1]) } // Add all HTTP headers (except Caddy-Rewrite-Original-URI ) to env variables for field, val := range r.Header { if strings.ToLower(field) == strings.ToLower(internalRewriteFieldName) { continue } header := strings.ToUpper(field) header = headerNameReplacer.Replace(header) env["HTTP_"+header] = strings.Join(val, ", ") } return env, nil } // Rule represents a FastCGI handling rule. // It is parsed from the fastcgi directive in the Caddyfile, see setup.go. type Rule struct { // The base path to match. Required. Path string // The address of the FastCGI server. Required. Address string // Always process files with this extension with fastcgi. Ext string // Use this directory as the fastcgi root directory. Defaults to the root // directory of the parent virtual host. Root string // The path in the URL will be split into two, with the first piece ending // with the value of SplitPath. The first piece will be assumed as the // actual resource (CGI script) name, and the second piece will be set to // PATH_INFO for the CGI script to use. SplitPath string // If the URL ends with '/' (which indicates a directory), these index // files will be tried instead. IndexFiles []string // Environment Variables EnvVars [][2]string // Ignored paths IgnoredSubPaths []string // The duration used to set a deadline when reading from the FastCGI server. ReadTimeout time.Duration // The duration used to set a deadline when sending to the FastCGI server. SendTimeout time.Duration // FCGI dialer dialer dialer } // canSplit checks if path can split into two based on rule.SplitPath. func (r Rule) canSplit(path string) bool { return r.splitPos(path) >= 0 } // splitPos returns the index where path should be split // based on rule.SplitPath. func (r Rule) splitPos(path string) int { if httpserver.CaseSensitivePath { return strings.Index(path, r.SplitPath) } return strings.Index(strings.ToLower(path), strings.ToLower(r.SplitPath)) } // AllowedPath checks if requestPath is not an ignored path. 
func (r Rule) AllowedPath(requestPath string) bool { for _, ignoredSubPath := range r.IgnoredSubPaths { if httpserver.Path(path.Clean(requestPath)).Matches(path.Join(r.Path, ignoredSubPath)) { return false } } return true } var ( headerNameReplacer = strings.NewReplacer(" ", "_", "-", "_") // ErrIndexMissingSplit describes an index configuration error. ErrIndexMissingSplit = errors.New("configured index file(s) must include split value") ) // LogError is a non fatal error that allows requests to go through. type LogError string // Error satisfies error interface. func (l LogError) Error() string { return string(l) }
```
idx: 1
id: 10286
msg: Any particular reason you chose int64 instead of int? This requires adding type conversions throughout the code below.
proj: caddyserver-caddy
lang: go
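The int64 choice matters because Go's `int` (and therefore `strconv.Atoi`) can be 32 bits on some platforms, which overflows for request bodies over 2 GiB. Python integers don't overflow, but `ctypes` can mimic the truncation:

```python
import ctypes

value = int(str(3 * 1024**3))  # a Content-Length header of 3 GiB, parsed

print(ctypes.c_int32(value).value)  # -1073741824: wrapped around in 32 bits
print(ctypes.c_int64(value).value)  # 3221225472: intact in 64 bits
```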
patch:
```diff
@@ -280,14 +280,16 @@ class CommandTest(QuiltTestCase):
         mock_input.return_value = old_refresh_token
 
-        with pytest.raises(command.CommandException, match='Invalid team name'):
+        with pytest.raises(command.CommandException,
+                           match='The team you specified is not a valid team.'):
             command.login('fo!o')
 
         mock_open.assert_not_called()
         mock_login_with_token.assert_not_called()
 
     def test_login_with_token_invalid_team(self):
-        with pytest.raises(command.CommandException, match='Invalid team name'):
+        with pytest.raises(command.CommandException,
+                           match='The team you specified is not a valid team.'):
             command.login_with_token('123', 'fo!o')
 
     @patch('quilt.tools.command._save_auth')
```
y: 1
oldf:
```python
""" Tests for commands. Covered cases: CRUD related: 1. users/list - OK - no auth - not found - server error 2. users/create - OK - no auth - not found - server error - already created (duplicate) - bogus email - empty name - empty email - non existing team 3. users/disable - OK - no auth - not found - server error - already disabled - empty name - deleted user - unknown user - non existing team 4. users/delete - OK - no auth - not found - server error - already deleted - empty name - unknown user - non existing team 5. audit user or package - OK - no auth - no team - not admin 6. users/list_detailed - OK - no auth - no admin - not found - server error 7. access list - OK - no auth 8. access remove - OK - no auth - not owner - revoke owner - free plan 9. access add - OK - no auth - not owner """ # Disable no-self-use, protected-access, too-many-public-methods # pylint: disable=R0201, W0212, R0904 import hashlib import json import os import shutil import time import requests import responses import pytest from io import StringIO import pandas as pd from six import assertRaisesRegex from .utils import QuiltTestCase, patch from ..tools import command, store class CommandTest(QuiltTestCase): def _mock_error(self, endpoint, status, team=None, message="", method=responses.POST): self.requests_mock.add( method, '%s/api/%s' % (command.get_registry_url(team), endpoint), body=json.dumps(dict(message=message)), status=status ) @patch('quilt.tools.command._save_config') @patch('quilt.tools.command._load_config') @patch('quilt.tools.command.input') @patch.dict('os.environ') def test_config_urls_default(self, mock_input, mock_load_config, mock_save_config): os.environ.pop('QUILT_PKG_URL', None) # Remove it cause it takes precedence over config. # test setting default URL with blank string -- result should be default mock_load_config.return_value = {} mock_input.return_value = '' command.config() assert mock_input.called args, kwargs = mock_save_config.call_args mock_load_config.return_value = args[0] if args else kwargs['cfg'] assert command.get_registry_url(None) == command.DEFAULT_REGISTRY_URL @patch('quilt.tools.command._save_config') @patch('quilt.tools.command._load_config') @patch('quilt.tools.command.input') @patch.dict('os.environ') def test_config_good_urls(self, mock_input, mock_load_config, mock_save_config): os.environ.pop('QUILT_PKG_URL', None) # Remove it cause it takes precedence over config. 
test_urls = [ 'https://foo.com', 'http://foo.com', 'https://foo.bar.net', ] # test general URL setting -- result should match input for test_url in test_urls: mock_load_config.return_value = {} mock_input.return_value = test_url command.config() assert mock_input.called mock_input.reset_mock() args, kwargs = mock_save_config.call_args mock_load_config.return_value = args[0] if args else kwargs['cfg'] assert test_url == command.get_registry_url(None) @patch('quilt.tools.command._save_config') @patch('quilt.tools.command._load_config') @patch('quilt.tools.command.input') def test_config_bad_urls(self, mock_input, mock_load_config, mock_save_config): test_urls = [ 'foo.com', 'ftp://foo.com', 'blah://bar.com', 'http://foo.bar.com/baz', ] # test general URL setting -- result should match initial state mock_load_config.return_value = {} initial_url = command.get_registry_url(None) for test_url in test_urls: mock_input.return_value = test_url with assertRaisesRegex(self, command.CommandException, 'Invalid URL'): command.config() assert mock_input.called mock_input.reset_mock() mock_save_config.assert_not_called() assert command.get_registry_url(None) == initial_url def test_version_add_badversion(self): with assertRaisesRegex(self, command.CommandException, 'Invalid version format'): command.version_add('user/test', '2.9.12.2error', 'fabc123', force=True) @patch('quilt.tools.command._match_hash') @patch('quilt.tools.command.input') def test_version_add_confirmed(self, mock_input, mock_match_hash): registry_url = command.get_registry_url(None) mock_input.return_value = 'y' mock_match_hash.return_value = 'fabc123' # Response content is not checked by version_add, so # status ok and URL verification are enough self.requests_mock.add( responses.PUT, registry_url + "/api/version/user/test/2.9.12", status=200, ) command.version_add('user/test', '2.9.12', 'fabc123') @patch('quilt.tools.command.input') def test_version_add_declined(self, mock_input): mock_input.return_value = 'n' command.version_add('user/test', '2.9.12', 'fabc123') # should produce no mock network activity def test_ambiguous_hash(self): registry_url = command.get_registry_url(None) ambiguous_token = "795a7b" # There should be at least two results that start with the ambiguous_token, plus some non-ambiguous # results in fake_data to test against. fake_data = {'logs': [ {'author': 'user', 'created': 1490816524.0, 'hash': '885696c6e40613b3c601e95037caf4e43bda58c39f67ab5d5e56beefb3662ff4'}, {'author': 'user', 'created': 1490816507.0, 'hash': '795a7bc9e40613b3c601e95037caf4e43bda58c39f67ab5d5e56beefb3662ff4'}, {'author': 'user', 'created': 1490816473.0, 'hash': '795a7bc6e40613b3c601e95037caf4e43bda58c39f67ab5d5e56beefb3662ff4'}, {'author': 'user', 'created': 1490816524.0, 'hash': '2501a6c6e40a7b355901fc5037caf4e43bda58c39f67ab5d5e56beefb3662ff4'}, ]} self.requests_mock.add( responses.GET, registry_url + "/api/log/user/test/", json=fake_data ) # Ambiguous hashes in _match_hash's exception will be sorted -- sorted here to match. fake_data_ambiguous = sorted(entry['hash'] for entry in fake_data['logs'] if entry['hash'].startswith(ambiguous_token)) # this will match each ambiguous hash, in order, separated by anything. # ..it allows for formatting changes in the error, but requires the same order. 
fake_data_regexp = r'(.|\n)+'.join(fake_data_ambiguous) with assertRaisesRegex(self, command.CommandException, fake_data_regexp): command._match_hash('user/test', hash='795a7b') def test_push_invalid_package(self): with assertRaisesRegex(self, command.CommandException, "owner/package_name"): command.push(package="no_user") with assertRaisesRegex(self, command.CommandException, "owner/package_name"): command.push(package="a/b/c") def test_install_invalid_package(self): with assertRaisesRegex(self, command.CommandException, "owner/package_name"): command.install(package="no_user") def test_inspect_invalid_package(self): with assertRaisesRegex(self, command.CommandException, "owner/package_name"): command.inspect(package="no_user") with assertRaisesRegex(self, command.CommandException, "owner/package_name"): command.inspect(package="a/b/c") def test_push_missing_package(self): with assertRaisesRegex(self, command.CommandException, "not found"): command.push(package="owner/package") def test_inspect_missing_package(self): with assertRaisesRegex(self, command.CommandException, "not found"): command.inspect(package="owner/package") @patch('quilt.tools.command._open_url') @patch('quilt.tools.command.input') @patch('quilt.tools.command.login_with_token') def test_login(self, mock_login_with_token, mock_input, mock_open): old_refresh_token = "123" mock_input.return_value = old_refresh_token command.login(None) mock_open.assert_called_with('%s/login' % command.get_registry_url(None)) mock_login_with_token.assert_called_with(old_refresh_token, None) @patch('quilt.tools.command._open_url') @patch('quilt.tools.command.input') @patch('quilt.tools.command.login_with_token') def test_login_with_team(self, mock_login_with_token, mock_input, mock_open): old_refresh_token = "123" mock_input.return_value = old_refresh_token command.login('foo') mock_open.assert_called_with('%s/login' % command.get_registry_url('foo')) mock_login_with_token.assert_called_with(old_refresh_token, 'foo') @patch('quilt.tools.command._open_url') @patch('quilt.tools.command.input') @patch('quilt.tools.command.login_with_token') def test_login_invalid_team(self, mock_login_with_token, mock_input, mock_open): old_refresh_token = "123" mock_input.return_value = old_refresh_token with pytest.raises(command.CommandException, match='Invalid team name'): command.login('fo!o') mock_open.assert_not_called() mock_login_with_token.assert_not_called() def test_login_with_token_invalid_team(self): with pytest.raises(command.CommandException, match='Invalid team name'): command.login_with_token('123', 'fo!o') @patch('quilt.tools.command._save_auth') def test_login_token(self, mock_save): old_refresh_token = "123" refresh_token = "456" access_token = "abc" expires_at = 1000.0 self.requests_mock.add( responses.POST, '%s/api/token' % command.get_registry_url(None), json=dict( status=200, refresh_token=refresh_token, access_token=access_token, expires_at=expires_at ) ) command.login_with_token(old_refresh_token, None) assert self.requests_mock.calls[0].request.body == "refresh_token=%s" % old_refresh_token mock_save.assert_called_with({ command.get_registry_url(None): dict( team=None, refresh_token=refresh_token, access_token=access_token, expires_at=expires_at ) }) @patch('quilt.tools.command._save_auth') def test_login_token_server_error(self, mock_save): self.requests_mock.add( responses.POST, '%s/api/token' % command.get_registry_url(None), status=500 ) with self.assertRaises(command.CommandException): command.login_with_token("123", None) 
mock_save.assert_not_called() @patch('quilt.tools.command._save_auth') def test_login_token_auth_fail(self, mock_save): self.requests_mock.add( responses.POST, '%s/api/token' % command.get_registry_url(None), json=dict( status=200, error="Bad token!" ) ) with self.assertRaises(command.CommandException): command.login_with_token("123", None) mock_save.assert_not_called() @patch('quilt.tools.command._save_auth') @patch('quilt.tools.command._load_auth') @patch('quilt.tools.command._open_url') @patch('quilt.tools.command.input', lambda x: '') @patch('quilt.tools.command.login_with_token', lambda x, y: None) def test_login_not_allowed(self, mock_open, mock_load, mock_save): # Already logged is as a public user. mock_load.return_value = { command.get_registry_url(None): dict( team=None ) } # Normal login is ok. command.login(None) mock_open.reset_mock() mock_save.reset_mock() # Team login is not allowed. with self.assertRaises(command.CommandException): command.login('foo') mock_open.assert_not_called() mock_save.assert_not_called() # Already logged is as a team user. mock_load.return_value = { command.get_registry_url('foo'): dict( team='foo' ) } # Normal login is not allowed. with self.assertRaises(command.CommandException): command.login(None) # Login as 'foo' is ok. command.login('foo') mock_open.reset_mock() mock_save.reset_mock() # Login as a different team is not allowed. with self.assertRaises(command.CommandException): command.login('bar') mock_open.assert_not_called() mock_save.assert_not_called() def test_ls(self): mydir = os.path.dirname(__file__) build_path = os.path.join(mydir, './build_simple.yml') command.build('foo/bar', build_path) command.ls() def test_search(self): self.requests_mock.add( responses.GET, '%s/api/search/?q=asdf' % command.get_registry_url(None), status=200, json={ "packages": [], "status": 200 } ) command.search("asdf") @patch('quilt.tools.command._find_logged_in_team', lambda: "teamname") def test_search_team(self): self.requests_mock.add( responses.GET, '%s/api/search/?q=asdf' % command.get_registry_url("teamname"), status=200, json={ "packages": [], "status": 200 } ) self.requests_mock.add( responses.GET, '%s/api/search/?q=asdf' % command.get_registry_url(None), status=200, json={ "packages": [], "status": 200 } ) command.search("asdf") def test_inspect_valid_package(self): mydir = os.path.dirname(__file__) build_path = os.path.join(mydir, './build_simple.yml') command.build('foo/bar', build_path) command.inspect('foo/bar') def test_user_list(self): self.requests_mock.add( responses.GET, '%s/api/users/list' % command.get_registry_url(None), status=200, json={ 'count':'1', 'results':[{ 'username':'admin', 'email':'[email protected]', 'first_name':'', 'last_name':'', 'is_superuser':True, 'is_admin':True, 'is_staff':True }] } ) command.list_users() def test_user_list_no_auth(self): self._mock_error('users/list', status=401, method=responses.GET) with self.assertRaises(command.CommandException): command.list_users() def test_user_list_not_found(self): self._mock_error('users/list', status=404, method=responses.GET) with self.assertRaises(command.CommandException): command.list_users() def test_user_list_server_error(self): self._mock_error('users/list', status=500, method=responses.GET) with self.assertRaises(command.CommandException): command.list_users() def test_user_list_detailed(self): self.requests_mock.add( responses.GET, '%s/api/users/list_detailed' % command.get_registry_url(None), status=200, json=json.dumps({ 'users': { 'admin': { 'packages': '1', 
'installs': {'admin': '1'}, 'previews': {'admin': '1'}, 'pushes': {'admin': '1'}, 'deletes': {'admin': '1'}, 'status': 'active', 'last_seen': '' } } })) command.list_users_detailed() def test_user_detailed_list_no_auth(self): self._mock_error('users/list_detailed', status=401, method=responses.GET) with self.assertRaises(command.CommandException): command.list_users_detailed() def test_user_detailed_list_no_admin(self): self._mock_error('users/list_detailed', status=403, method=responses.GET) with self.assertRaises(command.CommandException): command.list_users_detailed() def test_user_detailed_list_not_found(self): self._mock_error('users/list_detailed', status=404, method=responses.GET) with self.assertRaises(command.CommandException): command.list_users_detailed() def test_user_detailed_list_server_error(self): self._mock_error('users/list_detailed', status=500, method=responses.GET) with self.assertRaises(command.CommandException): command.list_users_detailed() def test_user_create(self): self.requests_mock.add( responses.POST, '%s/api/users/create' % command.get_registry_url(None), status=201, json={ 'count':'1', 'username':'admin', 'first_name':'', 'last_name':'', 'is_superuser':True, 'is_admin':True, 'is_staff':True, } ) command.create_user('bob', '[email protected]', None) def test_user_create_no_auth(self): self._mock_error('users/create', status=401) with self.assertRaises(command.CommandException): command.create_user('bob', '[email protected]', None) def test_user_disable(self): self.requests_mock.add( responses.POST, '%s/api/users/disable' % command.get_registry_url(None), status=201 ) command.disable_user('bob', None) def test_user_enable(self): self.requests_mock.add( responses.POST, '%s/api/users/enable' % command.get_registry_url(None), status=201 ) command.enable_user('bob', None) def test_create_not_found(self): self._mock_error('users/create', team='qux', status=404) with self.assertRaises(command.CommandException): command.create_user('bob', '[email protected]', team='qux') def test_create_server_error(self): self._mock_error('users/create', team='qux', status=500) with self.assertRaises(command.CommandException): command.create_user('bob', '[email protected]', team='qux') def test_create_duplicate(self): self._mock_error('users/create', status=400, team='qux', message="Bad request. Maybe there's already") with assertRaisesRegex(self, command.CommandException, "Bad request. Maybe there's already"): command.create_user('bob', '[email protected]', team='qux') def test_user_create_bogus(self): self._mock_error('users/create', status=400, team='qux', message="Please enter a valid email address.") with assertRaisesRegex(self, command.CommandException, "Please enter a valid email address."): command.create_user('bob', 'wrongemail', 'qux') def test_user_create_empty_email_team(self): self._mock_error('users/create', status=400, team='qux', message="Please enter a valid email address.") with assertRaisesRegex(self, command.CommandException, "Please enter a valid email address."): command.create_user('bob', '', team='qux') def test_user_create_empty(self): self._mock_error('users/create', status=400, team='qux', message="Bad request. Maybe there's already") with assertRaisesRegex(self, command.CommandException, "Bad request. 
Maybe there's already"): command.create_user('', '[email protected]', team='qux') def test_user_create_bogus_team(self): self._mock_error('users/create', status=400, team='qux', message="Please enter a valid email address.") with assertRaisesRegex(self, command.CommandException, "Please enter a valid email address."): command.create_user('bob', 'wrongemail', team='qux') def test_user_create_empty_team(self): self._mock_error('users/create', status=400, team='qux', message="Bad request. Maybe there's already") with assertRaisesRegex(self, command.CommandException, "Bad request. Maybe there's already"): command.create_user('', '[email protected]', team='qux') def test_user_create_nonexisting_team(self): self._mock_error('users/create', status=404, team='nonexisting') with self.assertRaises(command.CommandException): command.create_user('bob', '[email protected]', team='nonexisting') def test_user_disable_not_found(self): self._mock_error('users/disable', status=404, team='qux') with self.assertRaises(command.CommandException): command.disable_user('bob', 'qux') def test_user_disable_server_error(self): self._mock_error('users/disable', team='qux', status=500) with self.assertRaises(command.CommandException): command.disable_user('bob', 'qux') def test_user_disable_already(self): self._mock_error('users/disable', status=404, team='qux') with self.assertRaises(command.CommandException): command.disable_user('bob', team='qux') def test_user_disable_deleted(self): self._mock_error('users/disable', status=404, team='qux') with self.assertRaises(command.CommandException): command.disable_user('deleted', team='qux') def test_user_disable_non_existing_team(self): self._mock_error('users/disable', status=404, team='nonexisting') with self.assertRaises(command.CommandException): command.disable_user('bob', team='nonexisting') def test_user_disable_non_existing(self): self._mock_error('users/disable', status=404, team='qux') with self.assertRaises(command.CommandException): command.disable_user('nonexisting', team='qux') def test_user_disable_empty(self): self._mock_error('users/disable', status=400, team='qux', message="Username is not valid") with assertRaisesRegex(self, command.CommandException, "Username is not valid"): command.disable_user('', team='qux') def test_user_disable_no_auth(self): self._mock_error('users/disable', status=401, team='qux') with self.assertRaises(command.CommandException): command.disable_user('bob', team='qux') def test_user_disable_unknown(self): self._mock_error('users/disable', status=404, team='qux') with self.assertRaises(command.CommandException): command.disable_user('unknown', team='qux') def test_user_delete(self): self._mock_error('users/delete', status=201, team='qux') command.delete_user('bob', force=True, team='qux') def test_user_delete_not_found(self): self._mock_error('users/delete', team='qux', status=404) with self.assertRaises(command.CommandException): command.delete_user('bob', team='qux', force=True) def test_user_delete_server_error(self): self._mock_error('users/delete', status=404, team='qux') with self.assertRaises(command.CommandException): command.delete_user('bob', 'qux', force=True) def test_user_delete_empty(self): self._mock_error('users/delete', status=400, team='qux', message="Username is not valid") with assertRaisesRegex(self, command.CommandException, "Username is not valid"): command.delete_user('', force=True, team='qux') def test_user_delete_no_auth(self): self._mock_error('users/delete', status=401, team='qux') with 
self.assertRaises(command.CommandException): command.delete_user('bob', force=True, team='qux') def test_user_delete_unknown(self): self._mock_error('users/delete', status=404, team='qux') with self.assertRaises(command.CommandException): command.delete_user('unknown', force=True, team='qux') def test_user_delete_already(self): self._mock_error('users/delete', status=404, team='qux') with self.assertRaises(command.CommandException): command.delete_user('deleted', team='qux', force=True) def test_user_delete_nonexisting_team(self): self._mock_error('users/delete', status=404, team='nonexisting') with self.assertRaises(command.CommandException): command.delete_user('bob', force=True, team='nonexisting') @patch('quilt.tools.command._find_logged_in_team', lambda: "someteam") def test_audit_user(self): self.requests_mock.add( responses.GET, '%s/api/audit/bob/' % command.get_registry_url("someteam"), status=201, json={ 'events': [{ 'created': '', 'user': 'bob', 'type': 'user', 'package_owner': '', 'package_name': '', 'package_hash': '', 'extra': '' }] }) command.audit('bob') @patch('quilt.tools.command._find_logged_in_team', lambda: "someteam") def test_audit_package(self): self.requests_mock.add( responses.GET, '%s/api/audit/foo/bar/' % command.get_registry_url("someteam"), status=201, json={ 'events': [{ 'created': '', 'user': 'bob', 'type': 'package', 'package_owner': '', 'package_name': '', 'package_hash': '', 'extra': '' }] }) command.audit('foo/bar') @patch('quilt.tools.command._find_logged_in_team', lambda: "someteam") def test_audit_no_auth_user(self): self._mock_error('audit/bob/', status=401, team='someteam', method=responses.GET) with self.assertRaises(command.CommandException): command.audit('bob') @patch('quilt.tools.command._find_logged_in_team', lambda: "someteam") def test_audit_no_auth_package(self): self._mock_error('audit/foo/bar/', status=401, team='someteam', method=responses.GET) with self.assertRaises(command.CommandException): command.audit('foo/bar') @patch('quilt.tools.command._find_logged_in_team', lambda: None) def test_audit_no_team(self): with assertRaisesRegex(self, command.CommandException, "Not logged in as a team user"): command.audit('bob') command.audit('foo/bar') @patch('quilt.tools.command._find_logged_in_team', lambda: "someteam") def test_audit_not_admin_user(self): self._mock_error('audit/bob/', status=403, team='someteam', method=responses.GET) with self.assertRaises(command.CommandException): command.audit('bob') @patch('quilt.tools.command._find_logged_in_team', lambda: "someteam") def test_audit_not_admin_package(self): self._mock_error('audit/foo/bar/', status=403, team='someteam', method=responses.GET) with self.assertRaises(command.CommandException): command.audit('foo/bar') @patch('quilt.tools.command._find_logged_in_team', lambda: None) @patch('sys.stdout', new_callable=StringIO) def test_access_list(self, mock_stdout): self.requests_mock.add( responses.GET, '%s/api/access/foo/bar' % command.get_registry_url(None), status=201, json={ 'users': ['foo', 'bob'] } ) command.access_list('foo/bar') assert mock_stdout.getvalue() == 'foo\nbob\n' @patch('quilt.tools.command._find_logged_in_team', lambda: None) def test_access_list_no_auth(self): self._mock_error('access/foo/bar', status=401, method=responses.GET) with self.assertRaises(command.CommandException): command.access_list('foo/bar') @patch('quilt.tools.command._find_logged_in_team', lambda: None) @patch('sys.stdout', new_callable=StringIO) def test_access_remove(self, mock_stdout): 
self.requests_mock.add( responses.DELETE, '%s/api/access/foo/bar/bob' % command.get_registry_url(None), status=201 ) command.access_remove('foo/bar', 'bob') assert mock_stdout.getvalue() == u'Access removed for bob\n' @patch('quilt.tools.command._find_logged_in_team', lambda: None) def test_access_remove_no_auth(self): self._mock_error('access/foo/bar/bob', status=401, method=responses.DELETE) with self.assertRaises(command.CommandException): command.access_remove('foo/bar', 'bob') @patch('quilt.tools.command._find_logged_in_team', lambda: None) def test_access_remove_not_owner(self): self._mock_error('access/foo/bar/bob', status=403, method=responses.DELETE, message="Only the package owner can revoke access") with assertRaisesRegex(self, command.CommandException, "Only the package owner can revoke access"): command.access_remove('foo/bar', 'bob') @patch('quilt.tools.command._find_logged_in_team', lambda: None) def test_access_remove_owner(self): self._mock_error('access/foo/bar/foo', status=403, method=responses.DELETE, message="Cannot revoke the owner's access") with assertRaisesRegex(self, command.CommandException, "Cannot revoke the owner's access"): command.access_remove('foo/bar', 'foo') @patch('quilt.tools.command._find_logged_in_team', lambda: None) def test_access_remove_free_plan(self): self._mock_error('access/foo/bar/foo', status=402, method=responses.DELETE, message="Insufficient permissions.") with assertRaisesRegex(self, command.CommandException, "Insufficient permissions."): command.access_remove('foo/bar', 'foo') @patch('quilt.tools.command._find_logged_in_team', lambda: None) @patch('sys.stdout', new_callable=StringIO) def test_access_add(self, mock_stdout): self.requests_mock.add( responses.PUT, '%s/api/access/foo/bar/bob' % command.get_registry_url(None), status=201 ) command.access_add('foo/bar', 'bob') assert mock_stdout.getvalue() == u'Access added for bob\n' @patch('quilt.tools.command._find_logged_in_team', lambda: None) def test_access_add_no_auth(self): self._mock_error('access/foo/bar/bob', status=401, method=responses.PUT) with self.assertRaises(command.CommandException): command.access_add('foo/bar', 'bob') @patch('quilt.tools.command._find_logged_in_team', lambda: None) def test_access_add_not_owner(self): self._mock_error('access/foo/bar/bob', status=403, method=responses.PUT, message="Only the package owner can revoke access") with assertRaisesRegex(self, command.CommandException, "Only the package owner can revoke access"): command.access_add('foo/bar', 'bob') # TODO: work in progress # def test_find_node_by_name(self): # mydir = os.path.dirname(__file__) # build_path = os.path.join(mydir, './build.yml') # command.build('foo/bar', build_path) # # owner, pkg = store.parse_package('foo/bar') # pkgobj = store.PackageStore.find_package(owner, pkg) # assert pkgobj is not None # assert pkgobj.find_node_by_name('') is None # assert pkgobj.find_node_by_name('bar') is None # assert pkgobj.find_node_by_name('foo') is None # assert pkgobj.find_node_by_name('README.md') is None # assert pkgobj.find_node_by_name('data/README') is None # assert pkgobj.find_node_by_name('data/README.md') is None # assert pkgobj.find_node_by_name('README') is not None # tsvnode = pkgobj.find_node_by_name('dataframes/tsv') # assert tsvnode is not None # tsvdf = pkgobj.get_obj(tsvnode) # assert tsvdf is not None # diff = command.diff_vs_dataframe('foo/bar', 'dataframes/tsv', tsvdf) # assert diff is None # diff = command.diff_vs_dataframe('foo/bar', 'dataframes/csv', tsvdf) # assert diff is 
None # import random # tsvdf['UID1'] = tsvdf['UID1'].apply( # lambda v: v if random.random()>0.01 else ('val'+str(random.random()))) # diff = command.diff_vs_dataframe('foo/bar', 'dataframes/tsv', tsvdf) # assert diff is None def test_log(self): mydir = os.path.dirname(__file__) build_path = os.path.join(mydir, './build_simple.yml') owner = 'foo' package = 'bar' command.build('%s/%s' % (owner, package), build_path) pkg_obj = store.PackageStore.find_package(None, owner, package) self._mock_logs_list(owner, package, pkg_obj.get_hash()) command.log("{owner}/{pkg}".format(owner=owner, pkg=package)) def _mock_logs_list(self, owner, package, pkg_hash): logs_url = "%s/api/log/%s/%s/" % (command.get_registry_url(None), owner, package) resp = dict(logs=[dict( hash=pkg_hash, created=time.time(), author=owner)]) print("MOCKING URL=%s" % logs_url) self.requests_mock.add(responses.GET, logs_url, json.dumps(resp)) def test_generate_buildfile_wo_building(self): mydir = os.path.dirname(__file__) path = os.path.join(mydir, 'data') buildfilename = 'build_test_generate_buildfile_wo_building.yml' buildfilepath = os.path.join(path, buildfilename) assert not os.path.exists(buildfilepath), "%s already exists" % buildfilepath try: command.generate(path, outfilename=buildfilename) assert os.path.exists(buildfilepath), "failed to create %s" % buildfilepath finally: os.remove(buildfilepath) @patch('quilt.tools.command.input') def test_delete_not_confirmed(self, mock_input): mock_input.return_value = 'blah' command.delete('user/test') @patch('quilt.tools.command.input') def test_delete_confirmed(self, mock_input): owner = 'foo' package = 'bar' mock_input.return_value = '%s/%s' % (owner, package) delete_url = "%s/api/package/%s/%s/" % (command.get_registry_url(None), owner, package) self.requests_mock.add(responses.DELETE, delete_url, json.dumps(dict())) command.delete('%s/%s' % (owner, package)) def test_build_from_git(self): git_url = 'https://github.com/quiltdata/testdata.git' def mock_git_clone(cmd): # test git command assert len(cmd) == 6 assert cmd[:5] == ['git', 'clone', '-q', '--depth=1', git_url] # fake git clone by copying test files into destpath srcfile = 'foo.csv' mydir = os.path.dirname(__file__) srcpath = os.path.join(mydir, 'data', srcfile) destpath = os.path.join(cmd[-1], srcfile) shutil.copyfile(srcpath, destpath) with patch('subprocess.check_call', mock_git_clone): command.build('user/test', git_url) from quilt.data.user import test assert hasattr(test, 'foo') assert isinstance(test.foo(), pd.DataFrame) def test_build_from_git_branch(self): branch = 'notmaster' git_url = 'https://github.com/quiltdata/testdata.git' def mock_git_clone(cmd): # test git command assert len(cmd) == 8 assert cmd[:7] == ['git', 'clone', '-q', '--depth=1', '-b', branch, git_url] # fake git clone by copying test files into destpath srcfile = 'foo.csv' mydir = os.path.dirname(__file__) srcpath = os.path.join(mydir, 'data', srcfile) destpath = os.path.join(cmd[-1], srcfile) shutil.copyfile(srcpath, destpath) with patch('subprocess.check_call', mock_git_clone): command.build('user/test', "{url}@{brch}".format(url=git_url, brch=branch)) from quilt.data.user import test assert hasattr(test, 'foo') assert isinstance(test.foo(), pd.DataFrame) def test_build_yaml_syntax_error(self): path = os.path.dirname(__file__) buildfilepath = os.path.join(path, 'build_bad_syntax.yml') with assertRaisesRegex(self, command.CommandException, r'Bad yaml syntax.*build_bad_syntax\.yml'): command.build('user/test', buildfilepath) def 
test_build_checks_yaml_syntax_error(self): # pylint: disable=C0103 path = os.path.abspath(os.path.dirname(__file__)) buildfilepath = os.path.join(path, 'build_checks_bad_syntax.yml') checksorigpath = os.path.join(path, 'checks_bad_syntax.yml') checksfilepath = os.path.join(path, 'checks.yml') try: origdir = os.curdir os.chdir(path) assert not os.path.exists(checksfilepath) shutil.copy(checksorigpath, checksfilepath) with assertRaisesRegex(self, command.CommandException, r'Bad yaml syntax.*checks\.yml'): command.build('user/test', buildfilepath) finally: os.remove(checksfilepath) os.chdir(origdir) def test_git_clone_fail(self): git_url = 'https://github.com/quiltdata/testdata.git' def mock_git_clone(cmd): # test git command assert len(cmd) == 6 assert cmd[:5] == ['git', 'clone', '-q', '--depth=1', git_url] # fake git clone fail raise Exception() with patch('subprocess.check_call', mock_git_clone): with self.assertRaises(command.CommandException): command.build('user/pkg__test_git_clone_fail', git_url) # TODO: running -n (pytest-xdist) there's leaky state and can throw # either ImportError: cannot import name or ModuleNotFoundError with assertRaisesRegex(self, Exception, r'cannot import|not found|No module named|Could not find'): from quilt.data.user import pkg__test_git_clone_fail def test_logging(self): mydir = os.path.dirname(__file__) build_path = os.path.join(mydir, './build_simple.yml') log_url = '%s/api/log' % (command.get_registry_url(None),) # Successful logging response. with patch('quilt.tools.command._load_config', return_value={}): def callback(request): data = json.loads(request.body) assert data == [dict( type='build', package=hashlib.md5(b'foo/bar').hexdigest(), dry_run=False, env='default', )] return (200, {}, '') self.requests_mock.add_callback(responses.POST, log_url, callback) command.build('foo/bar', build_path) # Failed logging response. with patch('quilt.tools.command._load_config', return_value={}): self.requests_mock.add(responses.POST, log_url, status=500) command.build('foo/bar', build_path) # ConnectionError with patch('quilt.tools.command._load_config', return_value={}): self.requests_mock.add(responses.POST, log_url, body=requests.exceptions.ConnectionError()) command.build('foo/bar', build_path) # Disabled logging. with patch('quilt.tools.command._load_config', return_value={'disable_analytics': True}): self.requests_mock.add(responses.POST, log_url, body=AssertionError('Unexpected logging!')) command.build('foo/bar', build_path) self.requests_mock.reset() # Prevent the "not all requests ..." assert. def test_rm(self): """ Test removing a package. """ mydir = os.path.dirname(__file__) build_path = os.path.join(mydir, './build_simple.yml') command.build('foo/bar', build_path) command.rm('foo/bar', force=True) teststore = store.PackageStore(self._store_dir) assert not os.path.isdir(teststore.package_path(None, 'foo', 'bar')) def test_rm_non_existent_package(self): """ Test removing a non-existent package. """ teststore = store.PackageStore(self._store_dir) assert not os.path.isdir(teststore.package_path(None, 'foo', 'bar')) command.rm('foo/bar', force=True) def test_rm_package_w_shared_obj(self): """ Test removing a package that shares an object with another. The other package should still remain. 
""" mydir = os.path.dirname(__file__) build_path = os.path.join(mydir, './build_simple.yml') command.build('foo/bar', build_path) command.build('foo/bar2', build_path) command.rm('foo/bar', force=True) teststore = store.PackageStore(self._store_dir) assert not os.path.isdir(teststore.package_path(None, 'foo', 'bar')) from quilt.data.foo import bar2 assert isinstance(bar2.foo(), pd.DataFrame) def test_rm_subpackage(self): """ Test removing a sub-package (not supported). """ with assertRaisesRegex(self, command.CommandException, "Specify package as"): command.rm('foo/bar/baz', force=True) def test_rm_doesnt_break_cache(self): """ Test building, removing then rebuilding a package. The package should be correctly rebuilt. """ mydir = os.path.dirname(__file__) build_path = os.path.join(mydir, './build_simple.yml') command.build('foo/bar', build_path) command.rm('foo/bar', force=True) teststore = store.PackageStore(self._store_dir) assert not os.path.isdir(teststore.package_path(None, 'foo', 'bar')) mydir = os.path.dirname(__file__) build_path = os.path.join(mydir, './build_simple.yml') command.build('foo/bar', build_path) from quilt.data.foo import bar assert isinstance(bar.foo(), pd.DataFrame) def test_parse_package_names(self): # good parse strings expected = (None, 'user', 'package') assert command.parse_package('user/package') == expected expected = ('team', 'user', 'package') assert command.parse_package('team:user/package') == expected expected = (None, 'user', 'package', ['foo', 'bar']) assert command.parse_package('user/package/foo/bar', True) == expected expected = ('team', 'user', 'package', ['foo', 'bar']) assert command.parse_package('team:user/package/foo/bar', True) == expected expected = ('team', 'user', 'package', []) assert command.parse_package('team:user/package', True) == expected # bad parse strings with pytest.raises(command.CommandException, message='subdir should be rejected'): command.parse_package('user/package/subdir', allow_subpath=False) with pytest.raises(command.CommandException, match="Invalid user name"): command.parse_package('9user/package') with pytest.raises(command.CommandException, match='Invalid package name'): command.parse_package('user/!package') with pytest.raises(command.CommandException, match='Invalid element in subpath'): command.parse_package('user/package/&subdir', True) with pytest.raises(command.CommandException, message='subdir should be rejected'): command.parse_package('team:user/package/subdir', allow_subpath=False) with pytest.raises(command.CommandException, match='Invalid team name'): command.parse_package('team%:user/package/subdir', allow_subpath=True) with pytest.raises(command.CommandException, match="Invalid user name"): command.parse_package('team:9user/package') with pytest.raises(command.CommandException, match='Invalid package name'): command.parse_package('team:user/!package') with pytest.raises(command.CommandException, match='Invalid element in subpath'): command.parse_package('team:user/package/&subdir', True) # XXX: in this case, should we just strip the trialing slash? 
with pytest.raises(command.CommandException, match='Invalid element in subpath'): command.parse_package('team:user/package/subdir/', True) def test_parse_package_extended_names(self): # good parse strings expected = ('user/package', None, 'user', 'package', [], None, None, None) assert command.parse_package_extended('user/package') == expected expected = ('user/package/sub/path', None, 'user', 'package', ['sub', 'path'], None, None, None) assert command.parse_package_extended('user/package/sub/path') == expected expected = ('team:user/package', 'team', 'user', 'package', [], None, None, None) assert command.parse_package_extended('team:user/package') == expected expected = ('team:user/package/sub/path', 'team', 'user', 'package', ['sub', 'path'], None, None, None) assert command.parse_package_extended('team:user/package/sub/path') == expected expected = ('user/package', None, 'user', 'package', [], 'abc123', None, None) assert command.parse_package_extended('user/package:h:abc123') == expected expected = ('user/package', None, 'user', 'package', [], 'abc123', None, None) assert command.parse_package_extended('user/package:hash:abc123') == expected expected = ('user/package', None, 'user', 'package', [], None, '123', None) assert command.parse_package_extended('user/package:v:123') == expected expected = ('user/package', None, 'user', 'package', [], None, '123', None) assert command.parse_package_extended('user/package:version:123') == expected expected = ('user/package', None, 'user', 'package', [], None, None, 'some') assert command.parse_package_extended('user/package:t:some') == expected expected = ('user/package', None, 'user', 'package', [], None, None, 'some') assert command.parse_package_extended('user/package:tag:some') == expected expected = ('user/package/sub/path', None, 'user', 'package', ['sub', 'path'], 'abc123', None, None) assert command.parse_package_extended('user/package/sub/path:h:abc123') == expected expected = ('user/package/sub/path', None, 'user', 'package', ['sub', 'path'], 'abc123', None, None) assert command.parse_package_extended('user/package/sub/path:hash:abc123') == expected expected = ('user/package/sub/path', None, 'user', 'package', ['sub', 'path'], None, '123', None) assert command.parse_package_extended('user/package/sub/path:v:123') == expected expected = ('user/package/sub/path', None, 'user', 'package', ['sub', 'path'], None, '123', None) assert command.parse_package_extended('user/package/sub/path:version:123') == expected expected = ('user/package/sub/path', None, 'user', 'package', ['sub', 'path'], None, None, 'some') assert command.parse_package_extended('user/package/sub/path:t:some') == expected expected = ('user/package/sub/path', None, 'user', 'package', ['sub', 'path'], None, None, 'some') assert command.parse_package_extended('user/package/sub/path:tag:some') == expected expected = ('team:user/package', 'team', 'user', 'package', [], 'abc123', None, None) assert command.parse_package_extended('team:user/package:h:abc123') == expected expected = ('team:user/package', 'team', 'user', 'package', [], 'abc123', None, None) assert command.parse_package_extended('team:user/package:hash:abc123') == expected expected = ('team:user/package', 'team', 'user', 'package', [], None, '123', None) assert command.parse_package_extended('team:user/package:v:123') == expected expected = ('team:user/package', 'team', 'user', 'package', [], None, '123', None) assert command.parse_package_extended('team:user/package:version:123') == expected expected = 
('team:user/package', 'team', 'user', 'package', [], None, None, 'some') assert command.parse_package_extended('team:user/package:t:some') == expected expected = ('team:user/package', 'team', 'user', 'package', [], None, None, 'some') assert command.parse_package_extended('team:user/package:tag:some') == expected expected = ('team:user/package/sub/path', 'team', 'user', 'package', ['sub', 'path'], 'abc123', None, None) assert command.parse_package_extended('team:user/package/sub/path:h:abc123') == expected expected = ('team:user/package/sub/path', 'team', 'user', 'package', ['sub', 'path'], 'abc123', None, None) assert command.parse_package_extended('team:user/package/sub/path:hash:abc123') == expected expected = ('team:user/package/sub/path', 'team', 'user', 'package', ['sub', 'path'], None, '123', None) assert command.parse_package_extended('team:user/package/sub/path:v:123') == expected expected = ('team:user/package/sub/path', 'team', 'user', 'package', ['sub', 'path'], None, '123', None) assert command.parse_package_extended('team:user/package/sub/path:version:123') == expected expected = ('team:user/package/sub/path', 'team', 'user', 'package', ['sub', 'path'], None, None, 'some') assert command.parse_package_extended('team:user/package/sub/path:t:some') == expected expected = ('team:user/package/sub/path', 'team', 'user', 'package', ['sub', 'path'], None, None, 'some') assert command.parse_package_extended('team:user/package/sub/path:tag:some') == expected # bad parse strings with pytest.raises(command.CommandException): command.parse_package_extended('user/package:a:aaa111') with pytest.raises(command.CommandException): command.parse_package_extended('team:user/package:a:aaa111') with pytest.raises(command.CommandException): command.parse_package_extended('foo:bar:baz')
1
16,237
this string should really be a local constant. it's repeated below.
quiltdata-quilt
py
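A minimal sketch of the reviewer's suggestion above — hoisting the repeated "owner/package_name" pattern into a single module-level constant. The constant name and the stand-in push()/CommandException below are hypothetical, added only to keep the sketch self-contained and runnable:

import unittest

# Hypothetical constant replacing the literal repeated across the tests above.
BAD_PACKAGE_NAME = "owner/package_name"

class CommandException(Exception):
    """Stand-in for command.CommandException, for this sketch only."""

def push(package):
    # Minimal stand-in: reject anything that is not exactly owner/package.
    if package.count("/") != 1:
        raise CommandException("Expected %s" % BAD_PACKAGE_NAME)

class PushTest(unittest.TestCase):
    def test_push_invalid_package(self):
        with self.assertRaisesRegex(CommandException, BAD_PACKAGE_NAME):
            push(package="no_user")
        with self.assertRaisesRegex(CommandException, BAD_PACKAGE_NAME):
            push(package="a/b/c")

if __name__ == "__main__":
    unittest.main()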
@@ -194,6 +194,7 @@ namespace OpenTelemetry.Exporter.Jaeger.Implementation int i => new JaegerTag(attribute.Key, JaegerTagType.LONG, vLong: Convert.ToInt64(i)), long l => new JaegerTag(attribute.Key, JaegerTagType.LONG, vLong: l), float f => new JaegerTag(attribute.Key, JaegerTagType.DOUBLE, vDouble: Convert.ToDouble(f)), + short sh => new JaegerTag(attribute.Key, JaegerTagType.LONG, vLong: Convert.ToInt64(sh)), double d => new JaegerTag(attribute.Key, JaegerTagType.DOUBLE, vDouble: d), bool b => new JaegerTag(attribute.Key, JaegerTagType.BOOL, vBool: b), _ => new JaegerTag(attribute.Key, JaegerTagType.STRING, vStr: attribute.Value.ToString()),
1
// <copyright file="JaegerActivityExtensions.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Collections.Generic; using System.Diagnostics; using System.Runtime.CompilerServices; using OpenTelemetry.Internal; using OpenTelemetry.Trace; namespace OpenTelemetry.Exporter.Jaeger.Implementation { internal static class JaegerActivityExtensions { internal const string JaegerErrorFlagTagName = "error"; private const int DaysPerYear = 365; // Number of days in 4 years private const int DaysPer4Years = (DaysPerYear * 4) + 1; // 1461 // Number of days in 100 years private const int DaysPer100Years = (DaysPer4Years * 25) - 1; // 36524 // Number of days in 400 years private const int DaysPer400Years = (DaysPer100Years * 4) + 1; // 146097 // Number of days from 1/1/0001 to 12/31/1969 private const int DaysTo1970 = (DaysPer400Years * 4) + (DaysPer100Years * 3) + (DaysPer4Years * 17) + DaysPerYear; // 719,162 private const long UnixEpochTicks = DaysTo1970 * TimeSpan.TicksPerDay; private const long TicksPerMicrosecond = TimeSpan.TicksPerMillisecond / 1000; private const long UnixEpochMicroseconds = UnixEpochTicks / TicksPerMicrosecond; // 62,135,596,800,000,000 public static JaegerSpan ToJaegerSpan(this Activity activity) { var jaegerTags = new TagEnumerationState { Tags = PooledList<JaegerTag>.Create(), }; activity.EnumerateTags(ref jaegerTags); string peerServiceName = null; if (activity.Kind == ActivityKind.Client || activity.Kind == ActivityKind.Producer) { PeerServiceResolver.Resolve(ref jaegerTags, out peerServiceName, out bool addAsTag); if (peerServiceName != null && addAsTag) { PooledList<JaegerTag>.Add(ref jaegerTags.Tags, new JaegerTag(SemanticConventions.AttributePeerService, JaegerTagType.STRING, vStr: peerServiceName)); } } // The Span.Kind must translate into a tag. 
// See https://opentracing.io/specification/conventions/ if (activity.Kind != ActivityKind.Internal) { string spanKind = null; if (activity.Kind == ActivityKind.Server) { spanKind = "server"; } else if (activity.Kind == ActivityKind.Client) { spanKind = "client"; } else if (activity.Kind == ActivityKind.Consumer) { spanKind = "consumer"; } else if (activity.Kind == ActivityKind.Producer) { spanKind = "producer"; } if (spanKind != null) { PooledList<JaegerTag>.Add(ref jaegerTags.Tags, new JaegerTag("span.kind", JaegerTagType.STRING, vStr: spanKind)); } } var activitySource = activity.Source; if (!string.IsNullOrEmpty(activitySource.Name)) { PooledList<JaegerTag>.Add(ref jaegerTags.Tags, new JaegerTag("otel.library.name", JaegerTagType.STRING, vStr: activitySource.Name)); if (!string.IsNullOrEmpty(activitySource.Version)) { PooledList<JaegerTag>.Add(ref jaegerTags.Tags, new JaegerTag("otel.library.version", JaegerTagType.STRING, vStr: activitySource.Version)); } } var traceId = Int128.Empty; var spanId = Int128.Empty; var parentSpanId = Int128.Empty; if (activity.IdFormat == ActivityIdFormat.W3C) { // TODO: The check above should be enforced by the usage of the exporter. Perhaps enforce at higher-level. traceId = new Int128(activity.TraceId); spanId = new Int128(activity.SpanId); if (activity.ParentSpanId != default) { parentSpanId = new Int128(activity.ParentSpanId); } } return new JaegerSpan( peerServiceName: peerServiceName, traceIdLow: traceId.Low, traceIdHigh: traceId.High, spanId: spanId.Low, parentSpanId: parentSpanId.Low, operationName: activity.DisplayName, flags: (activity.Context.TraceFlags & ActivityTraceFlags.Recorded) > 0 ? 0x1 : 0, startTime: ToEpochMicroseconds(activity.StartTimeUtc), duration: (long)activity.Duration.TotalMilliseconds * 1000, references: activity.ToJaegerSpanRefs(), tags: jaegerTags.Tags, logs: activity.ToJaegerLogs()); } [MethodImpl(MethodImplOptions.AggressiveInlining)] public static PooledList<JaegerSpanRef> ToJaegerSpanRefs(this Activity activity) { LinkEnumerationState references = default; activity.EnumerateLinks(ref references); return references.SpanRefs; } [MethodImpl(MethodImplOptions.AggressiveInlining)] public static PooledList<JaegerLog> ToJaegerLogs(this Activity activity) { EventEnumerationState logs = default; activity.EnumerateEvents(ref logs); return logs.Logs; } [MethodImpl(MethodImplOptions.AggressiveInlining)] public static JaegerLog ToJaegerLog(this ActivityEvent timedEvent) { var jaegerTags = new EventTagsEnumerationState { Tags = PooledList<JaegerTag>.Create(), }; timedEvent.EnumerateTags(ref jaegerTags); if (!jaegerTags.HasEvent) { // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk_exporters/jaeger.md#events PooledList<JaegerTag>.Add(ref jaegerTags.Tags, new JaegerTag("event", JaegerTagType.STRING, vStr: timedEvent.Name)); } // TODO: Use the same function as JaegerConversionExtensions or check that the perf here is acceptable. 
return new JaegerLog(timedEvent.Timestamp.ToEpochMicroseconds(), jaegerTags.Tags); } [MethodImpl(MethodImplOptions.AggressiveInlining)] public static JaegerSpanRef ToJaegerSpanRef(this in ActivityLink link) { var traceId = new Int128(link.Context.TraceId); var spanId = new Int128(link.Context.SpanId); // Assume FOLLOWS_FROM for links, mirrored from Java: https://github.com/open-telemetry/opentelemetry-java/pull/481#discussion_r312577862 var refType = JaegerSpanRefType.FOLLOWS_FROM; return new JaegerSpanRef(refType, traceId.Low, traceId.High, spanId.Low); } public static JaegerTag ToJaegerTag(this KeyValuePair<string, object> attribute) { return attribute.Value switch { string s => new JaegerTag(attribute.Key, JaegerTagType.STRING, vStr: s), int i => new JaegerTag(attribute.Key, JaegerTagType.LONG, vLong: Convert.ToInt64(i)), long l => new JaegerTag(attribute.Key, JaegerTagType.LONG, vLong: l), float f => new JaegerTag(attribute.Key, JaegerTagType.DOUBLE, vDouble: Convert.ToDouble(f)), double d => new JaegerTag(attribute.Key, JaegerTagType.DOUBLE, vDouble: d), bool b => new JaegerTag(attribute.Key, JaegerTagType.BOOL, vBool: b), _ => new JaegerTag(attribute.Key, JaegerTagType.STRING, vStr: attribute.Value.ToString()), }; } public static long ToEpochMicroseconds(this DateTime utcDateTime) { // Truncate sub-microsecond precision before offsetting by the Unix Epoch to avoid // the last digit being off by one for dates that result in negative Unix times long microseconds = utcDateTime.Ticks / TicksPerMicrosecond; return microseconds - UnixEpochMicroseconds; } public static long ToEpochMicroseconds(this DateTimeOffset timestamp) { // Truncate sub-microsecond precision before offsetting by the Unix Epoch to avoid // the last digit being off by one for dates that result in negative Unix times long microseconds = timestamp.UtcDateTime.Ticks / TicksPerMicrosecond; return microseconds - UnixEpochMicroseconds; } private static void ProcessJaegerTagArray(ref PooledList<JaegerTag> tags, KeyValuePair<string, object> activityTag) { if (activityTag.Value is int[] intArray) { foreach (var item in intArray) { JaegerTag jaegerTag = new JaegerTag(activityTag.Key, JaegerTagType.LONG, vLong: Convert.ToInt64(item)); PooledList<JaegerTag>.Add(ref tags, jaegerTag); } } else if (activityTag.Value is string[] stringArray) { foreach (var item in stringArray) { JaegerTag jaegerTag = new JaegerTag(activityTag.Key, JaegerTagType.STRING, vStr: item); PooledList<JaegerTag>.Add(ref tags, jaegerTag); } } else if (activityTag.Value is bool[] boolArray) { foreach (var item in boolArray) { JaegerTag jaegerTag = new JaegerTag(activityTag.Key, JaegerTagType.BOOL, vBool: item); PooledList<JaegerTag>.Add(ref tags, jaegerTag); } } else if (activityTag.Value is double[] doubleArray) { foreach (var item in doubleArray) { JaegerTag jaegerTag = new JaegerTag(activityTag.Key, JaegerTagType.DOUBLE, vDouble: item); PooledList<JaegerTag>.Add(ref tags, jaegerTag); } } } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static void ProcessJaegerTag(ref TagEnumerationState state, string key, JaegerTag jaegerTag) { if (jaegerTag.VStr != null) { PeerServiceResolver.InspectTag(ref state, key, jaegerTag.VStr); if (key == SpanAttributeConstants.StatusCodeKey) { StatusCode? 
statusCode = StatusHelper.GetStatusCodeForTagValue(jaegerTag.VStr); if (statusCode == StatusCode.Error) { // Error flag: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk_exporters/jaeger.md#error-flag PooledList<JaegerTag>.Add(ref state.Tags, new JaegerTag(JaegerErrorFlagTagName, JaegerTagType.BOOL, vBool: true)); } else if (!statusCode.HasValue || statusCode == StatusCode.Unset) { // Unset Status is not sent: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk_exporters/jaeger.md#status return; } // Normalize status since it is user-driven. jaegerTag = new JaegerTag(key, JaegerTagType.STRING, vStr: StatusHelper.GetTagValueForStatusCode(statusCode.Value)); } else if (key == JaegerErrorFlagTagName) { // Ignore `error` tag if it exists, it will be added based on StatusCode + StatusDescription. return; } } else if (jaegerTag.VLong.HasValue) { PeerServiceResolver.InspectTag(ref state, key, jaegerTag.VLong.Value); } PooledList<JaegerTag>.Add(ref state.Tags, jaegerTag); } private struct TagEnumerationState : IActivityEnumerator<KeyValuePair<string, object>>, PeerServiceResolver.IPeerServiceState { public PooledList<JaegerTag> Tags; public string PeerService { get; set; } public int? PeerServicePriority { get; set; } public string HostName { get; set; } public string IpAddress { get; set; } public long Port { get; set; } public bool ForEach(KeyValuePair<string, object> activityTag) { if (activityTag.Value is Array) { ProcessJaegerTagArray(ref this.Tags, activityTag); } else if (activityTag.Value != null) { ProcessJaegerTag(ref this, activityTag.Key, activityTag.ToJaegerTag()); } return true; } } private struct LinkEnumerationState : IActivityEnumerator<ActivityLink> { public bool Created; public PooledList<JaegerSpanRef> SpanRefs; public bool ForEach(ActivityLink activityLink) { if (!this.Created) { this.SpanRefs = PooledList<JaegerSpanRef>.Create(); this.Created = true; } PooledList<JaegerSpanRef>.Add(ref this.SpanRefs, activityLink.ToJaegerSpanRef()); return true; } } private struct EventEnumerationState : IActivityEnumerator<ActivityEvent> { public bool Created; public PooledList<JaegerLog> Logs; public bool ForEach(ActivityEvent activityEvent) { if (!this.Created) { this.Logs = PooledList<JaegerLog>.Create(); this.Created = true; } PooledList<JaegerLog>.Add(ref this.Logs, activityEvent.ToJaegerLog()); return true; } } private struct EventTagsEnumerationState : IActivityEnumerator<KeyValuePair<string, object>> { public PooledList<JaegerTag> Tags; public bool HasEvent; public bool ForEach(KeyValuePair<string, object> tag) { if (tag.Value is Array) { ProcessJaegerTagArray(ref this.Tags, tag); } else if (tag.Value != null) { PooledList<JaegerTag>.Add(ref this.Tags, tag.ToJaegerTag()); } if (tag.Key == "event") { this.HasEvent = true; } return true; } } } }
1
19,914
Number types are hard. Should we be considering byte/sbyte? What about unsigned variants that would otherwise fit in a signed 64-bit integer, like UInt16 and UInt32?
open-telemetry-opentelemetry-dotnet
.cs
@@ -170,7 +170,8 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Core.Tests [MemberData(nameof(HostHeaderData))] public void ValidHostHeadersParsed(string host) { - Assert.True(HttpUtilities.IsValidHostHeader(host)); + HttpUtilities.ValidateHostHeader(host); + Assert.True(true); } public static TheoryData<string> HostHeaderInvalidData
1
// Copyright (c) .NET Foundation. All rights reserved. // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System; using System.Text; using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http; using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Infrastructure; using Xunit; namespace Microsoft.AspNetCore.Server.Kestrel.Core.Tests { public class HttpUtilitiesTest { [Theory] [InlineData("CONNECT / HTTP/1.1", true, "CONNECT", HttpMethod.Connect)] [InlineData("DELETE / HTTP/1.1", true, "DELETE", HttpMethod.Delete)] [InlineData("GET / HTTP/1.1", true, "GET", HttpMethod.Get)] [InlineData("HEAD / HTTP/1.1", true, "HEAD", HttpMethod.Head)] [InlineData("PATCH / HTTP/1.1", true, "PATCH", HttpMethod.Patch)] [InlineData("POST / HTTP/1.1", true, "POST", HttpMethod.Post)] [InlineData("PUT / HTTP/1.1", true, "PUT", HttpMethod.Put)] [InlineData("OPTIONS / HTTP/1.1", true, "OPTIONS", HttpMethod.Options)] [InlineData("TRACE / HTTP/1.1", true, "TRACE", HttpMethod.Trace)] [InlineData("GET/ HTTP/1.1", false, null, HttpMethod.Custom)] [InlineData("get / HTTP/1.1", false, null, HttpMethod.Custom)] [InlineData("GOT / HTTP/1.1", false, null, HttpMethod.Custom)] [InlineData("ABC / HTTP/1.1", false, null, HttpMethod.Custom)] [InlineData("PO / HTTP/1.1", false, null, HttpMethod.Custom)] [InlineData("PO ST / HTTP/1.1", false, null, HttpMethod.Custom)] [InlineData("short ", false, null, HttpMethod.Custom)] public void GetsKnownMethod(string input, bool expectedResult, string expectedKnownString, HttpMethod expectedMethod) { // Arrange var block = new Span<byte>(Encoding.ASCII.GetBytes(input)); // Act HttpMethod knownMethod; var result = block.GetKnownMethod(out knownMethod, out var length); string toString = null; if (knownMethod != HttpMethod.Custom) { toString = HttpUtilities.MethodToString(knownMethod); } // Assert Assert.Equal(expectedResult, result); Assert.Equal(expectedMethod, knownMethod); Assert.Equal(toString, expectedKnownString); Assert.Equal(length, expectedKnownString?.Length ?? 0); } [Theory] [InlineData("HTTP/1.0\r", true, HttpUtilities.Http10Version, HttpVersion.Http10)] [InlineData("HTTP/1.1\r", true, HttpUtilities.Http11Version, HttpVersion.Http11)] [InlineData("HTTP/3.0\r", false, null, HttpVersion.Unknown)] [InlineData("http/1.0\r", false, null, HttpVersion.Unknown)] [InlineData("http/1.1\r", false, null, HttpVersion.Unknown)] [InlineData("short ", false, null, HttpVersion.Unknown)] public void GetsKnownVersion(string input, bool expectedResult, string expectedKnownString, HttpVersion version) { // Arrange var block = new Span<byte>(Encoding.ASCII.GetBytes(input)); // Act var result = block.GetKnownVersion(out HttpVersion knownVersion, out var length); string toString = null; if (knownVersion != HttpVersion.Unknown) { toString = HttpUtilities.VersionToString(knownVersion); } // Assert Assert.Equal(version, knownVersion); Assert.Equal(expectedResult, result); Assert.Equal(expectedKnownString, toString); Assert.Equal(expectedKnownString?.Length ?? 
0, length); } [Theory] [InlineData("HTTP/1.0\r", "HTTP/1.0")] [InlineData("HTTP/1.1\r", "HTTP/1.1")] public void KnownVersionsAreInterned(string input, string expected) { TestKnownStringsInterning(input, expected, span => { HttpUtilities.GetKnownVersion(span, out var version, out var _); return HttpUtilities.VersionToString(version); }); } [Theory] [InlineData("https://host/", "https://")] [InlineData("http://host/", "http://")] public void KnownSchemesAreInterned(string input, string expected) { TestKnownStringsInterning(input, expected, span => { HttpUtilities.GetKnownHttpScheme(span, out var scheme); return HttpUtilities.SchemeToString(scheme); }); } [Theory] [InlineData("CONNECT / HTTP/1.1", "CONNECT")] [InlineData("DELETE / HTTP/1.1", "DELETE")] [InlineData("GET / HTTP/1.1", "GET")] [InlineData("HEAD / HTTP/1.1", "HEAD")] [InlineData("PATCH / HTTP/1.1", "PATCH")] [InlineData("POST / HTTP/1.1", "POST")] [InlineData("PUT / HTTP/1.1", "PUT")] [InlineData("OPTIONS / HTTP/1.1", "OPTIONS")] [InlineData("TRACE / HTTP/1.1", "TRACE")] public void KnownMethodsAreInterned(string input, string expected) { TestKnownStringsInterning(input, expected, span => { HttpUtilities.GetKnownMethod(span, out var method, out var length); return HttpUtilities.MethodToString(method); }); } private void TestKnownStringsInterning(string input, string expected, Func<byte[], string> action) { // Act var knownString1 = action(Encoding.ASCII.GetBytes(input)); var knownString2 = action(Encoding.ASCII.GetBytes(input)); // Assert Assert.Equal(knownString1, expected); Assert.Same(knownString1, knownString2); } public static TheoryData<string> HostHeaderData { get { return new TheoryData<string>() { "z", "1", "y:1", "1:1", "[ABCdef]", "[abcDEF]:0", "[abcdef:127.2355.1246.114]:0", "[::1]:80", "127.0.0.1:80", "900.900.900.900:9523547852", "foo", "foo:234", "foo.bar.baz", "foo.BAR.baz:46245", "foo.ba-ar.baz:46245", "-foo:1234", "xn--asdfaf:134", "-", "_", "~", "!", "$", "'", "(", ")", }; } } [Theory] [MemberData(nameof(HostHeaderData))] public void ValidHostHeadersParsed(string host) { Assert.True(HttpUtilities.IsValidHostHeader(host)); } public static TheoryData<string> HostHeaderInvalidData { get { // see https://tools.ietf.org/html/rfc7230#section-5.4 var data = new TheoryData<string>() { "[]", // Too short "[::]", // Too short "[ghijkl]", // Non-hex "[afd:adf:123", // Incomplete "[afd:adf]123", // Missing : "[afd:adf]:", // Missing port digits "[afd adf]", // Space "[ad-314]", // dash ":1234", // Missing host "a:b:c", // Missing [] "::1", // Missing [] "::", // Missing everything "abcd:1abcd", // Letters in port "abcd:1.2", // Dot in port "1.2.3.4:", // Missing port digits "1.2 .4", // Space }; // These aren't allowed anywhere in the host header var invalid = "\"#%*+,/;<=>?@[]\\^`{}|"; foreach (var ch in invalid) { data.Add(ch.ToString()); } invalid = "!\"#$%&'()*+,/;<=>?@[]\\^_`{}|~-"; foreach (var ch in invalid) { data.Add("[abd" + ch + "]:1234"); } invalid = "!\"#$%&'()*+,/;<=>?@[]\\^_`{}|~:abcABC-."; foreach (var ch in invalid) { data.Add("a.b.c:" + ch); } return data; } } [Theory] [MemberData(nameof(HostHeaderInvalidData))] public void InvalidHostHeadersRejected(string host) { Assert.False(HttpUtilities.IsValidHostHeader(host)); } } }
1
14,938
`Assert.True(true)`... xunit used to have Assert.DoesNotThrow
aspnet-KestrelHttpServer
.cs
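The review note about Assert.True(true) points at a real xUnit 2.x idiom: Record.Exception replaced the removed Assert.DoesNotThrow. A sketch of the test from the patch rewritten that way (ValidateHostHeader is the method the patch itself introduces):

[Theory]
[MemberData(nameof(HostHeaderData))]
public void ValidHostHeadersParsed(string host)
{
    // Record.Exception returns the thrown exception, or null if none was thrown,
    // which makes the "does not throw" expectation explicit.
    var exception = Record.Exception(() => HttpUtilities.ValidateHostHeader(host));
    Assert.Null(exception);
}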
@@ -3,8 +3,9 @@ if ENV["S3_BUCKET_NAME"] && ENV["S3_ACCESS_KEY_ID"] && ENV["S3_SECRET_ACCESS_KEY bucket: ENV["S3_BUCKET_NAME"], s3_credentials: { access_key_id: ENV["S3_ACCESS_KEY_ID"], - secret_access_key: ENV["S3_SECRET_ACCESS_KEY"] + secret_access_key: ENV["S3_SECRET_ACCESS_KEY"], }, + s3_region: "us-east-1", s3_permissions: :private, storage: :s3, )
1
if ENV["S3_BUCKET_NAME"] && ENV["S3_ACCESS_KEY_ID"] && ENV["S3_SECRET_ACCESS_KEY"] Paperclip::Attachment.default_options.merge!( bucket: ENV["S3_BUCKET_NAME"], s3_credentials: { access_key_id: ENV["S3_ACCESS_KEY_ID"], secret_access_key: ENV["S3_SECRET_ACCESS_KEY"] }, s3_permissions: :private, storage: :s3, ) end
1
16,487
should this be an env var like the other `S3_*` values?
18F-C2
rb
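A sketch of the reviewer's question applied: reading the region from the environment like the other S3_* values, with the patch's hard-coded region kept as a fallback. S3_REGION is a hypothetical variable name, not from the source:

if ENV["S3_BUCKET_NAME"] && ENV["S3_ACCESS_KEY_ID"] && ENV["S3_SECRET_ACCESS_KEY"]
  Paperclip::Attachment.default_options.merge!(
    bucket: ENV["S3_BUCKET_NAME"],
    s3_credentials: {
      access_key_id: ENV["S3_ACCESS_KEY_ID"],
      secret_access_key: ENV["S3_SECRET_ACCESS_KEY"],
    },
    # Hypothetical S3_REGION env var; "us-east-1" stays as the default.
    s3_region: ENV.fetch("S3_REGION", "us-east-1"),
    s3_permissions: :private,
    storage: :s3,
  )
end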
@@ -9,3 +9,12 @@ export function assign(obj, props) { for (let i in props) obj[i] = props[i]; return /** @type {O & P} */ (obj); } + +/** + * Remove a child node from its parent if attached. + * @param {Node} node The node to remove + */ +export function removeNode(node) { + let parentNode = node.parentNode; + if (parentNode) parentNode.removeChild(node); +}
1
/** * Assign properties from `props` to `obj` * @template O, P The obj and props types * @param {O} obj The object to copy properties to * @param {P} props The object to copy properties from * @returns {O & P} */ export function assign(obj, props) { for (let i in props) obj[i] = props[i]; return /** @type {O & P} */ (obj); }
1
12,608
If you do go this route, I think it would be worth adding a note here that IE 11 is the only reason why we're not using the more obvious `node.remove()`.
preactjs-preact
js
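The review only asks for a comment, so a sketch is simply the helper from the patch with the suggested IE 11 note added:

/**
 * Remove a child node from its parent if attached.
 * NOTE: `node.remove()` would be the more obvious call, but it is not
 * supported by IE 11, which is the only reason this helper exists.
 * @param {Node} node The node to remove
 */
export function removeNode(node) {
	let parentNode = node.parentNode;
	if (parentNode) parentNode.removeChild(node);
}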
@@ -26,7 +26,7 @@ class RecommendationChecker(checkers.BaseChecker): "consider-iterating-dictionary", "Emitted when the keys of a dictionary are iterated through the .keys() " "method. It is enough to just iterate through the dictionary itself, as " - 'in "for key in dictionary".', + 'in "for key in dictionary" or "if key in dictionary".', ), "C0206": ( "Consider iterating with .items()",
1
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/main/LICENSE from typing import Union, cast import astroid from astroid import nodes from pylint import checkers, interfaces from pylint.checkers import utils class RecommendationChecker(checkers.BaseChecker): __implements__ = (interfaces.IAstroidChecker,) name = "refactoring" msgs = { "C0200": ( "Consider using enumerate instead of iterating with range and len", "consider-using-enumerate", "Emitted when code that iterates with range and len is " "encountered. Such code can be simplified by using the " "enumerate builtin.", ), "C0201": ( "Consider iterating the dictionary directly instead of calling .keys()", "consider-iterating-dictionary", "Emitted when the keys of a dictionary are iterated through the .keys() " "method. It is enough to just iterate through the dictionary itself, as " 'in "for key in dictionary".', ), "C0206": ( "Consider iterating with .items()", "consider-using-dict-items", "Emitted when iterating over the keys of a dictionary and accessing the " "value by index lookup. " "Both the key and value can be accessed by iterating using the .items() " "method of the dictionary instead.", ), "C0207": ( "Use %s instead", "use-maxsplit-arg", "Emitted when accessing only the first or last element of str.split(). " "The first and last element can be accessed by using " "str.split(sep, maxsplit=1)[0] or str.rsplit(sep, maxsplit=1)[-1] " "instead.", ), "C0208": ( "Use a sequence type when iterating over values", "use-sequence-for-iteration", "When iterating over values, sequence types (e.g., ``lists``, ``tuples``, ``ranges``) " "are more efficient than ``sets``.", ), "C0209": ( "Formatting a regular string which could be a f-string", "consider-using-f-string", "Used when we detect a string that is being formatted with format() or % " "which could potentially be a f-string. 
The use of f-strings is preferred.", ), } @staticmethod def _is_builtin(node, function): inferred = utils.safe_infer(node) if not inferred: return False return utils.is_builtin_object(inferred) and inferred.name == function @utils.check_messages("consider-iterating-dictionary", "use-maxsplit-arg") def visit_call(self, node: nodes.Call) -> None: self._check_consider_iterating_dictionary(node) self._check_use_maxsplit_arg(node) def _check_consider_iterating_dictionary(self, node: nodes.Call) -> None: if not isinstance(node.func, nodes.Attribute): return if node.func.attrname != "keys": return if not isinstance(node.parent, (nodes.For, nodes.Comprehension)): return inferred = utils.safe_infer(node.func) if not isinstance(inferred, astroid.BoundMethod) or not isinstance( inferred.bound, nodes.Dict ): return if isinstance(node.parent, (nodes.For, nodes.Comprehension)): self.add_message("consider-iterating-dictionary", node=node) def _check_use_maxsplit_arg(self, node: nodes.Call) -> None: """Add message when accessing first or last elements of a str.split() or str.rsplit().""" # Check if call is split() or rsplit() if not ( isinstance(node.func, nodes.Attribute) and node.func.attrname in ("split", "rsplit") and isinstance(utils.safe_infer(node.func), astroid.BoundMethod) ): return try: utils.get_argument_from_call(node, 0, "sep") except utils.NoSuchArgumentError: return try: # Ignore if maxsplit arg has been set utils.get_argument_from_call(node, 1, "maxsplit") return except utils.NoSuchArgumentError: pass if isinstance(node.parent, nodes.Subscript): try: subscript_value = utils.get_subscript_const_value(node.parent).value except utils.InferredTypeError: return # Check for cases where variable (Name) subscripts may be mutated within a loop if isinstance(node.parent.slice, nodes.Name): # Check if loop present within the scope of the node scope = node.scope() for loop_node in scope.nodes_of_class((nodes.For, nodes.While)): loop_node = cast(nodes.NodeNG, loop_node) if not loop_node.parent_of(node): continue # Check if var is mutated within loop (Assign/AugAssign) for assignment_node in loop_node.nodes_of_class(nodes.AugAssign): assignment_node = cast(nodes.AugAssign, assignment_node) if node.parent.slice.name == assignment_node.target.name: return for assignment_node in loop_node.nodes_of_class(nodes.Assign): assignment_node = cast(nodes.Assign, assignment_node) if node.parent.slice.name in [ n.name for n in assignment_node.targets ]: return if subscript_value in (-1, 0): fn_name = node.func.attrname new_fn = "rsplit" if subscript_value == -1 else "split" new_name = ( node.func.as_string().rsplit(fn_name, maxsplit=1)[0] + new_fn + f"({node.args[0].as_string()}, maxsplit=1)[{subscript_value}]" ) self.add_message("use-maxsplit-arg", node=node, args=(new_name,)) @utils.check_messages( "consider-using-enumerate", "consider-using-dict-items", "use-sequence-for-iteration", ) def visit_for(self, node: nodes.For) -> None: self._check_consider_using_enumerate(node) self._check_consider_using_dict_items(node) self._check_use_sequence_for_iteration(node) def _check_consider_using_enumerate(self, node: nodes.For) -> None: """Emit a convention whenever range and len are used for indexing.""" # Verify that we have a `range([start], len(...), [stop])` call and # that the object which is iterated is used as a subscript in the # body of the for. # Is it a proper range call? 
if not isinstance(node.iter, nodes.Call): return if not self._is_builtin(node.iter.func, "range"): return if not node.iter.args: return is_constant_zero = ( isinstance(node.iter.args[0], nodes.Const) and node.iter.args[0].value == 0 ) if len(node.iter.args) == 2 and not is_constant_zero: return if len(node.iter.args) > 2: return # Is it a proper len call? if not isinstance(node.iter.args[-1], nodes.Call): return second_func = node.iter.args[-1].func if not self._is_builtin(second_func, "len"): return len_args = node.iter.args[-1].args if not len_args or len(len_args) != 1: return iterating_object = len_args[0] if isinstance(iterating_object, nodes.Name): expected_subscript_val_type = nodes.Name elif isinstance(iterating_object, nodes.Attribute): expected_subscript_val_type = nodes.Attribute else: return # If we're defining __iter__ on self, enumerate won't work scope = node.scope() if ( isinstance(iterating_object, nodes.Name) and iterating_object.name == "self" and scope.name == "__iter__" ): return # Verify that the body of the for loop uses a subscript # with the object that was iterated. This uses some heuristics # in order to make sure that the same object is used in the # for body. for child in node.body: for subscript in child.nodes_of_class(nodes.Subscript): subscript = cast(nodes.Subscript, subscript) if not isinstance(subscript.value, expected_subscript_val_type): continue value = subscript.slice if not isinstance(value, nodes.Name): continue if subscript.value.scope() != node.scope(): # Ignore this subscript if it's not in the same # scope. This means that in the body of the for # loop, another scope was created, where the same # name for the iterating object was used. continue if value.name == node.target.name and ( isinstance(subscript.value, nodes.Name) and iterating_object.name == subscript.value.name or isinstance(subscript.value, nodes.Attribute) and iterating_object.attrname == subscript.value.attrname ): self.add_message("consider-using-enumerate", node=node) return def _check_consider_using_dict_items(self, node: nodes.For) -> None: """Add message when accessing dict values by index lookup.""" # Verify that we have a .keys() call and # that the object which is iterated is used as a subscript in the # body of the for. iterating_object_name = utils.get_iterating_dictionary_name(node) if iterating_object_name is None: return # Verify that the body of the for loop uses a subscript # with the object that was iterated. This uses some heuristics # in order to make sure that the same object is used in the # for body. for child in node.body: for subscript in child.nodes_of_class(nodes.Subscript): subscript = cast(nodes.Subscript, subscript) if not isinstance(subscript.value, (nodes.Name, nodes.Attribute)): continue value = subscript.slice if ( not isinstance(value, nodes.Name) or value.name != node.target.name or iterating_object_name != subscript.value.as_string() ): continue last_definition_lineno = value.lookup(value.name)[1][-1].lineno if last_definition_lineno > node.lineno: # Ignore this subscript if it has been redefined after # the for loop. 
This checks for the line number using .lookup() # to get the line number where the iterating object was last # defined and compare that to the for loop's line number continue if ( isinstance(subscript.parent, nodes.Assign) and subscript in subscript.parent.targets or isinstance(subscript.parent, nodes.AugAssign) and subscript == subscript.parent.target ): # Ignore this subscript if it is the target of an assignment # Early termination as dict index lookup is necessary return self.add_message("consider-using-dict-items", node=node) return @utils.check_messages( "consider-using-dict-items", "use-sequence-for-iteration", ) def visit_comprehension(self, node: nodes.Comprehension) -> None: self._check_consider_using_dict_items_comprehension(node) self._check_use_sequence_for_iteration(node) def _check_consider_using_dict_items_comprehension( self, node: nodes.Comprehension ) -> None: """Add message when accessing dict values by index lookup.""" iterating_object_name = utils.get_iterating_dictionary_name(node) if iterating_object_name is None: return for child in node.parent.get_children(): for subscript in child.nodes_of_class(nodes.Subscript): subscript = cast(nodes.Subscript, subscript) if not isinstance(subscript.value, (nodes.Name, nodes.Attribute)): continue value = subscript.slice if ( not isinstance(value, nodes.Name) or value.name != node.target.name or iterating_object_name != subscript.value.as_string() ): continue self.add_message("consider-using-dict-items", node=node) return def _check_use_sequence_for_iteration( self, node: Union[nodes.For, nodes.Comprehension] ) -> None: """Check if code iterates over an in-place defined set.""" if isinstance(node.iter, nodes.Set): self.add_message("use-sequence-for-iteration", node=node.iter) @utils.check_messages("consider-using-f-string") def visit_const(self, node: nodes.Const) -> None: if node.pytype() == "builtins.str" and not isinstance( node.parent, nodes.JoinedStr ): self._detect_replacable_format_call(node) def _detect_replacable_format_call(self, node: nodes.Const) -> None: """Check whether a string is used in a call to format() or '%' and whether it can be replaced by a f-string""" if ( isinstance(node.parent, nodes.Attribute) and node.parent.attrname == "format" ): # Allow assigning .format to a variable if isinstance(node.parent.parent, nodes.Assign): return if node.parent.parent.args: for arg in node.parent.parent.args: # If star expressions with more than 1 element are being used if isinstance(arg, nodes.Starred): inferred = utils.safe_infer(arg.value) if ( isinstance(inferred, astroid.List) and len(inferred.elts) > 1 ): return elif node.parent.parent.keywords: keyword_args = [ i[0] for i in utils.parse_format_method_string(node.value)[0] ] for keyword in node.parent.parent.keywords: # If keyword is used multiple times if keyword_args.count(keyword.arg) > 1: return keyword = utils.safe_infer(keyword.value) # If lists of more than one element are being unpacked if isinstance(keyword, nodes.Dict): if len(keyword.items) > 1 and len(keyword_args) > 1: return # If all tests pass, then raise message self.add_message( "consider-using-f-string", node=node, line=node.lineno, col_offset=node.col_offset, ) elif isinstance(node.parent, nodes.BinOp) and node.parent.op == "%": inferred_right = utils.safe_infer(node.parent.right) # If dicts or lists of length > 1 are used if isinstance(inferred_right, nodes.Dict): if len(inferred_right.items) > 1: return elif isinstance(inferred_right, nodes.List): if len(inferred_right.elts) > 1: return # If 
all tests pass, then raise message self.add_message( "consider-using-f-string", node=node, line=node.lineno, col_offset=node.col_offset, )
1
15,816
The added text doesn't really explain why it's possible. Maybe it would be better to name that as a separate case for this checker? (e.g. a dict lookup is quicker than a list comparison; see the sketch after this record)
PyCQA-pylint
py
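For context only, and not part of the record above: a minimal, hypothetical Python illustration of the pattern that the `consider-using-dict-items` checker in this record's `oldf` flags, i.e. indexing back into the dict that is being iterated instead of using `.items()`.

# Hypothetical example; the dict name and values are made up.
prices = {"apple": 3, "pear": 2, "plum": 4}

# Flagged form: every iteration pays for an extra dict index lookup.
for name in prices:
    print(name, prices[name])

# Preferred form: .items() yields key/value pairs in a single pass.
for name, price in prices.items():
    print(name, price)

Both loops print the same output; the second avoids the repeated `prices[name]` lookup the checker warns about.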
@@ -63,7 +63,7 @@ class Image implements EntityFileUploadInterface protected $position; /** - * @var \Datetime + * @var \DateTime * * @ORM\Column(type="datetime") */
1
<?php namespace Shopsys\FrameworkBundle\Component\Image; use DateTime; use Doctrine\ORM\Mapping as ORM; use Shopsys\FrameworkBundle\Component\FileUpload\EntityFileUploadInterface; use Shopsys\FrameworkBundle\Component\FileUpload\FileForUpload; use Shopsys\FrameworkBundle\Component\FileUpload\FileNamingConvention; use Shopsys\FrameworkBundle\Component\Image\Config\ImageConfig; /** * @ORM\Table(name="images", indexes={@ORM\Index(columns={"entity_name", "entity_id", "type"})}) * @ORM\Entity */ class Image implements EntityFileUploadInterface { /** @access protected */ const UPLOAD_KEY = 'image'; /** * @var int * * @ORM\Column(type="integer") * @ORM\Id * @ORM\GeneratedValue(strategy="IDENTITY") */ protected $id; /** * @var string * * @ORM\Column(type="string", length=100) */ protected $entityName; /** * @var int * * @ORM\Column(type="integer") */ protected $entityId; /** * @var string * * @ORM\Column(type="string", length=100, nullable=true) */ protected $type; /** * @var string * * @ORM\Column(type="string", length=5) */ protected $extension; /** * @var int * * @ORM\Column(type="integer", nullable=true) */ protected $position; /** * @var \Datetime * * @ORM\Column(type="datetime") */ protected $modifiedAt; /** * @var string|null */ protected $temporaryFilename; /** * @param string $entityName * @param int $entityId * @param string|null $type * @param string|null $temporaryFilename */ public function __construct($entityName, $entityId, $type, $temporaryFilename) { $this->entityName = $entityName; $this->entityId = $entityId; $this->type = $type; $this->setTemporaryFilename($temporaryFilename); } /** * @return \Shopsys\FrameworkBundle\Component\FileUpload\FileForUpload[] */ public function getTemporaryFilesForUpload() { $files = []; if ($this->temporaryFilename !== null) { $files[static::UPLOAD_KEY] = new FileForUpload( $this->temporaryFilename, true, $this->entityName, $this->type . '/' . ImageConfig::ORIGINAL_SIZE_NAME, FileNamingConvention::TYPE_ID ); } return $files; } /** * @param string $key * @param string $originalFilename */ public function setFileAsUploaded($key, $originalFilename) { if ($key === static::UPLOAD_KEY) { $this->extension = pathinfo($originalFilename, PATHINFO_EXTENSION); } else { throw new \Shopsys\FrameworkBundle\Component\FileUpload\Exception\InvalidFileKeyException($key); } } /** * @param string|null $temporaryFilename */ public function setTemporaryFilename($temporaryFilename) { $this->temporaryFilename = $temporaryFilename; // workaround: Entity must be changed so that preUpdate and postUpdate are called $this->modifiedAt = new DateTime(); } /** * @param int $position */ public function setPosition($position) { $this->position = $position; } /** * @return string */ public function getFilename() { return $this->id . '.' . 
$this->extension; } /** * @return int */ public function getId() { return $this->id; } /** * @return string */ public function getEntityName() { return $this->entityName; } /** * @return int */ public function getEntityId() { return $this->entityId; } /** * @return string|null */ public function getType() { return $this->type; } /** * @return string */ public function getExtension() { return $this->extension; } /** * @return \DateTime */ public function getModifiedAt() { return $this->modifiedAt; } /** * @param string $entityName * @param int $entityId */ public function checkForDelete(string $entityName, int $entityId) { if ($this->entityName !== $entityName || $this->entityId !== $entityId) { throw new \Shopsys\FrameworkBundle\Component\Image\Exception\ImageNotFoundException( sprintf( 'Entity %s with ID %s does not own image with ID %s', $entityName, $entityId, $this->id ) ); } } }
1
16,299
Just checking whether we are missing some Doctrine extension: the nullable `position` attribute above is annotated as `int` instead of `int|null`. Maybe for lvl2 it is OK, though (see the sketch after this record).
shopsys-shopsys
php
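For illustration only, and not part of the record above: a minimal PHP sketch of the `int|null` annotation the comment asks about. The class and getter names here are hypothetical; the point is that a column created with `nullable=true` should advertise the null case in its `@var` docblock.

<?php
declare(strict_types=1);

/**
 * Hypothetical example class, not taken from the project above.
 */
class PositionExample
{
    /**
     * @var int|null null until a position has been assigned
     */
    protected $position;

    public function getPosition(): ?int
    {
        return $this->position;
    }
}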
@@ -59,6 +59,13 @@ public class NodeStatus { } } + public boolean hasCapability(Capabilities caps) { + long count = slots.stream() + .filter(slot -> slot.isSupporting(caps)) + .count(); + return count > 0; + } + public boolean hasCapacity() { return slots.stream().anyMatch(slot -> !slot.getSession().isPresent()); }
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.grid.data; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import org.openqa.selenium.Capabilities; import org.openqa.selenium.internal.Require; import org.openqa.selenium.json.JsonInput; import org.openqa.selenium.json.TypeToken; import java.net.URI; import java.time.Instant; import java.util.Map; import java.util.Objects; import java.util.Set; public class NodeStatus { private final NodeId nodeId; private final URI externalUri; private final int maxSessionCount; private final Set<Slot> slots; private final Availability availability; public NodeStatus( NodeId nodeId, URI externalUri, int maxSessionCount, Set<Slot> slots, Availability availability) { this.nodeId = Require.nonNull("Node id", nodeId); this.externalUri = Require.nonNull("URI", externalUri); this.maxSessionCount = Require.positive("Max session count", maxSessionCount, "Make sure that a driver is available on $PATH"); this.slots = ImmutableSet.copyOf(Require.nonNull("Slots", slots)); this.availability = Require.nonNull("Availability", availability); ImmutableSet.Builder<Session> sessions = ImmutableSet.builder(); for (Slot slot : slots) { slot.getSession().ifPresent(sessions::add); } } public boolean hasCapacity() { return slots.stream().anyMatch(slot -> !slot.getSession().isPresent()); } public boolean hasCapacity(Capabilities caps) { long count = slots.stream() .filter(slot -> !slot.getSession().isPresent()) .filter(slot -> slot.isSupporting(caps)) .count(); return count > 0; } public NodeId getId() { return nodeId; } public URI getUri() { return externalUri; } public int getMaxSessionCount() { return maxSessionCount; } public Set<Slot> getSlots() { return slots; } public Availability getAvailability() { return availability; } public float getLoad() { float inUse = slots.parallelStream() .filter(slot -> slot.getSession().isPresent()) .count(); return (inUse / (float) maxSessionCount) * 100f; } public long getLastSessionCreated() { return slots.parallelStream() .map(Slot::getLastStarted) .mapToLong(Instant::toEpochMilli) .max() .orElse(0); } @Override public boolean equals(Object o) { if (!(o instanceof NodeStatus)) { return false; } NodeStatus that = (NodeStatus) o; return Objects.equals(this.nodeId, that.nodeId) && Objects.equals(this.externalUri, that.externalUri) && this.maxSessionCount == that.maxSessionCount && Objects.equals(this.slots, that.slots) && Objects.equals(this.availability, that.availability); } @Override public int hashCode() { return Objects.hash(nodeId, externalUri, maxSessionCount, slots); } private Map<String, Object> toJson() { return new ImmutableMap.Builder<String, Object>() .put("id", nodeId) .put("uri", externalUri) .put("maxSessions", 
maxSessionCount) .put("slots", slots) .put("availability", availability) .build(); } public static NodeStatus fromJson(JsonInput input) { NodeId nodeId = null; URI uri = null; int maxSessions = 0; Set<Slot> slots = null; Availability availability = null; input.beginObject(); while (input.hasNext()) { switch (input.nextName()) { case "availability": availability = input.read(Availability.class); break; case "id": nodeId = input.read(NodeId.class); break; case "maxSessions": maxSessions = input.read(Integer.class); break; case "slots": slots = input.read(new TypeToken<Set<Slot>>(){}.getType()); break; case "uri": uri = input.read(URI.class); break; default: input.skipValue(); break; } } input.endObject(); return new NodeStatus( nodeId, uri, maxSessions, slots, availability); } }
1
18,174
Prefer `Stream.anyMatch` instead of iterating over all slots.
SeleniumHQ-selenium
js
@@ -1030,7 +1030,7 @@ bool StatelessValidation::manual_PreCallValidateCreateImage(VkDevice device, con "vkCreateImage(): Tiling is VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT but pNext must have " "either VkImageDrmFormatModifierListCreateInfoEXT or " "VkImageDrmFormatModifierExplicitCreateInfoEXT in the pNext chain"); - } else if (drm_format_mod_list != nullptr) { + } else if (drm_format_mod_explict != nullptr) { image_create_drm_format_modifiers.push_back(drm_format_mod_explict->drmFormatModifier); } else if (drm_format_mod_list != nullptr) { for (uint32_t i = 0; i < drm_format_mod_list->drmFormatModifierCount; i++) {
1
/* Copyright (c) 2015-2021 The Khronos Group Inc. * Copyright (c) 2015-2021 Valve Corporation * Copyright (c) 2015-2021 LunarG, Inc. * Copyright (C) 2015-2021 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Author: Mark Lobodzinski <[email protected]> * Author: John Zulauf <[email protected]> */ #include <cmath> #include "chassis.h" #include "stateless_validation.h" #include "layer_chassis_dispatch.h" static const int kMaxParamCheckerStringLength = 256; template <typename T> inline bool in_inclusive_range(const T &value, const T &min, const T &max) { // Using only < for generality and || for early abort return !((value < min) || (max < value)); } read_lock_guard_t StatelessValidation::read_lock() { return read_lock_guard_t(validation_object_mutex, std::defer_lock); } write_lock_guard_t StatelessValidation::write_lock() { return write_lock_guard_t(validation_object_mutex, std::defer_lock); } static std::unordered_map<VkCommandBuffer, VkCommandPool> secondary_cb_map{}; static ReadWriteLock secondary_cb_map_mutex; static read_lock_guard_t cb_read_lock() { return read_lock_guard_t(secondary_cb_map_mutex); } static write_lock_guard_t cb_write_lock() { return write_lock_guard_t(secondary_cb_map_mutex); } bool StatelessValidation::validate_string(const char *apiName, const ParameterName &stringName, const std::string &vuid, const char *validateString) const { bool skip = false; VkStringErrorFlags result = vk_string_validate(kMaxParamCheckerStringLength, validateString); if (result == VK_STRING_ERROR_NONE) { return skip; } else if (result & VK_STRING_ERROR_LENGTH) { skip = LogError(device, vuid, "%s: string %s exceeds max length %d", apiName, stringName.get_name().c_str(), kMaxParamCheckerStringLength); } else if (result & VK_STRING_ERROR_BAD_DATA) { skip = LogError(device, vuid, "%s: string %s contains invalid characters or is badly formed", apiName, stringName.get_name().c_str()); } return skip; } bool StatelessValidation::validate_api_version(uint32_t api_version, uint32_t effective_api_version) const { bool skip = false; uint32_t api_version_nopatch = VK_MAKE_VERSION(VK_VERSION_MAJOR(api_version), VK_VERSION_MINOR(api_version), 0); if (api_version_nopatch != effective_api_version) { if ((api_version_nopatch < VK_API_VERSION_1_0) && (api_version != 0)) { skip |= LogError(instance, "VUID-VkApplicationInfo-apiVersion-04010", "Invalid CreateInstance->pCreateInfo->pApplicationInfo.apiVersion number (0x%08x). " "Using VK_API_VERSION_%" PRIu32 "_%" PRIu32 ".", api_version, VK_VERSION_MAJOR(effective_api_version), VK_VERSION_MINOR(effective_api_version)); } else { skip |= LogWarning(instance, kVUIDUndefined, "Unrecognized CreateInstance->pCreateInfo->pApplicationInfo.apiVersion number (0x%08x). 
" "Assuming VK_API_VERSION_%" PRIu32 "_%" PRIu32 ".", api_version, VK_VERSION_MAJOR(effective_api_version), VK_VERSION_MINOR(effective_api_version)); } } return skip; } bool StatelessValidation::validate_instance_extensions(const VkInstanceCreateInfo *pCreateInfo) const { bool skip = false; // Create and use a local instance extension object, as an actual instance has not been created yet uint32_t specified_version = (pCreateInfo->pApplicationInfo ? pCreateInfo->pApplicationInfo->apiVersion : VK_API_VERSION_1_0); InstanceExtensions local_instance_extensions; local_instance_extensions.InitFromInstanceCreateInfo(specified_version, pCreateInfo); for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { skip |= validate_extension_reqs(local_instance_extensions, "VUID-vkCreateInstance-ppEnabledExtensionNames-01388", "instance", pCreateInfo->ppEnabledExtensionNames[i]); } return skip; } bool StatelessValidation::SupportedByPdev(const VkPhysicalDevice physical_device, const std::string ext_name) const { if (instance_extensions.vk_khr_get_physical_device_properties_2) { // Struct is legal IF it's supported const auto &dev_exts_enumerated = device_extensions_enumerated.find(physical_device); if (dev_exts_enumerated == device_extensions_enumerated.end()) return true; auto enum_iter = dev_exts_enumerated->second.find(ext_name); if (enum_iter != dev_exts_enumerated->second.cend()) { return true; } } return false; } bool StatelessValidation::validate_validation_features(const VkInstanceCreateInfo *pCreateInfo, const VkValidationFeaturesEXT *validation_features) const { bool skip = false; bool debug_printf = false; bool gpu_assisted = false; bool reserve_slot = false; for (uint32_t i = 0; i < validation_features->enabledValidationFeatureCount; i++) { switch (validation_features->pEnabledValidationFeatures[i]) { case VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT: gpu_assisted = true; break; case VK_VALIDATION_FEATURE_ENABLE_DEBUG_PRINTF_EXT: debug_printf = true; break; case VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT: reserve_slot = true; break; default: break; } } if (reserve_slot && !gpu_assisted) { skip |= LogError(instance, "VUID-VkValidationFeaturesEXT-pEnabledValidationFeatures-02967", "If VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT is in pEnabledValidationFeatures, " "VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT must also be in pEnabledValidationFeatures."); } if (gpu_assisted && debug_printf) { skip |= LogError(instance, "VUID-VkValidationFeaturesEXT-pEnabledValidationFeatures-02968", "If VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT is in pEnabledValidationFeatures, " "VK_VALIDATION_FEATURE_ENABLE_DEBUG_PRINTF_EXT must not also be in pEnabledValidationFeatures."); } return skip; } template <typename ExtensionState> ExtEnabled extension_state_by_name(const ExtensionState &extensions, const char *extension_name) { if (!extension_name) return kNotEnabled; // null strings specify nothing auto info = ExtensionState::get_info(extension_name); ExtEnabled state = info.state ? 
extensions.*(info.state) : kNotEnabled; // unknown extensions can't be enabled in extension struct return state; } bool StatelessValidation::manual_PreCallValidateCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) const { bool skip = false; // Note: From the spec-- // Providing a NULL VkInstanceCreateInfo::pApplicationInfo or providing an apiVersion of 0 is equivalent to providing // an apiVersion of VK_MAKE_VERSION(1, 0, 0). (a.k.a. VK_API_VERSION_1_0) uint32_t local_api_version = (pCreateInfo->pApplicationInfo && pCreateInfo->pApplicationInfo->apiVersion) ? pCreateInfo->pApplicationInfo->apiVersion : VK_API_VERSION_1_0; skip |= validate_api_version(local_api_version, api_version); skip |= validate_instance_extensions(pCreateInfo); const auto *validation_features = LvlFindInChain<VkValidationFeaturesEXT>(pCreateInfo->pNext); if (validation_features) skip |= validate_validation_features(pCreateInfo, validation_features); return skip; } void StatelessValidation::PostCallRecordCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance, VkResult result) { auto instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), layer_data_map); // Copy extension data into local object if (result != VK_SUCCESS) return; this->instance_extensions = instance_data->instance_extensions; } void StatelessValidation::CommonPostCallRecordEnumeratePhysicalDevice(const VkPhysicalDevice *phys_devices, const int count) { // Assume phys_devices is valid assert(phys_devices); for (int i = 0; i < count; ++i) { const auto &phys_device = phys_devices[i]; if (0 == physical_device_properties_map.count(phys_device)) { auto phys_dev_props = new VkPhysicalDeviceProperties; DispatchGetPhysicalDeviceProperties(phys_device, phys_dev_props); physical_device_properties_map[phys_device] = phys_dev_props; // Enumerate the Device Ext Properties to save the PhysicalDevice supported extension state uint32_t ext_count = 0; std::unordered_set<std::string> dev_exts_enumerated{}; std::vector<VkExtensionProperties> ext_props{}; instance_dispatch_table.EnumerateDeviceExtensionProperties(phys_device, nullptr, &ext_count, nullptr); ext_props.resize(ext_count); instance_dispatch_table.EnumerateDeviceExtensionProperties(phys_device, nullptr, &ext_count, ext_props.data()); for (uint32_t j = 0; j < ext_count; j++) { dev_exts_enumerated.insert(ext_props[j].extensionName); } device_extensions_enumerated[phys_device] = std::move(dev_exts_enumerated); } } } void StatelessValidation::PostCallRecordEnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount, VkPhysicalDevice *pPhysicalDevices, VkResult result) { if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) { return; } if (pPhysicalDeviceCount && pPhysicalDevices) { CommonPostCallRecordEnumeratePhysicalDevice(pPhysicalDevices, *pPhysicalDeviceCount); } } void StatelessValidation::PostCallRecordEnumeratePhysicalDeviceGroups( VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties, VkResult result) { if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) { return; } if (pPhysicalDeviceGroupCount && pPhysicalDeviceGroupProperties) { for (uint32_t i = 0; i < *pPhysicalDeviceGroupCount; i++) { const auto &group = pPhysicalDeviceGroupProperties[i]; CommonPostCallRecordEnumeratePhysicalDevice(group.physicalDevices, group.physicalDeviceCount); } } } void 
StatelessValidation::PreCallRecordDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) { for (auto it = physical_device_properties_map.begin(); it != physical_device_properties_map.end();) { delete (it->second); it = physical_device_properties_map.erase(it); } }; void StatelessValidation::PostCallRecordCreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) { auto device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map); if (result != VK_SUCCESS) return; ValidationObject *validation_data = GetValidationObject(device_data->object_dispatch, LayerObjectTypeParameterValidation); StatelessValidation *stateless_validation = static_cast<StatelessValidation *>(validation_data); // Parmeter validation also uses extension data stateless_validation->device_extensions = this->device_extensions; VkPhysicalDeviceProperties device_properties = {}; // Need to get instance and do a getlayerdata call... DispatchGetPhysicalDeviceProperties(physicalDevice, &device_properties); memcpy(&stateless_validation->device_limits, &device_properties.limits, sizeof(VkPhysicalDeviceLimits)); if (device_extensions.vk_nv_shading_rate_image) { // Get the needed shading rate image limits auto shading_rate_image_props = LvlInitStruct<VkPhysicalDeviceShadingRateImagePropertiesNV>(); auto prop2 = LvlInitStruct<VkPhysicalDeviceProperties2>(&shading_rate_image_props); DispatchGetPhysicalDeviceProperties2KHR(physicalDevice, &prop2); phys_dev_ext_props.shading_rate_image_props = shading_rate_image_props; } if (device_extensions.vk_nv_mesh_shader) { // Get the needed mesh shader limits auto mesh_shader_props = LvlInitStruct<VkPhysicalDeviceMeshShaderPropertiesNV>(); auto prop2 = LvlInitStruct<VkPhysicalDeviceProperties2>(&mesh_shader_props); DispatchGetPhysicalDeviceProperties2KHR(physicalDevice, &prop2); phys_dev_ext_props.mesh_shader_props = mesh_shader_props; } if (device_extensions.vk_nv_ray_tracing) { // Get the needed ray tracing limits auto ray_tracing_props = LvlInitStruct<VkPhysicalDeviceRayTracingPropertiesNV>(); auto prop2 = LvlInitStruct<VkPhysicalDeviceProperties2>(&ray_tracing_props); DispatchGetPhysicalDeviceProperties2KHR(physicalDevice, &prop2); phys_dev_ext_props.ray_tracing_propsNV = ray_tracing_props; } if (device_extensions.vk_khr_ray_tracing_pipeline) { // Get the needed ray tracing limits auto ray_tracing_props = LvlInitStruct<VkPhysicalDeviceRayTracingPipelinePropertiesKHR>(); auto prop2 = LvlInitStruct<VkPhysicalDeviceProperties2>(&ray_tracing_props); DispatchGetPhysicalDeviceProperties2KHR(physicalDevice, &prop2); phys_dev_ext_props.ray_tracing_propsKHR = ray_tracing_props; } if (device_extensions.vk_khr_acceleration_structure) { // Get the needed ray tracing acc structure limits auto acc_structure_props = LvlInitStruct<VkPhysicalDeviceAccelerationStructurePropertiesKHR>(); auto prop2 = LvlInitStruct<VkPhysicalDeviceProperties2>(&acc_structure_props); DispatchGetPhysicalDeviceProperties2KHR(physicalDevice, &prop2); phys_dev_ext_props.acc_structure_props = acc_structure_props; } if (device_extensions.vk_ext_transform_feedback) { // Get the needed transform feedback limits auto transform_feedback_props = LvlInitStruct<VkPhysicalDeviceTransformFeedbackPropertiesEXT>(); auto prop2 = LvlInitStruct<VkPhysicalDeviceProperties2>(&transform_feedback_props); DispatchGetPhysicalDeviceProperties2KHR(physicalDevice, &prop2); phys_dev_ext_props.transform_feedback_props = 
transform_feedback_props; } stateless_validation->phys_dev_ext_props = this->phys_dev_ext_props; // Save app-enabled features in this device's validation object // The enabled features can come from either pEnabledFeatures, or from the pNext chain const auto *features2 = LvlFindInChain<VkPhysicalDeviceFeatures2>(pCreateInfo->pNext); safe_VkPhysicalDeviceFeatures2 tmp_features2_state; tmp_features2_state.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2; if (features2) { tmp_features2_state.features = features2->features; } else if (pCreateInfo->pEnabledFeatures) { tmp_features2_state.features = *pCreateInfo->pEnabledFeatures; } else { tmp_features2_state.features = {}; } // Use pCreateInfo->pNext to get full chain stateless_validation->device_createinfo_pnext = SafePnextCopy(pCreateInfo->pNext); stateless_validation->physical_device_features2 = tmp_features2_state; } bool StatelessValidation::manual_PreCallValidateCreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) const { bool skip = false; for (size_t i = 0; i < pCreateInfo->enabledLayerCount; i++) { skip |= validate_string("vkCreateDevice", "pCreateInfo->ppEnabledLayerNames", "VUID-VkDeviceCreateInfo-ppEnabledLayerNames-parameter", pCreateInfo->ppEnabledLayerNames[i]); } // If this device supports VK_KHR_portability_subset, it must be enabled const std::string portability_extension_name("VK_KHR_portability_subset"); const auto &dev_extensions = device_extensions_enumerated.at(physicalDevice); const bool portability_supported = dev_extensions.count(portability_extension_name) != 0; bool portability_requested = false; for (size_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { skip |= validate_string("vkCreateDevice", "pCreateInfo->ppEnabledExtensionNames", "VUID-VkDeviceCreateInfo-ppEnabledExtensionNames-parameter", pCreateInfo->ppEnabledExtensionNames[i]); skip |= validate_extension_reqs(device_extensions, "VUID-vkCreateDevice-ppEnabledExtensionNames-01387", "device", pCreateInfo->ppEnabledExtensionNames[i]); if (portability_extension_name == pCreateInfo->ppEnabledExtensionNames[i]) { portability_requested = true; } } if (portability_supported && !portability_requested) { skip |= LogError(physicalDevice, "VUID-VkDeviceCreateInfo-pProperties-04451", "vkCreateDevice: VK_KHR_portability_subset must be enabled because physical device %s supports it", report_data->FormatHandle(physicalDevice).c_str()); } { bool maint1 = IsExtEnabled(extension_state_by_name(device_extensions, VK_KHR_MAINTENANCE1_EXTENSION_NAME)); bool negative_viewport = IsExtEnabled(extension_state_by_name(device_extensions, VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME)); if (maint1 && negative_viewport) { skip |= LogError(device, "VUID-VkDeviceCreateInfo-ppEnabledExtensionNames-00374", "VkDeviceCreateInfo->ppEnabledExtensionNames must not simultaneously include VK_KHR_maintenance1 and " "VK_AMD_negative_viewport_height."); } } { bool khr_bda = IsExtEnabled(extension_state_by_name(device_extensions, VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME)); bool ext_bda = IsExtEnabled(extension_state_by_name(device_extensions, VK_EXT_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME)); if (khr_bda && ext_bda) { skip |= LogError(device, "VUID-VkDeviceCreateInfo-ppEnabledExtensionNames-03328", "VkDeviceCreateInfo->ppEnabledExtensionNames must not contain both VK_KHR_buffer_device_address and " "VK_EXT_buffer_device_address."); } } if (pCreateInfo->pNext != NULL && pCreateInfo->pEnabledFeatures) { // 
Check for get_physical_device_properties2 struct const auto *features2 = LvlFindInChain<VkPhysicalDeviceFeatures2>(pCreateInfo->pNext); if (features2) { // Cannot include VkPhysicalDeviceFeatures2 and have non-null pEnabledFeatures skip |= LogError(device, "VUID-VkDeviceCreateInfo-pNext-00373", "VkDeviceCreateInfo->pNext includes a VkPhysicalDeviceFeatures2 struct when " "pCreateInfo->pEnabledFeatures is non-NULL."); } } auto features2 = LvlFindInChain<VkPhysicalDeviceFeatures2>(pCreateInfo->pNext); const VkPhysicalDeviceFeatures *features = features2 ? &features2->features : pCreateInfo->pEnabledFeatures; const auto *robustness2_features = LvlFindInChain<VkPhysicalDeviceRobustness2FeaturesEXT>(pCreateInfo->pNext); if (features && robustness2_features && robustness2_features->robustBufferAccess2 && !features->robustBufferAccess) { skip |= LogError(device, "VUID-VkPhysicalDeviceRobustness2FeaturesEXT-robustBufferAccess2-04000", "If robustBufferAccess2 is enabled then robustBufferAccess must be enabled."); } const auto *raytracing_features = LvlFindInChain<VkPhysicalDeviceRayTracingPipelineFeaturesKHR>(pCreateInfo->pNext); if (raytracing_features && raytracing_features->rayTracingPipelineShaderGroupHandleCaptureReplayMixed && !raytracing_features->rayTracingPipelineShaderGroupHandleCaptureReplay) { skip |= LogError( device, "VUID-VkPhysicalDeviceRayTracingPipelineFeaturesKHR-rayTracingPipelineShaderGroupHandleCaptureReplayMixed-03575", "If rayTracingPipelineShaderGroupHandleCaptureReplayMixed is VK_TRUE, rayTracingPipelineShaderGroupHandleCaptureReplay " "must also be VK_TRUE."); } auto vertex_attribute_divisor_features = LvlFindInChain<VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT>(pCreateInfo->pNext); if (vertex_attribute_divisor_features && (!device_extensions.vk_ext_vertex_attribute_divisor)) { skip |= LogError(device, kVUID_PVError_ExtensionNotEnabled, "VkDeviceCreateInfo->pNext includes a VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT " "struct, VK_EXT_vertex_attribute_divisor must be enabled when it creates a device."); } const auto *vulkan_11_features = LvlFindInChain<VkPhysicalDeviceVulkan11Features>(pCreateInfo->pNext); if (vulkan_11_features) { const VkBaseOutStructure *current = reinterpret_cast<const VkBaseOutStructure *>(pCreateInfo->pNext); while (current) { if (current->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES || current->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES || current->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES || current->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES || current->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES || current->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES) { skip |= LogError( instance, "VUID-VkDeviceCreateInfo-pNext-02829", "If the pNext chain includes a VkPhysicalDeviceVulkan11Features structure, then it must not include a " "VkPhysicalDevice16BitStorageFeatures, VkPhysicalDeviceMultiviewFeatures, " "VkPhysicalDeviceVariablePointersFeatures, VkPhysicalDeviceProtectedMemoryFeatures, " "VkPhysicalDeviceSamplerYcbcrConversionFeatures, or VkPhysicalDeviceShaderDrawParametersFeatures structure"); break; } current = reinterpret_cast<const VkBaseOutStructure *>(current->pNext); } // Check features are enabled if matching extension is passed in as well for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { const char *extension = pCreateInfo->ppEnabledExtensionNames[i]; if ((0 
== strncmp(extension, VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME, VK_MAX_EXTENSION_NAME_SIZE)) && (vulkan_11_features->shaderDrawParameters == VK_FALSE)) { skip |= LogError( instance, "VUID-VkDeviceCreateInfo-ppEnabledExtensions-04476", "vkCreateDevice(): %s is enabled but VkPhysicalDeviceVulkan11Features::shaderDrawParameters is not VK_TRUE.", VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME); } } } const auto *vulkan_12_features = LvlFindInChain<VkPhysicalDeviceVulkan12Features>(pCreateInfo->pNext); if (vulkan_12_features) { const VkBaseOutStructure *current = reinterpret_cast<const VkBaseOutStructure *>(pCreateInfo->pNext); while (current) { if (current->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES || current->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES || current->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES || current->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES || current->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES || current->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES || current->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES || current->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES || current->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES || current->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES || current->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES || current->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES || current->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES) { skip |= LogError( instance, "VUID-VkDeviceCreateInfo-pNext-02830", "If the pNext chain includes a VkPhysicalDeviceVulkan12Features structure, then it must not include a " "VkPhysicalDevice8BitStorageFeatures, VkPhysicalDeviceShaderAtomicInt64Features, " "VkPhysicalDeviceShaderFloat16Int8Features, VkPhysicalDeviceDescriptorIndexingFeatures, " "VkPhysicalDeviceScalarBlockLayoutFeatures, VkPhysicalDeviceImagelessFramebufferFeatures, " "VkPhysicalDeviceUniformBufferStandardLayoutFeatures, VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures, " "VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures, VkPhysicalDeviceHostQueryResetFeatures, " "VkPhysicalDeviceTimelineSemaphoreFeatures, VkPhysicalDeviceBufferDeviceAddressFeatures, or " "VkPhysicalDeviceVulkanMemoryModelFeatures structure"); break; } current = reinterpret_cast<const VkBaseOutStructure *>(current->pNext); } // Check features are enabled if matching extension is passed in as well for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { const char *extension = pCreateInfo->ppEnabledExtensionNames[i]; if ((0 == strncmp(extension, VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME, VK_MAX_EXTENSION_NAME_SIZE)) && (vulkan_12_features->drawIndirectCount == VK_FALSE)) { skip |= LogError( instance, "VUID-VkDeviceCreateInfo-ppEnabledExtensions-02831", "vkCreateDevice(): %s is enabled but VkPhysicalDeviceVulkan12Features::drawIndirectCount is not VK_TRUE.", VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME); } if ((0 == strncmp(extension, VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME, VK_MAX_EXTENSION_NAME_SIZE)) && (vulkan_12_features->samplerMirrorClampToEdge == VK_FALSE)) { skip |= LogError(instance, "VUID-VkDeviceCreateInfo-ppEnabledExtensions-02832", "vkCreateDevice(): %s is enabled but 
VkPhysicalDeviceVulkan12Features::samplerMirrorClampToEdge " "is not VK_TRUE.", VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME); } if ((0 == strncmp(extension, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME, VK_MAX_EXTENSION_NAME_SIZE)) && (vulkan_12_features->descriptorIndexing == VK_FALSE)) { skip |= LogError( instance, "VUID-VkDeviceCreateInfo-ppEnabledExtensions-02833", "vkCreateDevice(): %s is enabled but VkPhysicalDeviceVulkan12Features::descriptorIndexing is not VK_TRUE.", VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME); } if ((0 == strncmp(extension, VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME, VK_MAX_EXTENSION_NAME_SIZE)) && (vulkan_12_features->samplerFilterMinmax == VK_FALSE)) { skip |= LogError( instance, "VUID-VkDeviceCreateInfo-ppEnabledExtensions-02834", "vkCreateDevice(): %s is enabled but VkPhysicalDeviceVulkan12Features::samplerFilterMinmax is not VK_TRUE.", VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME); } if ((0 == strncmp(extension, VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME, VK_MAX_EXTENSION_NAME_SIZE)) && ((vulkan_12_features->shaderOutputViewportIndex == VK_FALSE) || (vulkan_12_features->shaderOutputLayer == VK_FALSE))) { skip |= LogError(instance, "VUID-VkDeviceCreateInfo-ppEnabledExtensions-02835", "vkCreateDevice(): %s is enabled but both VkPhysicalDeviceVulkan12Features::shaderOutputViewportIndex " "and VkPhysicalDeviceVulkan12Features::shaderOutputLayer are not VK_TRUE.", VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME); } } } // Validate pCreateInfo->pQueueCreateInfos if (pCreateInfo->pQueueCreateInfos) { std::unordered_set<uint32_t> set; for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; ++i) { const VkDeviceQueueCreateInfo &queue_create_info = pCreateInfo->pQueueCreateInfos[i]; const uint32_t requested_queue_family = queue_create_info.queueFamilyIndex; if (requested_queue_family == VK_QUEUE_FAMILY_IGNORED) { skip |= LogError(physicalDevice, "VUID-VkDeviceQueueCreateInfo-queueFamilyIndex-00381", "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family " "index value.", i); } else if (set.count(requested_queue_family)) { skip |= LogError(physicalDevice, "VUID-VkDeviceCreateInfo-queueFamilyIndex-00372", "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueFamilyIndex (=%" PRIu32 ") is not unique within pCreateInfo->pQueueCreateInfos array.", i, requested_queue_family); } else { set.insert(requested_queue_family); } if (queue_create_info.pQueuePriorities != nullptr) { for (uint32_t j = 0; j < queue_create_info.queueCount; ++j) { const float queue_priority = queue_create_info.pQueuePriorities[j]; if (!(queue_priority >= 0.f) || !(queue_priority <= 1.f)) { skip |= LogError(physicalDevice, "VUID-VkDeviceQueueCreateInfo-pQueuePriorities-00383", "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].pQueuePriorities[%" PRIu32 "] (=%f) is not between 0 and 1 (inclusive).", i, j, queue_priority); } } } // Need to know if protectedMemory feature is passed in preCall to creating the device VkBool32 protected_memory = VK_FALSE; const VkPhysicalDeviceProtectedMemoryFeatures *protected_features = LvlFindInChain<VkPhysicalDeviceProtectedMemoryFeatures>(pCreateInfo->pNext); if (protected_features) { protected_memory = protected_features->protectedMemory; } else if (vulkan_11_features) { protected_memory = vulkan_11_features->protectedMemory; } if ((queue_create_info.flags == VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT) && (protected_memory == VK_FALSE)) { 
skip |= LogError(physicalDevice, "VUID-VkDeviceQueueCreateInfo-flags-02861", "vkCreateDevice: pCreateInfo->flags set to VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT without the " "protectedMemory feature being set as well."); } } } // feature dependencies for VK_KHR_variable_pointers const auto *variable_pointers_features = LvlFindInChain<VkPhysicalDeviceVariablePointersFeatures>(pCreateInfo->pNext); VkBool32 variable_pointers = VK_FALSE; VkBool32 variable_pointers_storage_buffer = VK_FALSE; if (vulkan_11_features) { variable_pointers = vulkan_11_features->variablePointers; variable_pointers_storage_buffer = vulkan_11_features->variablePointersStorageBuffer; } else if (variable_pointers_features) { variable_pointers = variable_pointers_features->variablePointers; variable_pointers_storage_buffer = variable_pointers_features->variablePointersStorageBuffer; } if ((variable_pointers == VK_TRUE) && (variable_pointers_storage_buffer == VK_FALSE)) { skip |= LogError(instance, "VUID-VkPhysicalDeviceVariablePointersFeatures-variablePointers-01431", "If variablePointers is VK_TRUE then variablePointersStorageBuffer also needs to be VK_TRUE"); } // feature dependencies for VK_KHR_multiview const auto *multiview_features = LvlFindInChain<VkPhysicalDeviceMultiviewFeatures>(pCreateInfo->pNext); VkBool32 multiview = VK_FALSE; VkBool32 multiview_geometry_shader = VK_FALSE; VkBool32 multiview_tessellation_shader = VK_FALSE; if (vulkan_11_features) { multiview = vulkan_11_features->multiview; multiview_geometry_shader = vulkan_11_features->multiviewGeometryShader; multiview_tessellation_shader = vulkan_11_features->multiviewTessellationShader; } else if (multiview_features) { multiview = multiview_features->multiview; multiview_geometry_shader = multiview_features->multiviewGeometryShader; multiview_tessellation_shader = multiview_features->multiviewTessellationShader; } if ((multiview == VK_FALSE) && (multiview_geometry_shader == VK_TRUE)) { skip |= LogError(instance, "VUID-VkPhysicalDeviceMultiviewFeatures-multiviewGeometryShader-00580", "If multiviewGeometryShader is VK_TRUE then multiview also needs to be VK_TRUE"); } if ((multiview == VK_FALSE) && (multiview_tessellation_shader == VK_TRUE)) { skip |= LogError(instance, "VUID-VkPhysicalDeviceMultiviewFeatures-multiviewTessellationShader-00581", "If multiviewTessellationShader is VK_TRUE then multiview also needs to be VK_TRUE"); } return skip; } bool StatelessValidation::require_device_extension(bool flag, char const *function_name, char const *extension_name) const { if (!flag) { return LogError(device, kVUID_PVError_ExtensionNotEnabled, "%s() called even though the %s extension was not enabled for this VkDevice.", function_name, extension_name); } return false; } bool StatelessValidation::manual_PreCallValidateCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) const { bool skip = false; if (pCreateInfo != nullptr) { skip |= ValidateGreaterThanZero(pCreateInfo->size, "pCreateInfo->size", "VUID-VkBufferCreateInfo-size-00912", "vkCreateBuffer"); // Validation for parameters excluded from the generated validation code due to a 'noautovalidity' tag in vk.xml if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT) { // If sharingMode is VK_SHARING_MODE_CONCURRENT, queueFamilyIndexCount must be greater than 1 if (pCreateInfo->queueFamilyIndexCount <= 1) { skip |= LogError(device, "VUID-VkBufferCreateInfo-sharingMode-00914", "vkCreateBuffer: if pCreateInfo->sharingMode is 
VK_SHARING_MODE_CONCURRENT, " "pCreateInfo->queueFamilyIndexCount must be greater than 1."); } // If sharingMode is VK_SHARING_MODE_CONCURRENT, pQueueFamilyIndices must be a pointer to an array of // queueFamilyIndexCount uint32_t values if (pCreateInfo->pQueueFamilyIndices == nullptr) { skip |= LogError(device, "VUID-VkBufferCreateInfo-sharingMode-00913", "vkCreateBuffer: if pCreateInfo->sharingMode is VK_SHARING_MODE_CONCURRENT, " "pCreateInfo->pQueueFamilyIndices must be a pointer to an array of " "pCreateInfo->queueFamilyIndexCount uint32_t values."); } } if ((pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) && (!physical_device_features.sparseBinding)) { skip |= LogError(device, "VUID-VkBufferCreateInfo-flags-00915", "vkCreateBuffer(): the sparseBinding device feature is disabled: Buffers cannot be created with the " "VK_BUFFER_CREATE_SPARSE_BINDING_BIT set."); } if ((pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT) && (!physical_device_features.sparseResidencyBuffer)) { skip |= LogError(device, "VUID-VkBufferCreateInfo-flags-00916", "vkCreateBuffer(): the sparseResidencyBuffer device feature is disabled: Buffers cannot be created with " "the VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT set."); } if ((pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_ALIASED_BIT) && (!physical_device_features.sparseResidencyAliased)) { skip |= LogError(device, "VUID-VkBufferCreateInfo-flags-00917", "vkCreateBuffer(): the sparseResidencyAliased device feature is disabled: Buffers cannot be created with " "the VK_BUFFER_CREATE_SPARSE_ALIASED_BIT set."); } // If flags contains VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT or VK_BUFFER_CREATE_SPARSE_ALIASED_BIT, it must also contain // VK_BUFFER_CREATE_SPARSE_BINDING_BIT if (((pCreateInfo->flags & (VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT | VK_BUFFER_CREATE_SPARSE_ALIASED_BIT)) != 0) && ((pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) != VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) { skip |= LogError(device, "VUID-VkBufferCreateInfo-flags-00918", "vkCreateBuffer: if pCreateInfo->flags contains VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT or " "VK_BUFFER_CREATE_SPARSE_ALIASED_BIT, it must also contain VK_BUFFER_CREATE_SPARSE_BINDING_BIT."); } } return skip; } bool StatelessValidation::manual_PreCallValidateCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkImage *pImage) const { bool skip = false; if (pCreateInfo != nullptr) { const VkFormat image_format = pCreateInfo->format; // Validation for parameters excluded from the generated validation code due to a 'noautovalidity' tag in vk.xml if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT) { // If sharingMode is VK_SHARING_MODE_CONCURRENT, queueFamilyIndexCount must be greater than 1 if (pCreateInfo->queueFamilyIndexCount <= 1) { skip |= LogError(device, "VUID-VkImageCreateInfo-sharingMode-00942", "vkCreateImage(): if pCreateInfo->sharingMode is VK_SHARING_MODE_CONCURRENT, " "pCreateInfo->queueFamilyIndexCount must be greater than 1."); } // If sharingMode is VK_SHARING_MODE_CONCURRENT, pQueueFamilyIndices must be a pointer to an array of // queueFamilyIndexCount uint32_t values if (pCreateInfo->pQueueFamilyIndices == nullptr) { skip |= LogError(device, "VUID-VkImageCreateInfo-sharingMode-00941", "vkCreateImage(): if pCreateInfo->sharingMode is VK_SHARING_MODE_CONCURRENT, " "pCreateInfo->pQueueFamilyIndices must be a pointer to an array of " "pCreateInfo->queueFamilyIndexCount uint32_t values."); } } skip |= 
ValidateGreaterThanZero(pCreateInfo->extent.width, "pCreateInfo->extent.width", "VUID-VkImageCreateInfo-extent-00944", "vkCreateImage"); skip |= ValidateGreaterThanZero(pCreateInfo->extent.height, "pCreateInfo->extent.height", "VUID-VkImageCreateInfo-extent-00945", "vkCreateImage"); skip |= ValidateGreaterThanZero(pCreateInfo->extent.depth, "pCreateInfo->extent.depth", "VUID-VkImageCreateInfo-extent-00946", "vkCreateImage"); skip |= ValidateGreaterThanZero(pCreateInfo->mipLevels, "pCreateInfo->mipLevels", "VUID-VkImageCreateInfo-mipLevels-00947", "vkCreateImage"); skip |= ValidateGreaterThanZero(pCreateInfo->arrayLayers, "pCreateInfo->arrayLayers", "VUID-VkImageCreateInfo-arrayLayers-00948", "vkCreateImage"); // InitialLayout must be PREINITIALIZED or UNDEFINED if ((pCreateInfo->initialLayout != VK_IMAGE_LAYOUT_UNDEFINED) && (pCreateInfo->initialLayout != VK_IMAGE_LAYOUT_PREINITIALIZED)) { skip |= LogError( device, "VUID-VkImageCreateInfo-initialLayout-00993", "vkCreateImage(): initialLayout is %s, must be VK_IMAGE_LAYOUT_UNDEFINED or VK_IMAGE_LAYOUT_PREINITIALIZED.", string_VkImageLayout(pCreateInfo->initialLayout)); } // If imageType is VK_IMAGE_TYPE_1D, both extent.height and extent.depth must be 1 if ((pCreateInfo->imageType == VK_IMAGE_TYPE_1D) && ((pCreateInfo->extent.height != 1) || (pCreateInfo->extent.depth != 1))) { skip |= LogError(device, "VUID-VkImageCreateInfo-imageType-00956", "vkCreateImage(): if pCreateInfo->imageType is VK_IMAGE_TYPE_1D, both pCreateInfo->extent.height and " "pCreateInfo->extent.depth must be 1."); } if (pCreateInfo->imageType == VK_IMAGE_TYPE_2D) { if (pCreateInfo->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) { if (pCreateInfo->extent.width != pCreateInfo->extent.height) { skip |= LogError(device, "VUID-VkImageCreateInfo-imageType-00954", "vkCreateImage(): pCreateInfo->flags contains VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT, but " "pCreateInfo->extent.width (=%" PRIu32 ") and pCreateInfo->extent.height (=%" PRIu32 ") are not equal.", pCreateInfo->extent.width, pCreateInfo->extent.height); } if (pCreateInfo->arrayLayers < 6) { skip |= LogError(device, "VUID-VkImageCreateInfo-imageType-00954", "vkCreateImage(): pCreateInfo->flags contains VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT, but " "pCreateInfo->arrayLayers (=%" PRIu32 ") is not greater than or equal to 6.", pCreateInfo->arrayLayers); } } if (pCreateInfo->extent.depth != 1) { skip |= LogError( device, "VUID-VkImageCreateInfo-imageType-00957", "vkCreateImage(): if pCreateInfo->imageType is VK_IMAGE_TYPE_2D, pCreateInfo->extent.depth must be 1."); } } // 3D image may have only 1 layer if ((pCreateInfo->imageType == VK_IMAGE_TYPE_3D) && (pCreateInfo->arrayLayers != 1)) { skip |= LogError(device, "VUID-VkImageCreateInfo-imageType-00961", "vkCreateImage(): if pCreateInfo->imageType is VK_IMAGE_TYPE_3D, pCreateInfo->arrayLayers must be 1."); } if (0 != (pCreateInfo->usage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT)) { VkImageUsageFlags legal_flags = (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT); // At least one of the legal attachment bits must be set if (0 == (pCreateInfo->usage & legal_flags)) { skip |= LogError(device, "VUID-VkImageCreateInfo-usage-00966", "vkCreateImage(): Transient attachment image without a compatible attachment flag set."); } // No flags other than the legal attachment bits may be set legal_flags |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT; if (0 != (pCreateInfo->usage & ~legal_flags)) { skip |= LogError(device, 
"VUID-VkImageCreateInfo-usage-00963", "vkCreateImage(): Transient attachment image with incompatible usage flags set."); } } // mipLevels must be less than or equal to the number of levels in the complete mipmap chain uint32_t max_dim = std::max(std::max(pCreateInfo->extent.width, pCreateInfo->extent.height), pCreateInfo->extent.depth); // Max mip levels is different for corner-sampled images vs normal images. uint32_t max_mip_levels = (pCreateInfo->flags & VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV) ? static_cast<uint32_t>(ceil(log2(max_dim))) : static_cast<uint32_t>(floor(log2(max_dim)) + 1); if (max_dim > 0 && pCreateInfo->mipLevels > max_mip_levels) { skip |= LogError(device, "VUID-VkImageCreateInfo-mipLevels-00958", "vkCreateImage(): pCreateInfo->mipLevels must be less than or equal to " "floor(log2(max(pCreateInfo->extent.width, pCreateInfo->extent.height, pCreateInfo->extent.depth)))+1."); } if ((pCreateInfo->flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT) && (pCreateInfo->imageType != VK_IMAGE_TYPE_3D)) { skip |= LogError(device, "VUID-VkImageCreateInfo-flags-00950", "vkCreateImage(): pCreateInfo->flags contains VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT but " "pCreateInfo->imageType is not VK_IMAGE_TYPE_3D."); } if ((pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) && (!physical_device_features.sparseBinding)) { skip |= LogError(device, "VUID-VkImageCreateInfo-flags-00969", "vkCreateImage(): pCreateInfo->flags contains VK_IMAGE_CREATE_SPARSE_BINDING_BIT, but the " "VkPhysicalDeviceFeatures::sparseBinding feature is disabled."); } if ((pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_ALIASED_BIT) && (!physical_device_features.sparseResidencyAliased)) { skip |= LogError( device, "VUID-VkImageCreateInfo-flags-01924", "vkCreateImage(): the sparseResidencyAliased device feature is disabled: Images cannot be created with the " "VK_IMAGE_CREATE_SPARSE_ALIASED_BIT set."); } // If flags contains VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT or VK_IMAGE_CREATE_SPARSE_ALIASED_BIT, it must also contain // VK_IMAGE_CREATE_SPARSE_BINDING_BIT if (((pCreateInfo->flags & (VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_ALIASED_BIT)) != 0) && ((pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) != VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) { skip |= LogError(device, "VUID-VkImageCreateInfo-flags-00987", "vkCreateImage: if pCreateInfo->flags contains VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT or " "VK_IMAGE_CREATE_SPARSE_ALIASED_BIT, it must also contain VK_IMAGE_CREATE_SPARSE_BINDING_BIT."); } // Check for combinations of attributes that are incompatible with having VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT set if ((pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT) != 0) { // Linear tiling is unsupported if (VK_IMAGE_TILING_LINEAR == pCreateInfo->tiling) { skip |= LogError(device, "VUID-VkImageCreateInfo-tiling-04121", "vkCreateImage: if pCreateInfo->flags contains VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT then image " "tiling of VK_IMAGE_TILING_LINEAR is not supported"); } // Sparse 1D image isn't valid if (VK_IMAGE_TYPE_1D == pCreateInfo->imageType) { skip |= LogError(device, "VUID-VkImageCreateInfo-imageType-00970", "vkCreateImage: cannot specify VK_IMAGE_CREATE_SPARSE_BINDING_BIT for 1D image."); } // Sparse 2D image when device doesn't support it if ((VK_FALSE == physical_device_features.sparseResidencyImage2D) && (VK_IMAGE_TYPE_2D == pCreateInfo->imageType)) { skip |= LogError(device, "VUID-VkImageCreateInfo-imageType-00971", "vkCreateImage: cannot specify VK_IMAGE_CREATE_SPARSE_BINDING_BIT for 2D 
image if corresponding " "feature is not enabled on the device."); } // Sparse 3D image when device doesn't support it if ((VK_FALSE == physical_device_features.sparseResidencyImage3D) && (VK_IMAGE_TYPE_3D == pCreateInfo->imageType)) { skip |= LogError(device, "VUID-VkImageCreateInfo-imageType-00972", "vkCreateImage: cannot specify VK_IMAGE_CREATE_SPARSE_BINDING_BIT for 3D image if corresponding " "feature is not enabled on the device."); } // Multi-sample 2D image when device doesn't support it if (VK_IMAGE_TYPE_2D == pCreateInfo->imageType) { if ((VK_FALSE == physical_device_features.sparseResidency2Samples) && (VK_SAMPLE_COUNT_2_BIT == pCreateInfo->samples)) { skip |= LogError(device, "VUID-VkImageCreateInfo-imageType-00973", "vkCreateImage: cannot specify VK_IMAGE_CREATE_SPARSE_BINDING_BIT for 2-sample image if " "corresponding feature is not enabled on the device."); } else if ((VK_FALSE == physical_device_features.sparseResidency4Samples) && (VK_SAMPLE_COUNT_4_BIT == pCreateInfo->samples)) { skip |= LogError(device, "VUID-VkImageCreateInfo-imageType-00974", "vkCreateImage: cannot specify VK_IMAGE_CREATE_SPARSE_BINDING_BIT for 4-sample image if " "corresponding feature is not enabled on the device."); } else if ((VK_FALSE == physical_device_features.sparseResidency8Samples) && (VK_SAMPLE_COUNT_8_BIT == pCreateInfo->samples)) { skip |= LogError(device, "VUID-VkImageCreateInfo-imageType-00975", "vkCreateImage: cannot specify VK_IMAGE_CREATE_SPARSE_BINDING_BIT for 8-sample image if " "corresponding feature is not enabled on the device."); } else if ((VK_FALSE == physical_device_features.sparseResidency16Samples) && (VK_SAMPLE_COUNT_16_BIT == pCreateInfo->samples)) { skip |= LogError(device, "VUID-VkImageCreateInfo-imageType-00976", "vkCreateImage: cannot specify VK_IMAGE_CREATE_SPARSE_BINDING_BIT for 16-sample image if " "corresponding feature is not enabled on the device."); } } } if (pCreateInfo->usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV) { if (pCreateInfo->imageType != VK_IMAGE_TYPE_2D) { skip |= LogError(device, "VUID-VkImageCreateInfo-imageType-02082", "vkCreateImage: if usage includes VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, " "imageType must be VK_IMAGE_TYPE_2D."); } if (pCreateInfo->samples != VK_SAMPLE_COUNT_1_BIT) { skip |= LogError(device, "VUID-VkImageCreateInfo-samples-02083", "vkCreateImage: if usage includes VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, " "samples must be VK_SAMPLE_COUNT_1_BIT."); } if (pCreateInfo->tiling != VK_IMAGE_TILING_OPTIMAL) { skip |= LogError(device, "VUID-VkImageCreateInfo-tiling-02084", "vkCreateImage: if usage includes VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, " "tiling must be VK_IMAGE_TILING_OPTIMAL."); } } if (pCreateInfo->flags & VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV) { if (pCreateInfo->imageType != VK_IMAGE_TYPE_2D && pCreateInfo->imageType != VK_IMAGE_TYPE_3D) { skip |= LogError(device, "VUID-VkImageCreateInfo-flags-02050", "vkCreateImage: If flags contains VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV, " "imageType must be VK_IMAGE_TYPE_2D or VK_IMAGE_TYPE_3D."); } if ((pCreateInfo->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) || FormatIsDepthOrStencil(image_format)) { skip |= LogError(device, "VUID-VkImageCreateInfo-flags-02051", "vkCreateImage: If flags contains VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV, " "it must not also contain VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT and format (%s) must not be a " "depth/stencil format.", string_VkFormat(image_format)); } if (pCreateInfo->imageType == VK_IMAGE_TYPE_2D && (pCreateInfo->extent.width == 1 || 
pCreateInfo->extent.height == 1)) {
            skip |= LogError(device, "VUID-VkImageCreateInfo-flags-02052",
                             "vkCreateImage: If flags contains VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV and "
                             "imageType is VK_IMAGE_TYPE_2D, extent.width and extent.height must be "
                             "greater than 1.");
        } else if (pCreateInfo->imageType == VK_IMAGE_TYPE_3D &&
                   (pCreateInfo->extent.width == 1 || pCreateInfo->extent.height == 1 || pCreateInfo->extent.depth == 1)) {
            skip |= LogError(device, "VUID-VkImageCreateInfo-flags-02053",
                             "vkCreateImage: If flags contains VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV and "
                             "imageType is VK_IMAGE_TYPE_3D, extent.width, extent.height, and extent.depth "
                             "must be greater than 1.");
        }
    }

    if (((pCreateInfo->flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT) != 0) &&
        (FormatHasDepth(image_format) == false)) {
        skip |= LogError(device, "VUID-VkImageCreateInfo-flags-01533",
                         "vkCreateImage(): if flags contain VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT the "
                         "format (%s) must be a depth or depth/stencil format.",
                         string_VkFormat(image_format));
    }

    const auto image_stencil_struct = LvlFindInChain<VkImageStencilUsageCreateInfo>(pCreateInfo->pNext);
    if (image_stencil_struct != nullptr) {
        if ((image_stencil_struct->stencilUsage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT) != 0) {
            VkImageUsageFlags legal_flags = (VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
            // No flags other than the legal attachment bits may be set
            legal_flags |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
            if ((image_stencil_struct->stencilUsage & ~legal_flags) != 0) {
                skip |= LogError(device, "VUID-VkImageStencilUsageCreateInfo-stencilUsage-02539",
                                 "vkCreateImage(): in pNext chain, VkImageStencilUsageCreateInfo::stencilUsage includes "
                                 "VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT, it must not include bits other than "
                                 "VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT or VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT");
            }
        }

        if (FormatIsDepthOrStencil(image_format)) {
            if ((image_stencil_struct->stencilUsage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) != 0) {
                if (pCreateInfo->extent.width > device_limits.maxFramebufferWidth) {
                    skip |= LogError(
                        device, "VUID-VkImageCreateInfo-format-02536",
                        "vkCreateImage(): Depth-stencil image contains VkImageStencilUsageCreateInfo structure with "
                        "stencilUsage including VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT and image width (%u) exceeds device "
                        "maxFramebufferWidth (%u)",
                        pCreateInfo->extent.width, device_limits.maxFramebufferWidth);
                }

                if (pCreateInfo->extent.height > device_limits.maxFramebufferHeight) {
                    skip |= LogError(
                        device, "VUID-VkImageCreateInfo-format-02537",
                        "vkCreateImage(): Depth-stencil image contains VkImageStencilUsageCreateInfo structure with "
                        "stencilUsage including VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT and image height (%u) exceeds device "
                        "maxFramebufferHeight (%u)",
                        pCreateInfo->extent.height, device_limits.maxFramebufferHeight);
                }
            }

            if (!physical_device_features.shaderStorageImageMultisample &&
                ((image_stencil_struct->stencilUsage & VK_IMAGE_USAGE_STORAGE_BIT) != 0) &&
                (pCreateInfo->samples != VK_SAMPLE_COUNT_1_BIT)) {
                skip |= LogError(device, "VUID-VkImageCreateInfo-format-02538",
                                 "vkCreateImage(): Depth-stencil image contains VkImageStencilUsageCreateInfo structure with "
                                 "stencilUsage including VK_IMAGE_USAGE_STORAGE_BIT and the multisampled storage images feature "
                                 "is not enabled, image samples must be VK_SAMPLE_COUNT_1_BIT");
            }

            if (((pCreateInfo->usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) != 0) &&
                ((image_stencil_struct->stencilUsage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0)) {
                skip |= LogError(
                    device, "VUID-VkImageCreateInfo-format-02795",
                    "vkCreateImage(): Depth-stencil image in which usage includes VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT "
                    "contains VkImageStencilUsageCreateInfo structure, VkImageStencilUsageCreateInfo::stencilUsage must "
                    "also include VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT");
            } else if (((pCreateInfo->usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) &&
                       ((image_stencil_struct->stencilUsage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) != 0)) {
                skip |= LogError(
                    device, "VUID-VkImageCreateInfo-format-02796",
                    "vkCreateImage(): Depth-stencil image in which usage does not include "
                    "VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT "
                    "contains VkImageStencilUsageCreateInfo structure, VkImageStencilUsageCreateInfo::stencilUsage must "
                    "also not include VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT");
            }

            if (((pCreateInfo->usage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT) != 0) &&
                ((image_stencil_struct->stencilUsage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT) == 0)) {
                skip |= LogError(
                    device, "VUID-VkImageCreateInfo-format-02797",
                    "vkCreateImage(): Depth-stencil image in which usage includes VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT "
                    "contains VkImageStencilUsageCreateInfo structure, VkImageStencilUsageCreateInfo::stencilUsage must "
                    "also include VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT");
            } else if (((pCreateInfo->usage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT) == 0) &&
                       ((image_stencil_struct->stencilUsage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT) != 0)) {
                skip |= LogError(
                    device, "VUID-VkImageCreateInfo-format-02798",
                    "vkCreateImage(): Depth-stencil image in which usage does not include "
                    "VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT "
                    "contains VkImageStencilUsageCreateInfo structure, VkImageStencilUsageCreateInfo::stencilUsage must "
                    "also not include VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT");
            }
        }
    }

    if ((!physical_device_features.shaderStorageImageMultisample) && ((pCreateInfo->usage & VK_IMAGE_USAGE_STORAGE_BIT) != 0) &&
        (pCreateInfo->samples != VK_SAMPLE_COUNT_1_BIT)) {
        skip |= LogError(device, "VUID-VkImageCreateInfo-usage-00968",
                         "vkCreateImage(): usage contains VK_IMAGE_USAGE_STORAGE_BIT and the multisampled storage images "
                         "feature is not enabled, image samples must be VK_SAMPLE_COUNT_1_BIT");
    }

    std::vector<uint64_t> image_create_drm_format_modifiers;
    if (device_extensions.vk_ext_image_drm_format_modifier) {
        const auto drm_format_mod_list = LvlFindInChain<VkImageDrmFormatModifierListCreateInfoEXT>(pCreateInfo->pNext);
        const auto drm_format_mod_explict = LvlFindInChain<VkImageDrmFormatModifierExplicitCreateInfoEXT>(pCreateInfo->pNext);
        if (pCreateInfo->tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
            if (((drm_format_mod_list != nullptr) && (drm_format_mod_explict != nullptr)) ||
                ((drm_format_mod_list == nullptr) && (drm_format_mod_explict == nullptr))) {
                skip |= LogError(device, "VUID-VkImageCreateInfo-tiling-02261",
                                 "vkCreateImage(): Tiling is VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT but pNext must have "
                                 "either VkImageDrmFormatModifierListCreateInfoEXT or "
                                 "VkImageDrmFormatModifierExplicitCreateInfoEXT in the pNext chain");
            } else if (drm_format_mod_explict != nullptr) {
                // The explicit struct carries exactly one modifier
                image_create_drm_format_modifiers.push_back(drm_format_mod_explict->drmFormatModifier);
            } else if (drm_format_mod_list != nullptr) {
                // Collect every modifier in the list, not just the first one
                for (uint32_t i = 0; i < drm_format_mod_list->drmFormatModifierCount; i++) {
                    image_create_drm_format_modifiers.push_back(drm_format_mod_list->pDrmFormatModifiers[i]);
                }
            }
        }
else if ((drm_format_mod_list != nullptr) || (drm_format_mod_explict != nullptr)) {
            skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-02262",
                             "vkCreateImage(): Tiling is not VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT but there is a "
                             "VkImageDrmFormatModifierListCreateInfoEXT or VkImageDrmFormatModifierExplicitCreateInfoEXT "
                             "in the pNext chain");
        }
    }

    static const uint64_t drm_format_mod_linear = 0;
    bool image_create_maybe_linear = false;
    if (pCreateInfo->tiling == VK_IMAGE_TILING_LINEAR) {
        image_create_maybe_linear = true;
    } else if (pCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL) {
        image_create_maybe_linear = false;
    } else if (pCreateInfo->tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
        image_create_maybe_linear = (std::find(image_create_drm_format_modifiers.begin(),
                                               image_create_drm_format_modifiers.end(),
                                               drm_format_mod_linear) != image_create_drm_format_modifiers.end());
    }

    // If multi-sample, validate type, usage, tiling and mip levels.
    if ((pCreateInfo->samples != VK_SAMPLE_COUNT_1_BIT) &&
        ((pCreateInfo->imageType != VK_IMAGE_TYPE_2D) || (pCreateInfo->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) ||
         (pCreateInfo->mipLevels != 1) || image_create_maybe_linear)) {
        skip |= LogError(device, "VUID-VkImageCreateInfo-samples-02257",
                         "vkCreateImage(): Multi-sample image with incompatible type, usage, tiling, or mips.");
    }

    if ((pCreateInfo->flags & VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT) &&
        ((pCreateInfo->mipLevels != 1) || (pCreateInfo->arrayLayers != 1) || (pCreateInfo->imageType != VK_IMAGE_TYPE_2D) ||
         image_create_maybe_linear)) {
        skip |= LogError(device, "VUID-VkImageCreateInfo-flags-02259",
                         "vkCreateImage(): Multi-device image with incompatible type, usage, tiling, or mips.");
    }

    if (pCreateInfo->usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) {
        if (pCreateInfo->imageType != VK_IMAGE_TYPE_2D) {
            skip |= LogError(device, "VUID-VkImageCreateInfo-flags-02557",
                             "vkCreateImage: if usage includes VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT, "
                             "imageType must be VK_IMAGE_TYPE_2D.");
        }
        if (pCreateInfo->samples != VK_SAMPLE_COUNT_1_BIT) {
            skip |= LogError(device, "VUID-VkImageCreateInfo-samples-02558",
                             "vkCreateImage: if usage includes VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT, "
                             "samples must be VK_SAMPLE_COUNT_1_BIT.");
        }
    }

    if (pCreateInfo->flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
        if (pCreateInfo->tiling != VK_IMAGE_TILING_OPTIMAL) {
            skip |= LogError(device, "VUID-VkImageCreateInfo-flags-02565",
                             "vkCreateImage: if flags includes VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT, "
                             "tiling must be VK_IMAGE_TILING_OPTIMAL.");
        }
        if (pCreateInfo->imageType != VK_IMAGE_TYPE_2D) {
            skip |= LogError(device, "VUID-VkImageCreateInfo-flags-02566",
                             "vkCreateImage: if flags includes VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT, "
                             "imageType must be VK_IMAGE_TYPE_2D.");
        }
        if (pCreateInfo->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) {
            skip |= LogError(device, "VUID-VkImageCreateInfo-flags-02567",
                             "vkCreateImage: if flags includes VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT, "
                             "flags must not include VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT.");
        }
        if (pCreateInfo->mipLevels != 1) {
            skip |= LogError(device, "VUID-VkImageCreateInfo-flags-02568",
                             "vkCreateImage: if flags includes VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT, mipLevels (%d) must be 1.",
                             pCreateInfo->mipLevels);
        }
    }

    const auto swapchain_create_info = LvlFindInChain<VkImageSwapchainCreateInfoKHR>(pCreateInfo->pNext);
    if (swapchain_create_info != nullptr) {
        if (swapchain_create_info->swapchain != VK_NULL_HANDLE) {
            // All the following fall under the same VU that checks that the swapchain image uses parameters limited by the
            // table in #swapchain-wsi-image-create-info. Breaking up into multiple checks allows more useful information to
            // be returned about why this error occurred. Check for matching Swapchain flags is done later in state tracking
            // validation
            const char *vuid = "VUID-VkImageSwapchainCreateInfoKHR-swapchain-00995";
            const char *base_message = "vkCreateImage(): The image used for creating a presentable swapchain image";

            if (pCreateInfo->imageType != VK_IMAGE_TYPE_2D) {
                // also implicitly forces the check above that extent.depth is 1
                skip |= LogError(device, vuid, "%s must have an imageType value of VK_IMAGE_TYPE_2D instead of %s.",
                                 base_message, string_VkImageType(pCreateInfo->imageType));
            }
            if (pCreateInfo->mipLevels != 1) {
                skip |= LogError(device, vuid, "%s must have a mipLevels value of 1 instead of %u.", base_message,
                                 pCreateInfo->mipLevels);
            }
            if (pCreateInfo->samples != VK_SAMPLE_COUNT_1_BIT) {
                skip |= LogError(device, vuid, "%s must have a samples value of VK_SAMPLE_COUNT_1_BIT instead of %s.",
                                 base_message, string_VkSampleCountFlagBits(pCreateInfo->samples));
            }
            if (pCreateInfo->tiling != VK_IMAGE_TILING_OPTIMAL) {
                skip |= LogError(device, vuid, "%s must have a tiling value of VK_IMAGE_TILING_OPTIMAL instead of %s.",
                                 base_message, string_VkImageTiling(pCreateInfo->tiling));
            }
            if (pCreateInfo->initialLayout != VK_IMAGE_LAYOUT_UNDEFINED) {
                skip |= LogError(device, vuid, "%s must have an initialLayout value of VK_IMAGE_LAYOUT_UNDEFINED instead of %s.",
                                 base_message, string_VkImageLayout(pCreateInfo->initialLayout));
            }
            const VkImageCreateFlags valid_flags =
                (VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT | VK_IMAGE_CREATE_PROTECTED_BIT |
                 VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_EXTENDED_USAGE_BIT);
            if ((pCreateInfo->flags & ~valid_flags) != 0) {
                skip |= LogError(device, vuid, "%s flags are %" PRIu32 " and must only have valid flags set.", base_message,
                                 pCreateInfo->flags);
            }
        }
    }

    // If Chroma subsampled format ( _420_ or _422_ )
    if (FormatIsXChromaSubsampled(image_format) && (SafeModulo(pCreateInfo->extent.width, 2) != 0)) {
        skip |= LogError(device, "VUID-VkImageCreateInfo-format-04712",
                         "vkCreateImage(): The format (%s) is X Chroma Subsampled (has _422 or _420 suffix) so the width (=%" PRIu32
                         ") must be a multiple of 2.",
                         string_VkFormat(image_format), pCreateInfo->extent.width);
    }
    if (FormatIsYChromaSubsampled(image_format) && (SafeModulo(pCreateInfo->extent.height, 2) != 0)) {
        skip |= LogError(device, "VUID-VkImageCreateInfo-format-04713",
                         "vkCreateImage(): The format (%s) is Y Chroma Subsampled (has _420 suffix) so the height (=%" PRIu32
                         ") must be a multiple of 2.",
                         string_VkFormat(image_format), pCreateInfo->extent.height);
    }
    }
    return skip;
}

bool StatelessValidation::manual_PreCallValidateCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
                                                                const VkAllocationCallbacks *pAllocator,
                                                                VkImageView *pView) const {
    bool skip = false;

    if (pCreateInfo != nullptr) {
        // Validate feature set if using CUBE_ARRAY
        if ((pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) && (physical_device_features.imageCubeArray == false)) {
            skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-viewType-01004",
                             "vkCreateImageView(): pCreateInfo->viewType can't be VK_IMAGE_VIEW_TYPE_CUBE_ARRAY without "
                             "enabling the imageCubeArray feature.");
        }

        if (pCreateInfo->subresourceRange.layerCount != VK_REMAINING_ARRAY_LAYERS) {
            if (pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_CUBE && pCreateInfo->subresourceRange.layerCount != 6) {
                skip
|= LogError(device, "VUID-VkImageViewCreateInfo-viewType-02960", "vkCreateImageView(): subresourceRange.layerCount (%d) must be 6 or VK_REMAINING_ARRAY_LAYERS.", pCreateInfo->subresourceRange.layerCount); } if (pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY && (pCreateInfo->subresourceRange.layerCount % 6) != 0) { skip |= LogError( device, "VUID-VkImageViewCreateInfo-viewType-02961", "vkCreateImageView(): subresourceRange.layerCount (%d) must be a multiple of 6 or VK_REMAINING_ARRAY_LAYERS.", pCreateInfo->subresourceRange.layerCount); } } auto astc_decode_mode = LvlFindInChain<VkImageViewASTCDecodeModeEXT>(pCreateInfo->pNext); if ((device_extensions.vk_ext_astc_decode_mode) && (astc_decode_mode != nullptr)) { if ((astc_decode_mode->decodeMode != VK_FORMAT_R16G16B16A16_SFLOAT) && (astc_decode_mode->decodeMode != VK_FORMAT_R8G8B8A8_UNORM) && (astc_decode_mode->decodeMode != VK_FORMAT_E5B9G9R9_UFLOAT_PACK32)) { skip |= LogError(device, "VUID-VkImageViewASTCDecodeModeEXT-decodeMode-02230", "vkCreateImageView(): VkImageViewASTCDecodeModeEXT::decodeMode must be " "VK_FORMAT_R16G16B16A16_SFLOAT, VK_FORMAT_R8G8B8A8_UNORM, or VK_FORMAT_E5B9G9R9_UFLOAT_PACK32."); } if (FormatIsCompressed_ASTC(pCreateInfo->format) == false) { skip |= LogError(device, "VUID-VkImageViewASTCDecodeModeEXT-format-04084", "vkCreateImageView(): is using a VkImageViewASTCDecodeModeEXT but the image view format is %s and " "not an ASTC format.", string_VkFormat(pCreateInfo->format)); } } auto ycbcr_conversion = LvlFindInChain<VkSamplerYcbcrConversionInfo>(pCreateInfo->pNext); if (ycbcr_conversion != nullptr) { if (ycbcr_conversion->conversion != VK_NULL_HANDLE) { if (IsIdentitySwizzle(pCreateInfo->components) == false) { skip |= LogError( device, "VUID-VkImageViewCreateInfo-pNext-01970", "vkCreateImageView(): If there is a VkSamplerYcbcrConversion, the imageView must " "be created with the identity swizzle. Here are the actual swizzle values:\n" "r swizzle = %s\n" "g swizzle = %s\n" "b swizzle = %s\n" "a swizzle = %s\n", string_VkComponentSwizzle(pCreateInfo->components.r), string_VkComponentSwizzle(pCreateInfo->components.g), string_VkComponentSwizzle(pCreateInfo->components.b), string_VkComponentSwizzle(pCreateInfo->components.a)); } } } } return skip; } bool StatelessValidation::manual_PreCallValidateViewport(const VkViewport &viewport, const char *fn_name, const ParameterName &parameter_name, VkCommandBuffer object) const { bool skip = false; // Note: for numerical correctness // - float comparisons should expect NaN (comparison always false). // - VkPhysicalDeviceLimits::maxViewportDimensions is uint32_t, not float -> careful. 
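    // The two helper lambdas below take complementary approaches: f_lte_u32_exact splits the float into
    // integral and fractional parts so the comparison against the uint32_t limit stays exact even where
    // float cannot represent every 32-bit integer, while f_lte_u32_direct is a cheap approximate fallback
    // that casts the limit to float. A dimension is accepted if either comparison passes.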
const auto f_lte_u32_exact = [](const float v1_f, const uint32_t v2_u32) {
        if (std::isnan(v1_f)) return false;
        if (v1_f <= 0.0f) return true;

        float intpart;
        const float fract = modff(v1_f, &intpart);

        assert(std::numeric_limits<float>::radix == 2);
        const float u32_max_plus1 = ldexpf(1.0f, 32);  // hopefully exact
        if (intpart >= u32_max_plus1) return false;

        uint32_t v1_u32 = static_cast<uint32_t>(intpart);
        if (v1_u32 < v2_u32) {
            return true;
        } else if (v1_u32 == v2_u32 && fract == 0.0f) {
            return true;
        } else {
            return false;
        }
    };

    const auto f_lte_u32_direct = [](const float v1_f, const uint32_t v2_u32) {
        const float v2_f = static_cast<float>(v2_u32);  // not accurate for > radix^digits; and undefined rounding mode
        return (v1_f <= v2_f);
    };

    // width
    bool width_healthy = true;
    const auto max_w = device_limits.maxViewportDimensions[0];

    if (!(viewport.width > 0.0f)) {
        width_healthy = false;
        skip |= LogError(object, "VUID-VkViewport-width-01770", "%s: %s.width (=%f) is not greater than 0.0.", fn_name,
                         parameter_name.get_name().c_str(), viewport.width);
    } else if (!(f_lte_u32_exact(viewport.width, max_w) || f_lte_u32_direct(viewport.width, max_w))) {
        width_healthy = false;
        skip |= LogError(object, "VUID-VkViewport-width-01771",
                         "%s: %s.width (=%f) exceeds VkPhysicalDeviceLimits::maxViewportDimensions[0] (=%" PRIu32 ").", fn_name,
                         parameter_name.get_name().c_str(), viewport.width, max_w);
    }

    // height
    bool height_healthy = true;
    const bool negative_height_enabled =
        device_extensions.vk_khr_maintenance1 || device_extensions.vk_amd_negative_viewport_height;
    const auto max_h = device_limits.maxViewportDimensions[1];

    if (!negative_height_enabled && !(viewport.height > 0.0f)) {
        height_healthy = false;
        skip |= LogError(object, "VUID-VkViewport-height-01772", "%s: %s.height (=%f) is not greater than 0.0.", fn_name,
                         parameter_name.get_name().c_str(), viewport.height);
    } else if (!(f_lte_u32_exact(fabsf(viewport.height), max_h) || f_lte_u32_direct(fabsf(viewport.height), max_h))) {
        height_healthy = false;
        skip |= LogError(object, "VUID-VkViewport-height-01773",
                         "%s: Absolute value of %s.height (=%f) exceeds VkPhysicalDeviceLimits::maxViewportDimensions[1] "
                         "(=%" PRIu32 ").",
                         fn_name, parameter_name.get_name().c_str(), viewport.height, max_h);
    }

    // x
    bool x_healthy = true;
    if (!(viewport.x >= device_limits.viewportBoundsRange[0])) {
        x_healthy = false;
        skip |= LogError(object, "VUID-VkViewport-x-01774",
                         "%s: %s.x (=%f) is less than VkPhysicalDeviceLimits::viewportBoundsRange[0] (=%f).", fn_name,
                         parameter_name.get_name().c_str(), viewport.x, device_limits.viewportBoundsRange[0]);
    }

    // x + width
    if (x_healthy && width_healthy) {
        const float right_bound = viewport.x + viewport.width;
        if (!(right_bound <= device_limits.viewportBoundsRange[1])) {
            skip |= LogError(
                object, "VUID-VkViewport-x-01232",
                "%s: %s.x + %s.width (=%f + %f = %f) is greater than VkPhysicalDeviceLimits::viewportBoundsRange[1] (=%f).",
                fn_name, parameter_name.get_name().c_str(), parameter_name.get_name().c_str(), viewport.x, viewport.width,
                right_bound, device_limits.viewportBoundsRange[1]);
        }
    }

    // y
    bool y_healthy = true;
    if (!(viewport.y >= device_limits.viewportBoundsRange[0])) {
        y_healthy = false;
        skip |= LogError(object, "VUID-VkViewport-y-01775",
                         "%s: %s.y (=%f) is less than VkPhysicalDeviceLimits::viewportBoundsRange[0] (=%f).", fn_name,
                         parameter_name.get_name().c_str(), viewport.y, device_limits.viewportBoundsRange[0]);
    } else if (negative_height_enabled && !(viewport.y <= device_limits.viewportBoundsRange[1])) {
        y_healthy = false;
        skip |=
LogError(object, "VUID-VkViewport-y-01776", "%s: %s.y (=%f) exceeds VkPhysicalDeviceLimits::viewportBoundsRange[1] (=%f).", fn_name, parameter_name.get_name().c_str(), viewport.y, device_limits.viewportBoundsRange[1]); } // y + height if (y_healthy && height_healthy) { const float boundary = viewport.y + viewport.height; if (!(boundary <= device_limits.viewportBoundsRange[1])) { skip |= LogError(object, "VUID-VkViewport-y-01233", "%s: %s.y + %s.height (=%f + %f = %f) exceeds VkPhysicalDeviceLimits::viewportBoundsRange[1] (=%f).", fn_name, parameter_name.get_name().c_str(), parameter_name.get_name().c_str(), viewport.y, viewport.height, boundary, device_limits.viewportBoundsRange[1]); } else if (negative_height_enabled && !(boundary >= device_limits.viewportBoundsRange[0])) { skip |= LogError(object, "VUID-VkViewport-y-01777", "%s: %s.y + %s.height (=%f + %f = %f) is less than VkPhysicalDeviceLimits::viewportBoundsRange[0] (=%f).", fn_name, parameter_name.get_name().c_str(), parameter_name.get_name().c_str(), viewport.y, viewport.height, boundary, device_limits.viewportBoundsRange[0]); } } // The extension was not created with a feature bit whichs prevents displaying the 2 variations of the VUIDs if (!device_extensions.vk_ext_depth_range_unrestricted) { // minDepth if (!(viewport.minDepth >= 0.0) || !(viewport.minDepth <= 1.0)) { // Also VUID-VkViewport-minDepth-02540 skip |= LogError(object, "VUID-VkViewport-minDepth-01234", "%s: VK_EXT_depth_range_unrestricted extension is not enabled and %s.minDepth (=%f) is not within the " "[0.0, 1.0] range.", fn_name, parameter_name.get_name().c_str(), viewport.minDepth); } // maxDepth if (!(viewport.maxDepth >= 0.0) || !(viewport.maxDepth <= 1.0)) { // Also VUID-VkViewport-maxDepth-02541 skip |= LogError(object, "VUID-VkViewport-maxDepth-01235", "%s: VK_EXT_depth_range_unrestricted extension is not enabled and %s.maxDepth (=%f) is not within the " "[0.0, 1.0] range.", fn_name, parameter_name.get_name().c_str(), viewport.maxDepth); } } return skip; } struct SampleOrderInfo { VkShadingRatePaletteEntryNV shadingRate; uint32_t width; uint32_t height; }; // All palette entries with more than one pixel per fragment static SampleOrderInfo sample_order_infos[] = { {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 1, 2}, {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X1_PIXELS_NV, 2, 1}, {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X2_PIXELS_NV, 2, 2}, {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X2_PIXELS_NV, 4, 2}, {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X4_PIXELS_NV, 2, 4}, {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X4_PIXELS_NV, 4, 4}, }; bool StatelessValidation::ValidateCoarseSampleOrderCustomNV(const VkCoarseSampleOrderCustomNV *order) const { bool skip = false; SampleOrderInfo *sample_order_info; uint32_t info_idx = 0; for (sample_order_info = nullptr; info_idx < ARRAY_SIZE(sample_order_infos); ++info_idx) { if (sample_order_infos[info_idx].shadingRate == order->shadingRate) { sample_order_info = &sample_order_infos[info_idx]; break; } } if (sample_order_info == nullptr) { skip |= LogError(device, "VUID-VkCoarseSampleOrderCustomNV-shadingRate-02073", "VkCoarseSampleOrderCustomNV shadingRate must be a shading rate " "that generates fragments with more than one pixel."); return skip; } if (order->sampleCount == 0 || (order->sampleCount & (order->sampleCount - 1)) || !(order->sampleCount & device_limits.framebufferNoAttachmentsSampleCounts)) { skip |= LogError(device, 
"VUID-VkCoarseSampleOrderCustomNV-sampleCount-02074", "VkCoarseSampleOrderCustomNV sampleCount (=%" PRIu32 ") must " "correspond to a sample count enumerated in VkSampleCountFlags whose corresponding bit " "is set in framebufferNoAttachmentsSampleCounts.", order->sampleCount); } if (order->sampleLocationCount != order->sampleCount * sample_order_info->width * sample_order_info->height) { skip |= LogError(device, "VUID-VkCoarseSampleOrderCustomNV-sampleLocationCount-02075", "VkCoarseSampleOrderCustomNV sampleLocationCount (=%" PRIu32 ") must " "be equal to the product of sampleCount (=%" PRIu32 "), the fragment width for shadingRate " "(=%" PRIu32 "), and the fragment height for shadingRate (=%" PRIu32 ").", order->sampleLocationCount, order->sampleCount, sample_order_info->width, sample_order_info->height); } if (order->sampleLocationCount > phys_dev_ext_props.shading_rate_image_props.shadingRateMaxCoarseSamples) { skip |= LogError( device, "VUID-VkCoarseSampleOrderCustomNV-sampleLocationCount-02076", "VkCoarseSampleOrderCustomNV sampleLocationCount (=%" PRIu32 ") must " "be less than or equal to VkPhysicalDeviceShadingRateImagePropertiesNV shadingRateMaxCoarseSamples (=%" PRIu32 ").", order->sampleLocationCount, phys_dev_ext_props.shading_rate_image_props.shadingRateMaxCoarseSamples); } // Accumulate a bitmask tracking which (x,y,sample) tuples are seen. Expect // the first width*height*sampleCount bits to all be set. Note: There is no // guarantee that 64 bits is enough, but practically it's unlikely for an // implementation to support more than 32 bits for samplemask. assert(phys_dev_ext_props.shading_rate_image_props.shadingRateMaxCoarseSamples <= 64); uint64_t sample_locations_mask = 0; for (uint32_t i = 0; i < order->sampleLocationCount; ++i) { const VkCoarseSampleLocationNV *sample_loc = &order->pSampleLocations[i]; if (sample_loc->pixelX >= sample_order_info->width) { skip |= LogError(device, "VUID-VkCoarseSampleLocationNV-pixelX-02078", "pixelX must be less than the width (in pixels) of the fragment."); } if (sample_loc->pixelY >= sample_order_info->height) { skip |= LogError(device, "VUID-VkCoarseSampleLocationNV-pixelY-02079", "pixelY must be less than the height (in pixels) of the fragment."); } if (sample_loc->sample >= order->sampleCount) { skip |= LogError(device, "VUID-VkCoarseSampleLocationNV-sample-02080", "sample must be less than the number of coverage samples in each pixel belonging to the fragment."); } uint32_t idx = sample_loc->sample + order->sampleCount * (sample_loc->pixelX + sample_order_info->width * sample_loc->pixelY); sample_locations_mask |= 1ULL << idx; } uint64_t expected_mask = (order->sampleLocationCount == 64) ? 
~0ULL : ((1ULL << order->sampleLocationCount) - 1); if (sample_locations_mask != expected_mask) { skip |= LogError( device, "VUID-VkCoarseSampleOrderCustomNV-pSampleLocations-02077", "The array pSampleLocations must contain exactly one entry for " "every combination of valid values for pixelX, pixelY, and sample in the structure VkCoarseSampleOrderCustomNV."); } return skip; } bool StatelessValidation::manual_PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) const { bool skip = false; if (pCreateInfos != nullptr) { for (uint32_t i = 0; i < createInfoCount; ++i) { bool has_dynamic_viewport = false; bool has_dynamic_scissor = false; bool has_dynamic_line_width = false; bool has_dynamic_depth_bias = false; bool has_dynamic_blend_constant = false; bool has_dynamic_depth_bounds = false; bool has_dynamic_stencil_compare = false; bool has_dynamic_stencil_write = false; bool has_dynamic_stencil_reference = false; bool has_dynamic_viewport_w_scaling_nv = false; bool has_dynamic_discard_rectangle_ext = false; bool has_dynamic_sample_locations_ext = false; bool has_dynamic_exclusive_scissor_nv = false; bool has_dynamic_shading_rate_palette_nv = false; bool has_dynamic_viewport_course_sample_order_nv = false; bool has_dynamic_line_stipple = false; bool has_dynamic_cull_mode = false; bool has_dynamic_front_face = false; bool has_dynamic_primitive_topology = false; bool has_dynamic_viewport_with_count = false; bool has_dynamic_scissor_with_count = false; bool has_dynamic_vertex_input_binding_stride = false; bool has_dynamic_depth_test_enable = false; bool has_dynamic_depth_write_enable = false; bool has_dynamic_depth_compare_op = false; bool has_dynamic_depth_bounds_test_enable = false; bool has_dynamic_stencil_test_enable = false; bool has_dynamic_stencil_op = false; if (pCreateInfos[i].pDynamicState != nullptr) { const auto &dynamic_state_info = *pCreateInfos[i].pDynamicState; for (uint32_t state_index = 0; state_index < dynamic_state_info.dynamicStateCount; ++state_index) { const auto &dynamic_state = dynamic_state_info.pDynamicStates[state_index]; if (dynamic_state == VK_DYNAMIC_STATE_VIEWPORT) { if (has_dynamic_viewport == true) { skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_VIEWPORT was listed twice in the " "pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_viewport = true; } if (dynamic_state == VK_DYNAMIC_STATE_SCISSOR) { if (has_dynamic_scissor == true) { skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_SCISSOR was listed twice in the " "pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_scissor = true; } if (dynamic_state == VK_DYNAMIC_STATE_LINE_WIDTH) { if (has_dynamic_line_width == true) { skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_LINE_WIDTH was listed twice in the " "pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_line_width = true; } if (dynamic_state == VK_DYNAMIC_STATE_DEPTH_BIAS) { if (has_dynamic_depth_bias == true) { skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_DEPTH_BIAS was listed twice in the " 
"pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_depth_bias = true; } if (dynamic_state == VK_DYNAMIC_STATE_BLEND_CONSTANTS) { if (has_dynamic_blend_constant == true) { skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_BLEND_CONSTANTS was listed twice in the " "pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_blend_constant = true; } if (dynamic_state == VK_DYNAMIC_STATE_DEPTH_BOUNDS) { if (has_dynamic_depth_bounds == true) { skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_DEPTH_BOUNDS was listed twice in the " "pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_depth_bounds = true; } if (dynamic_state == VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK) { if (has_dynamic_stencil_compare == true) { skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK was listed twice in " "the pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_stencil_compare = true; } if (dynamic_state == VK_DYNAMIC_STATE_STENCIL_WRITE_MASK) { if (has_dynamic_stencil_write == true) { skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_STENCIL_WRITE_MASK was listed twice in " "the pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_stencil_write = true; } if (dynamic_state == VK_DYNAMIC_STATE_STENCIL_REFERENCE) { if (has_dynamic_stencil_reference == true) { skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_STENCIL_REFERENCE was listed twice in " "the pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_stencil_reference = true; } if (dynamic_state == VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV) { if (has_dynamic_viewport_w_scaling_nv == true) { skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV was listed twice " "in the pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_viewport_w_scaling_nv = true; } if (dynamic_state == VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT) { if (has_dynamic_discard_rectangle_ext == true) { skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT was listed twice " "in the pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_discard_rectangle_ext = true; } if (dynamic_state == VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT) { if (has_dynamic_sample_locations_ext == true) { skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT was listed twice in " "the pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_sample_locations_ext = true; } if (dynamic_state == VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV) { if (has_dynamic_exclusive_scissor_nv == true) { skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV was listed twice in " "the pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_exclusive_scissor_nv = true; } 
if (dynamic_state == VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV) { if (has_dynamic_shading_rate_palette_nv == true) { skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV was " "listed twice in the pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_shading_rate_palette_nv = true; } if (dynamic_state == VK_DYNAMIC_STATE_VIEWPORT_COARSE_SAMPLE_ORDER_NV) { if (has_dynamic_viewport_course_sample_order_nv == true) { skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_VIEWPORT_COARSE_SAMPLE_ORDER_NV was " "listed twice in the pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_viewport_course_sample_order_nv = true; } if (dynamic_state == VK_DYNAMIC_STATE_LINE_STIPPLE_EXT) { if (has_dynamic_line_stipple == true) { skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_LINE_STIPPLE_EXT was listed twice in the " "pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_line_stipple = true; } if (dynamic_state == VK_DYNAMIC_STATE_CULL_MODE_EXT) { if (has_dynamic_cull_mode) { skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_CULL_MODE_EXT was listed twice in the " "pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_cull_mode = true; } if (dynamic_state == VK_DYNAMIC_STATE_FRONT_FACE_EXT) { if (has_dynamic_front_face) { skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_FRONT_FACE_EXT was listed twice in the " "pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_front_face = true; } if (dynamic_state == VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT) { if (has_dynamic_primitive_topology) { skip |= LogError( device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT was listed twice in the " "pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_primitive_topology = true; } if (dynamic_state == VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT) { if (has_dynamic_viewport_with_count) { skip |= LogError( device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT was listed twice in the " "pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_viewport_with_count = true; } if (dynamic_state == VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT) { if (has_dynamic_scissor_with_count) { skip |= LogError( device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT was listed twice in the " "pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_scissor_with_count = true; } if (dynamic_state == VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT) { if (has_dynamic_vertex_input_binding_stride) { skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442", "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT was " "listed twice in the " "pCreateInfos[%d].pDynamicState->pDynamicStates array", i); } has_dynamic_vertex_input_binding_stride = true; } if (dynamic_state == 
VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT) {
                        if (has_dynamic_depth_test_enable) {
                            skip |= LogError(
                                device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442",
                                "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT was listed twice in the "
                                "pCreateInfos[%d].pDynamicState->pDynamicStates array",
                                i);
                        }
                        has_dynamic_depth_test_enable = true;
                    }
                    if (dynamic_state == VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT) {
                        if (has_dynamic_depth_write_enable) {
                            skip |= LogError(
                                device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442",
                                "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT was listed twice in the "
                                "pCreateInfos[%d].pDynamicState->pDynamicStates array",
                                i);
                        }
                        has_dynamic_depth_write_enable = true;
                    }
                    if (dynamic_state == VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT) {
                        if (has_dynamic_depth_compare_op) {
                            skip |=
                                LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442",
                                         "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT was listed twice in the "
                                         "pCreateInfos[%d].pDynamicState->pDynamicStates array",
                                         i);
                        }
                        has_dynamic_depth_compare_op = true;
                    }
                    if (dynamic_state == VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT) {
                        if (has_dynamic_depth_bounds_test_enable) {
                            skip |= LogError(
                                device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442",
                                "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT was listed twice in the "
                                "pCreateInfos[%d].pDynamicState->pDynamicStates array",
                                i);
                        }
                        has_dynamic_depth_bounds_test_enable = true;
                    }
                    if (dynamic_state == VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT) {
                        if (has_dynamic_stencil_test_enable) {
                            skip |= LogError(
                                device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442",
                                "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT was listed twice in the "
                                "pCreateInfos[%d].pDynamicState->pDynamicStates array",
                                i);
                        }
                        has_dynamic_stencil_test_enable = true;
                    }
                    if (dynamic_state == VK_DYNAMIC_STATE_STENCIL_OP_EXT) {
                        if (has_dynamic_stencil_op) {
                            skip |= LogError(device, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442",
                                             "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_STENCIL_OP_EXT was listed twice in the "
                                             "pCreateInfos[%d].pDynamicState->pDynamicStates array",
                                             i);
                        }
                        has_dynamic_stencil_op = true;
                    }
                    if (dynamic_state == VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR) {
                        // Not allowed for graphics pipelines
                        skip |= LogError(
                            device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03578",
                            "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR was listed in the "
                            "pCreateInfos[%d].pDynamicState->pDynamicStates[%d] array but is not allowed in graphics pipelines.",
                            i, state_index);
                    }
                }
            }

            if (has_dynamic_viewport_with_count && has_dynamic_viewport) {
                skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04132",
                                 "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT and "
                                 "VK_DYNAMIC_STATE_VIEWPORT both listed in pCreateInfos[%d].pDynamicState->pDynamicStates array",
                                 i);
            }

            if (has_dynamic_scissor_with_count && has_dynamic_scissor) {
                skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04133",
                                 "vkCreateGraphicsPipelines: VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT and VK_DYNAMIC_STATE_SCISSOR "
                                 "both listed in pCreateInfos[%d].pDynamicState->pDynamicStates array",
                                 i);
            }

            auto feedback_struct = LvlFindInChain<VkPipelineCreationFeedbackCreateInfoEXT>(pCreateInfos[i].pNext);
            if ((feedback_struct != nullptr) && (feedback_struct->pipelineStageCreationFeedbackCount !=
pCreateInfos[i].stageCount)) { skip |= LogError(device, "VUID-VkPipelineCreationFeedbackCreateInfoEXT-pipelineStageCreationFeedbackCount-02668", "vkCreateGraphicsPipelines(): in pCreateInfo[%" PRIu32 "], VkPipelineCreationFeedbackEXT::pipelineStageCreationFeedbackCount" "(=%" PRIu32 ") must equal VkGraphicsPipelineCreateInfo::stageCount(=%" PRIu32 ").", i, feedback_struct->pipelineStageCreationFeedbackCount, pCreateInfos[i].stageCount); } // Validation for parameters excluded from the generated validation code due to a 'noautovalidity' tag in vk.xml // Collect active stages and other information // Only want to loop through pStages once uint32_t active_shaders = 0; bool has_eval = false; bool has_control = false; if (pCreateInfos[i].pStages != nullptr) { for (uint32_t stage_index = 0; stage_index < pCreateInfos[i].stageCount; ++stage_index) { active_shaders |= pCreateInfos[i].pStages[stage_index].stage; if (pCreateInfos[i].pStages[stage_index].stage == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) { has_control = true; } else if (pCreateInfos[i].pStages[stage_index].stage == VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) { has_eval = true; } skip |= validate_string( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pStages[%i].pName", ParameterName::IndexVector{i, stage_index}), "VUID-VkGraphicsPipelineCreateInfo-pStages-parameter", pCreateInfos[i].pStages[stage_index].pName); } } if ((active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) && (active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) && (pCreateInfos[i].pTessellationState != nullptr)) { skip |= validate_struct_type("vkCreateGraphicsPipelines", "pCreateInfos[i].pTessellationState", "VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO", pCreateInfos[i].pTessellationState, VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, false, kVUIDUndefined, "VUID-VkPipelineTessellationStateCreateInfo-sType-sType"); const VkStructureType allowed_structs_vk_pipeline_tessellation_state_create_info[] = { VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO}; skip |= validate_struct_pnext( "vkCreateGraphicsPipelines", "pCreateInfos[i].pTessellationState->pNext", "VkPipelineTessellationDomainOriginStateCreateInfo", pCreateInfos[i].pTessellationState->pNext, ARRAY_SIZE(allowed_structs_vk_pipeline_tessellation_state_create_info), allowed_structs_vk_pipeline_tessellation_state_create_info, GeneratedVulkanHeaderVersion, "VUID-VkPipelineTessellationStateCreateInfo-pNext-pNext", "VUID-VkPipelineTessellationStateCreateInfo-sType-unique"); skip |= validate_reserved_flags("vkCreateGraphicsPipelines", "pCreateInfos[i].pTessellationState->flags", pCreateInfos[i].pTessellationState->flags, "VUID-VkPipelineTessellationStateCreateInfo-flags-zerobitmask"); } if (!(active_shaders & VK_SHADER_STAGE_MESH_BIT_NV) && (pCreateInfos[i].pInputAssemblyState != nullptr)) { skip |= validate_struct_type("vkCreateGraphicsPipelines", "pCreateInfos[i].pInputAssemblyState", "VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO", pCreateInfos[i].pInputAssemblyState, VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, false, kVUIDUndefined, "VUID-VkPipelineInputAssemblyStateCreateInfo-sType-sType"); skip |= validate_struct_pnext("vkCreateGraphicsPipelines", "pCreateInfos[i].pInputAssemblyState->pNext", NULL, pCreateInfos[i].pInputAssemblyState->pNext, 0, NULL, GeneratedVulkanHeaderVersion, "VUID-VkPipelineInputAssemblyStateCreateInfo-pNext-pNext", nullptr); skip |= 
validate_reserved_flags("vkCreateGraphicsPipelines", "pCreateInfos[i].pInputAssemblyState->flags", pCreateInfos[i].pInputAssemblyState->flags, "VUID-VkPipelineInputAssemblyStateCreateInfo-flags-zerobitmask"); skip |= validate_ranged_enum("vkCreateGraphicsPipelines", "pCreateInfos[i].pInputAssemblyState->topology", "VkPrimitiveTopology", AllVkPrimitiveTopologyEnums, pCreateInfos[i].pInputAssemblyState->topology, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-parameter"); skip |= validate_bool32("vkCreateGraphicsPipelines", "pCreateInfos[i].pInputAssemblyState->primitiveRestartEnable", pCreateInfos[i].pInputAssemblyState->primitiveRestartEnable); } if (!(active_shaders & VK_SHADER_STAGE_MESH_BIT_NV) && (pCreateInfos[i].pVertexInputState != nullptr)) { auto const &vertex_input_state = pCreateInfos[i].pVertexInputState; if (pCreateInfos[i].pVertexInputState->flags != 0) { skip |= LogError(device, "VUID-VkPipelineVertexInputStateCreateInfo-flags-zerobitmask", "vkCreateGraphicsPipelines: pararameter " "pCreateInfos[%d].pVertexInputState->flags (%u) is reserved and must be zero.", i, vertex_input_state->flags); } const VkStructureType allowed_structs_vk_pipeline_vertex_input_state_create_info[] = { VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT}; skip |= validate_struct_pnext("vkCreateGraphicsPipelines", "pCreateInfos[i].pVertexInputState->pNext", "VkPipelineVertexInputDivisorStateCreateInfoEXT", pCreateInfos[i].pVertexInputState->pNext, 1, allowed_structs_vk_pipeline_vertex_input_state_create_info, GeneratedVulkanHeaderVersion, "VUID-VkPipelineVertexInputStateCreateInfo-pNext-pNext", "VUID-VkPipelineVertexInputStateCreateInfo-sType-unique"); skip |= validate_struct_type("vkCreateGraphicsPipelines", "pCreateInfos[i].pVertexInputState", "VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO", vertex_input_state, VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, false, kVUIDUndefined, "VUID-VkPipelineVertexInputStateCreateInfo-sType-sType"); skip |= validate_array("vkCreateGraphicsPipelines", "pCreateInfos[i].pVertexInputState->vertexBindingDescriptionCount", "pCreateInfos[i].pVertexInputState->pVertexBindingDescriptions", pCreateInfos[i].pVertexInputState->vertexBindingDescriptionCount, &pCreateInfos[i].pVertexInputState->pVertexBindingDescriptions, false, true, kVUIDUndefined, "VUID-VkPipelineVertexInputStateCreateInfo-pVertexBindingDescriptions-parameter"); skip |= validate_array( "vkCreateGraphicsPipelines", "pCreateInfos[i].pVertexInputState->vertexAttributeDescriptionCount", "pCreateInfos[i]->pVertexAttributeDescriptions", vertex_input_state->vertexAttributeDescriptionCount, &vertex_input_state->pVertexAttributeDescriptions, false, true, kVUIDUndefined, "VUID-VkPipelineVertexInputStateCreateInfo-pVertexAttributeDescriptions-parameter"); if (pCreateInfos[i].pVertexInputState->pVertexBindingDescriptions != NULL) { for (uint32_t vertex_binding_description_index = 0; vertex_binding_description_index < pCreateInfos[i].pVertexInputState->vertexBindingDescriptionCount; ++vertex_binding_description_index) { skip |= validate_ranged_enum( "vkCreateGraphicsPipelines", "pCreateInfos[i].pVertexInputState->pVertexBindingDescriptions[j].inputRate", "VkVertexInputRate", AllVkVertexInputRateEnums, pCreateInfos[i] .pVertexInputState->pVertexBindingDescriptions[vertex_binding_description_index] .inputRate, "VUID-VkVertexInputBindingDescription-inputRate-parameter"); } } if (pCreateInfos[i].pVertexInputState->pVertexAttributeDescriptions != NULL) { for (uint32_t 
vertex_attribute_description_index = 0;
                         vertex_attribute_description_index < pCreateInfos[i].pVertexInputState->vertexAttributeDescriptionCount;
                         ++vertex_attribute_description_index) {
                        skip |= validate_ranged_enum(
                            "vkCreateGraphicsPipelines", "pCreateInfos[i].pVertexInputState->pVertexAttributeDescriptions[i].format",
                            "VkFormat", AllVkFormatEnums,
                            pCreateInfos[i]
                                .pVertexInputState->pVertexAttributeDescriptions[vertex_attribute_description_index]
                                .format,
                            "VUID-VkVertexInputAttributeDescription-format-parameter");
                    }
                }

                if (vertex_input_state->vertexBindingDescriptionCount > device_limits.maxVertexInputBindings) {
                    skip |= LogError(device, "VUID-VkPipelineVertexInputStateCreateInfo-vertexBindingDescriptionCount-00613",
                                     "vkCreateGraphicsPipelines: parameter "
                                     "pCreateInfo[%d].pVertexInputState->vertexBindingDescriptionCount (%u) is "
                                     "greater than VkPhysicalDeviceLimits::maxVertexInputBindings (%u).",
                                     i, vertex_input_state->vertexBindingDescriptionCount, device_limits.maxVertexInputBindings);
                }

                if (vertex_input_state->vertexAttributeDescriptionCount > device_limits.maxVertexInputAttributes) {
                    skip |= LogError(device, "VUID-VkPipelineVertexInputStateCreateInfo-vertexAttributeDescriptionCount-00614",
                                     "vkCreateGraphicsPipelines: parameter "
                                     "pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptionCount (%u) is "
                                     "greater than VkPhysicalDeviceLimits::maxVertexInputAttributes (%u).",
                                     i, vertex_input_state->vertexAttributeDescriptionCount,
                                     device_limits.maxVertexInputAttributes);
                }

                std::unordered_set<uint32_t> vertex_bindings(vertex_input_state->vertexBindingDescriptionCount);
                for (uint32_t d = 0; d < vertex_input_state->vertexBindingDescriptionCount; ++d) {
                    auto const &vertex_bind_desc = vertex_input_state->pVertexBindingDescriptions[d];
                    auto const &binding_it = vertex_bindings.find(vertex_bind_desc.binding);
                    if (binding_it != vertex_bindings.cend()) {
                        skip |= LogError(device, "VUID-VkPipelineVertexInputStateCreateInfo-pVertexBindingDescriptions-00616",
                                         "vkCreateGraphicsPipelines: parameter "
                                         "pCreateInfo[%d].pVertexInputState->pVertexBindingDescription[%d].binding "
                                         "(%" PRIu32 ") is not distinct.",
                                         i, d, vertex_bind_desc.binding);
                    }
                    vertex_bindings.insert(vertex_bind_desc.binding);

                    if (vertex_bind_desc.binding >= device_limits.maxVertexInputBindings) {
                        skip |= LogError(device, "VUID-VkVertexInputBindingDescription-binding-00618",
                                         "vkCreateGraphicsPipelines: parameter "
                                         "pCreateInfos[%u].pVertexInputState->pVertexBindingDescriptions[%u].binding (%u) is "
                                         "greater than or equal to VkPhysicalDeviceLimits::maxVertexInputBindings (%u).",
                                         i, d, vertex_bind_desc.binding, device_limits.maxVertexInputBindings);
                    }

                    if (vertex_bind_desc.stride > device_limits.maxVertexInputBindingStride) {
                        skip |= LogError(device, "VUID-VkVertexInputBindingDescription-stride-00619",
                                         "vkCreateGraphicsPipelines: parameter "
                                         "pCreateInfos[%u].pVertexInputState->pVertexBindingDescriptions[%u].stride (%u) is greater "
                                         "than VkPhysicalDeviceLimits::maxVertexInputBindingStride (%u).",
                                         i, d, vertex_bind_desc.stride, device_limits.maxVertexInputBindingStride);
                    }
                }

                std::unordered_set<uint32_t> attribute_locations(vertex_input_state->vertexAttributeDescriptionCount);
                for (uint32_t d = 0; d < vertex_input_state->vertexAttributeDescriptionCount; ++d) {
                    auto const &vertex_attrib_desc = vertex_input_state->pVertexAttributeDescriptions[d];
                    auto const &location_it = attribute_locations.find(vertex_attrib_desc.location);
                    if (location_it != attribute_locations.cend()) {
                        skip |= LogError(
                            device,
"VUID-VkPipelineVertexInputStateCreateInfo-pVertexAttributeDescriptions-00617", "vkCreateGraphicsPipelines: parameter " "pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].location (%u) is not distinct.", i, d, vertex_attrib_desc.location); } attribute_locations.insert(vertex_attrib_desc.location); auto const &binding_it = vertex_bindings.find(vertex_attrib_desc.binding); if (binding_it == vertex_bindings.cend()) { skip |= LogError( device, "VUID-VkPipelineVertexInputStateCreateInfo-binding-00615", "vkCreateGraphicsPipelines: parameter " " pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].binding (%u) does not exist " "in any pCreateInfo[%d].pVertexInputState->pVertexBindingDescription.", i, d, vertex_attrib_desc.binding, i); } if (vertex_attrib_desc.location >= device_limits.maxVertexInputAttributes) { skip |= LogError(device, "VUID-VkVertexInputAttributeDescription-location-00620", "vkCreateGraphicsPipelines: parameter " "pCreateInfos[%u].pVertexInputState->pVertexAttributeDescriptions[%u].location (%u) is " "greater than or equal to VkPhysicalDeviceLimits::maxVertexInputAttributes (%u).", i, d, vertex_attrib_desc.location, device_limits.maxVertexInputAttributes); } if (vertex_attrib_desc.binding >= device_limits.maxVertexInputBindings) { skip |= LogError(device, "VUID-VkVertexInputAttributeDescription-binding-00621", "vkCreateGraphicsPipelines: parameter " "pCreateInfos[%u].pVertexInputState->pVertexAttributeDescriptions[%u].binding (%u) is " "greater than or equal to VkPhysicalDeviceLimits::maxVertexInputBindings (%u).", i, d, vertex_attrib_desc.binding, device_limits.maxVertexInputBindings); } if (vertex_attrib_desc.offset > device_limits.maxVertexInputAttributeOffset) { skip |= LogError(device, "VUID-VkVertexInputAttributeDescription-offset-00622", "vkCreateGraphicsPipelines: parameter " "pCreateInfos[%u].pVertexInputState->pVertexAttributeDescriptions[%u].offset (%u) is " "greater than VkPhysicalDeviceLimits::maxVertexInputAttributeOffset (%u).", i, d, vertex_attrib_desc.offset, device_limits.maxVertexInputAttributeOffset); } } } // pTessellationState is ignored without both tessellation control and tessellation evaluation shaders stages if (has_control && has_eval) { if (pCreateInfos[i].pTessellationState == nullptr) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00731", "vkCreateGraphicsPipelines: if pCreateInfos[%d].pStages includes a tessellation control " "shader stage and a tessellation evaluation shader stage, " "pCreateInfos[%d].pTessellationState must not be NULL.", i, i); } else { const VkStructureType allowed_type = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO; skip |= validate_struct_pnext( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pTessellationState->pNext", ParameterName::IndexVector{i}), "VkPipelineTessellationDomainOriginStateCreateInfo", pCreateInfos[i].pTessellationState->pNext, 1, &allowed_type, GeneratedVulkanHeaderVersion, "VUID-VkGraphicsPipelineCreateInfo-pNext-pNext", "VUID-VkGraphicsPipelineCreateInfo-sType-unique"); skip |= validate_reserved_flags( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pTessellationState->flags", ParameterName::IndexVector{i}), pCreateInfos[i].pTessellationState->flags, "VUID-VkPipelineTessellationStateCreateInfo-flags-zerobitmask"); if (pCreateInfos[i].pTessellationState->patchControlPoints == 0 || pCreateInfos[i].pTessellationState->patchControlPoints > device_limits.maxTessellationPatchSize) { skip |= 
LogError(device, "VUID-VkPipelineTessellationStateCreateInfo-patchControlPoints-01214", "vkCreateGraphicsPipelines: invalid parameter " "pCreateInfos[%d].pTessellationState->patchControlPoints value %u. patchControlPoints " "should be >0 and <=%u.", i, pCreateInfos[i].pTessellationState->patchControlPoints, device_limits.maxTessellationPatchSize); } } } // pViewportState, pMultisampleState, pDepthStencilState, and pColorBlendState ignored when rasterization is disabled if ((pCreateInfos[i].pRasterizationState != nullptr) && (pCreateInfos[i].pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) { if (pCreateInfos[i].pViewportState == nullptr) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00750", "vkCreateGraphicsPipelines: Rasterization is enabled (pCreateInfos[%" PRIu32 "].pRasterizationState->rasterizerDiscardEnable is VK_FALSE), but pCreateInfos[%" PRIu32 "].pViewportState (=NULL) is not a valid pointer.", i, i); } else { const auto &viewport_state = *pCreateInfos[i].pViewportState; if (viewport_state.sType != VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO) { skip |= LogError(device, "VUID-VkPipelineViewportStateCreateInfo-sType-sType", "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "].pViewportState->sType is not VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO.", i); } const VkStructureType allowed_structs_vk_pipeline_viewport_state_create_info[] = { VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV, VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV, VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV, VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV, VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV, }; skip |= validate_struct_pnext( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pViewportState->pNext", ParameterName::IndexVector{i}), "VkPipelineViewportSwizzleStateCreateInfoNV, VkPipelineViewportWScalingStateCreateInfoNV, " "VkPipelineViewportExclusiveScissorStateCreateInfoNV, VkPipelineViewportShadingRateImageStateCreateInfoNV, " "VkPipelineViewportCoarseSampleOrderStateCreateInfoNV", viewport_state.pNext, ARRAY_SIZE(allowed_structs_vk_pipeline_viewport_state_create_info), allowed_structs_vk_pipeline_viewport_state_create_info, 65, "VUID-VkPipelineViewportStateCreateInfo-pNext-pNext", "VUID-VkPipelineViewportStateCreateInfo-sType-unique"); skip |= validate_reserved_flags( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pViewportState->flags", ParameterName::IndexVector{i}), viewport_state.flags, "VUID-VkPipelineViewportStateCreateInfo-flags-zerobitmask"); auto exclusive_scissor_struct = LvlFindInChain<VkPipelineViewportExclusiveScissorStateCreateInfoNV>(pCreateInfos[i].pViewportState->pNext); auto shading_rate_image_struct = LvlFindInChain<VkPipelineViewportShadingRateImageStateCreateInfoNV>(pCreateInfos[i].pViewportState->pNext); auto coarse_sample_order_struct = LvlFindInChain<VkPipelineViewportCoarseSampleOrderStateCreateInfoNV>(pCreateInfos[i].pViewportState->pNext); const auto vp_swizzle_struct = LvlFindInChain<VkPipelineViewportSwizzleStateCreateInfoNV>(pCreateInfos[i].pViewportState->pNext); const auto vp_w_scaling_struct = LvlFindInChain<VkPipelineViewportWScalingStateCreateInfoNV>(pCreateInfos[i].pViewportState->pNext); if (!physical_device_features.multiViewport) { if (!has_dynamic_viewport_with_count && (viewport_state.viewportCount != 1)) { skip |= LogError(device, 
"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "vkCreateGraphicsPipelines: The VkPhysicalDeviceFeatures::multiViewport feature is " "disabled, but pCreateInfos[%" PRIu32 "].pViewportState->viewportCount (=%" PRIu32 ") is not 1.", i, viewport_state.viewportCount); } if (!has_dynamic_scissor_with_count && (viewport_state.scissorCount != 1)) { skip |= LogError(device, "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "vkCreateGraphicsPipelines: The VkPhysicalDeviceFeatures::multiViewport feature is " "disabled, but pCreateInfos[%" PRIu32 "].pViewportState->scissorCount (=%" PRIu32 ") is not 1.", i, viewport_state.scissorCount); } if (exclusive_scissor_struct && (exclusive_scissor_struct->exclusiveScissorCount != 0 && exclusive_scissor_struct->exclusiveScissorCount != 1)) { skip |= LogError( device, "VUID-VkPipelineViewportExclusiveScissorStateCreateInfoNV-exclusiveScissorCount-02027", "vkCreateGraphicsPipelines: The VkPhysicalDeviceFeatures::multiViewport feature is " "disabled, but pCreateInfos[%" PRIu32 "] VkPipelineViewportExclusiveScissorStateCreateInfoNV::exclusiveScissorCount (=%" PRIu32 ") is not 1.", i, exclusive_scissor_struct->exclusiveScissorCount); } if (shading_rate_image_struct && (shading_rate_image_struct->viewportCount != 0 && shading_rate_image_struct->viewportCount != 1)) { skip |= LogError(device, "VUID-VkPipelineViewportShadingRateImageStateCreateInfoNV-viewportCount-02054", "vkCreateGraphicsPipelines: The VkPhysicalDeviceFeatures::multiViewport feature is " "disabled, but pCreateInfos[%" PRIu32 "] VkPipelineViewportShadingRateImageStateCreateInfoNV::viewportCount (=%" PRIu32 ") is neither 0 nor 1.", i, shading_rate_image_struct->viewportCount); } } else { // multiViewport enabled if (viewport_state.viewportCount == 0) { if (!has_dynamic_viewport_with_count) { skip |= LogError( device, "VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "].pViewportState->viewportCount is 0.", i); } } else if (viewport_state.viewportCount > device_limits.maxViewports) { skip |= LogError(device, "VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218", "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "].pViewportState->viewportCount (=%" PRIu32 ") is greater than VkPhysicalDeviceLimits::maxViewports (=%" PRIu32 ").", i, viewport_state.viewportCount, device_limits.maxViewports); } else if (has_dynamic_viewport_with_count) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03379", "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "].pViewportState->viewportCount (=%" PRIu32 ") must be zero when VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT is used.", i, viewport_state.viewportCount); } if (viewport_state.scissorCount == 0) { if (!has_dynamic_scissor_with_count) { skip |= LogError( device, "VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength", "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "].pViewportState->scissorCount is 0.", i); } } else if (viewport_state.scissorCount > device_limits.maxViewports) { skip |= LogError(device, "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219", "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "].pViewportState->scissorCount (=%" PRIu32 ") is greater than VkPhysicalDeviceLimits::maxViewports (=%" PRIu32 ").", i, viewport_state.scissorCount, device_limits.maxViewports); } else if (has_dynamic_scissor_with_count) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03380", 
"vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "].pViewportState->scissorCount (=%" PRIu32 ") must be zero when VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT is used.", i, viewport_state.viewportCount); } } if (exclusive_scissor_struct && exclusive_scissor_struct->exclusiveScissorCount > device_limits.maxViewports) { skip |= LogError(device, "VUID-VkPipelineViewportExclusiveScissorStateCreateInfoNV-exclusiveScissorCount-02028", "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "] exclusiveScissorCount (=%" PRIu32 ") is greater than VkPhysicalDeviceLimits::maxViewports (=%" PRIu32 ").", i, exclusive_scissor_struct->exclusiveScissorCount, device_limits.maxViewports); } if (shading_rate_image_struct && shading_rate_image_struct->viewportCount > device_limits.maxViewports) { skip |= LogError(device, "VUID-VkPipelineViewportShadingRateImageStateCreateInfoNV-viewportCount-02055", "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "] VkPipelineViewportShadingRateImageStateCreateInfoNV viewportCount (=%" PRIu32 ") is greater than VkPhysicalDeviceLimits::maxViewports (=%" PRIu32 ").", i, shading_rate_image_struct->viewportCount, device_limits.maxViewports); } if (viewport_state.scissorCount != viewport_state.viewportCount && !(has_dynamic_viewport_with_count || has_dynamic_scissor_with_count)) { skip |= LogError(device, "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220", "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "].pViewportState->scissorCount (=%" PRIu32 ") is not identical to pCreateInfos[%" PRIu32 "].pViewportState->viewportCount (=%" PRIu32 ").", i, viewport_state.scissorCount, i, viewport_state.viewportCount); } if (exclusive_scissor_struct && exclusive_scissor_struct->exclusiveScissorCount != 0 && exclusive_scissor_struct->exclusiveScissorCount != viewport_state.viewportCount) { skip |= LogError(device, "VUID-VkPipelineViewportExclusiveScissorStateCreateInfoNV-exclusiveScissorCount-02029", "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "] exclusiveScissorCount (=%" PRIu32 ") must be zero or identical to pCreateInfos[%" PRIu32 "].pViewportState->viewportCount (=%" PRIu32 ").", i, exclusive_scissor_struct->exclusiveScissorCount, i, viewport_state.viewportCount); } if (shading_rate_image_struct && shading_rate_image_struct->shadingRateImageEnable && shading_rate_image_struct->viewportCount != viewport_state.viewportCount) { skip |= LogError( device, "VUID-VkPipelineViewportShadingRateImageStateCreateInfoNV-shadingRateImageEnable-02056", "vkCreateGraphicsPipelines: If shadingRateImageEnable is enabled, pCreateInfos[%" PRIu32 "] " "VkPipelineViewportShadingRateImageStateCreateInfoNV viewportCount (=%" PRIu32 ") must identical to pCreateInfos[%" PRIu32 "].pViewportState->viewportCount (=%" PRIu32 ").", i, shading_rate_image_struct->viewportCount, i, viewport_state.viewportCount); } if (!has_dynamic_viewport && viewport_state.viewportCount > 0 && viewport_state.pViewports == nullptr) { skip |= LogError( device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747", "vkCreateGraphicsPipelines: The viewport state is static (pCreateInfos[%" PRIu32 "].pDynamicState->pDynamicStates does not contain VK_DYNAMIC_STATE_VIEWPORT), but pCreateInfos[%" PRIu32 "].pViewportState->pViewports (=NULL) is an invalid pointer.", i, i); } if (!has_dynamic_scissor && viewport_state.scissorCount > 0 && viewport_state.pScissors == nullptr) { skip |= LogError( device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748", "vkCreateGraphicsPipelines: The scissor state is static (pCreateInfos[%" 
PRIu32 "].pDynamicState->pDynamicStates does not contain VK_DYNAMIC_STATE_SCISSOR), but pCreateInfos[%" PRIu32 "].pViewportState->pScissors (=NULL) is an invalid pointer.", i, i); } if (!has_dynamic_exclusive_scissor_nv && exclusive_scissor_struct && exclusive_scissor_struct->exclusiveScissorCount > 0 && exclusive_scissor_struct->pExclusiveScissors == nullptr) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04056", "vkCreateGraphicsPipelines: The exclusive scissor state is static (pCreateInfos[%" PRIu32 "].pDynamicState->pDynamicStates does not contain VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV), but " "pCreateInfos[%" PRIu32 "] pExclusiveScissors (=NULL) is an invalid pointer.", i, i); } if (!has_dynamic_shading_rate_palette_nv && shading_rate_image_struct && shading_rate_image_struct->viewportCount > 0 && shading_rate_image_struct->pShadingRatePalettes == nullptr) { skip |= LogError( device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04057", "vkCreateGraphicsPipelines: The shading rate palette state is static (pCreateInfos[%" PRIu32 "].pDynamicState->pDynamicStates does not contain VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV), " "but pCreateInfos[%" PRIu32 "] pShadingRatePalettes (=NULL) is an invalid pointer.", i, i); } if (vp_swizzle_struct) { if (vp_swizzle_struct->viewportCount != viewport_state.viewportCount) { skip |= LogError(device, "VUID-VkPipelineViewportSwizzleStateCreateInfoNV-viewportCount-01215", "vkCreateGraphicsPipelines: The viewport swizzle state vieport count of %" PRIu32 " does " "not match the viewport count of %" PRIu32 " in VkPipelineViewportStateCreateInfo.", vp_swizzle_struct->viewportCount, viewport_state.viewportCount); } } // validate the VkViewports if (!has_dynamic_viewport && viewport_state.pViewports) { for (uint32_t viewport_i = 0; viewport_i < viewport_state.viewportCount; ++viewport_i) { const auto &viewport = viewport_state.pViewports[viewport_i]; // will crash on invalid ptr const char *fn_name = "vkCreateGraphicsPipelines"; skip |= manual_PreCallValidateViewport(viewport, fn_name, ParameterName("pCreateInfos[%i].pViewportState->pViewports[%i]", ParameterName::IndexVector{i, viewport_i}), VkCommandBuffer(0)); } } if (has_dynamic_viewport_w_scaling_nv && !device_extensions.vk_nv_clip_space_w_scaling) { skip |= LogError(device, kVUID_PVError_ExtensionNotEnabled, "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "].pDynamicState->pDynamicStates contains VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV, but " "VK_NV_clip_space_w_scaling extension is not enabled.", i); } if (has_dynamic_discard_rectangle_ext && !device_extensions.vk_ext_discard_rectangles) { skip |= LogError(device, kVUID_PVError_ExtensionNotEnabled, "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "].pDynamicState->pDynamicStates contains VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT, but " "VK_EXT_discard_rectangles extension is not enabled.", i); } if (has_dynamic_sample_locations_ext && !device_extensions.vk_ext_sample_locations) { skip |= LogError(device, kVUID_PVError_ExtensionNotEnabled, "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "].pDynamicState->pDynamicStates contains VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT, but " "VK_EXT_sample_locations extension is not enabled.", i); } if (has_dynamic_exclusive_scissor_nv && !device_extensions.vk_nv_scissor_exclusive) { skip |= LogError(device, kVUID_PVError_ExtensionNotEnabled, "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "].pDynamicState->pDynamicStates contains VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV, but " 
"VK_NV_scissor_exclusive extension is not enabled.", i); } if (coarse_sample_order_struct && coarse_sample_order_struct->sampleOrderType != VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV && coarse_sample_order_struct->customSampleOrderCount != 0) { skip |= LogError(device, "VUID-VkPipelineViewportCoarseSampleOrderStateCreateInfoNV-sampleOrderType-02072", "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "] " "VkPipelineViewportCoarseSampleOrderStateCreateInfoNV sampleOrderType is not " "VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV and customSampleOrderCount is not 0.", i); } if (coarse_sample_order_struct) { for (uint32_t order_i = 0; order_i < coarse_sample_order_struct->customSampleOrderCount; ++order_i) { skip |= ValidateCoarseSampleOrderCustomNV(&coarse_sample_order_struct->pCustomSampleOrders[order_i]); } } if (vp_w_scaling_struct && (vp_w_scaling_struct->viewportWScalingEnable == VK_TRUE)) { if (vp_w_scaling_struct->viewportCount != viewport_state.viewportCount) { skip |= LogError(device, "VUID-VkPipelineViewportStateCreateInfo-viewportWScalingEnable-01726", "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "] " "VkPipelineViewportWScalingStateCreateInfoNV.viewportCount (=%" PRIu32 ") " "is not equal to VkPipelineViewportStateCreateInfo.viewportCount (=%" PRIu32 ").", i, vp_w_scaling_struct->viewportCount, viewport_state.viewportCount); } if (!has_dynamic_viewport_w_scaling_nv && !vp_w_scaling_struct->pViewportWScalings) { skip |= LogError( device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01715", "vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "] " "VkPipelineViewportWScalingStateCreateInfoNV.pViewportWScalings (=NULL) is not a valid array.", i); } } } if (pCreateInfos[i].pMultisampleState == nullptr) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00751", "vkCreateGraphicsPipelines: if pCreateInfos[%d].pRasterizationState->rasterizerDiscardEnable " "is VK_FALSE, pCreateInfos[%d].pMultisampleState must not be NULL.", i, i); } else { const VkStructureType valid_next_stypes[] = {LvlTypeMap<VkPipelineCoverageModulationStateCreateInfoNV>::kSType, LvlTypeMap<VkPipelineCoverageReductionStateCreateInfoNV>::kSType, LvlTypeMap<VkPipelineCoverageToColorStateCreateInfoNV>::kSType, LvlTypeMap<VkPipelineSampleLocationsStateCreateInfoEXT>::kSType}; const char *valid_struct_names = "VkPipelineCoverageModulationStateCreateInfoNV, VkPipelineCoverageToColorStateCreateInfoNV, " "VkPipelineSampleLocationsStateCreateInfoEXT"; skip |= validate_struct_pnext( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pMultisampleState->pNext", ParameterName::IndexVector{i}), valid_struct_names, pCreateInfos[i].pMultisampleState->pNext, 4, valid_next_stypes, GeneratedVulkanHeaderVersion, "VUID-VkPipelineMultisampleStateCreateInfo-pNext-pNext", "VUID-VkPipelineMultisampleStateCreateInfo-sType-unique"); skip |= validate_reserved_flags( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pMultisampleState->flags", ParameterName::IndexVector{i}), pCreateInfos[i].pMultisampleState->flags, "VUID-VkPipelineMultisampleStateCreateInfo-flags-zerobitmask"); skip |= validate_bool32( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pMultisampleState->sampleShadingEnable", ParameterName::IndexVector{i}), pCreateInfos[i].pMultisampleState->sampleShadingEnable); skip |= validate_array( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pMultisampleState->rasterizationSamples", ParameterName::IndexVector{i}), 
ParameterName("pCreateInfos[%i].pMultisampleState->pSampleMask", ParameterName::IndexVector{i}), pCreateInfos[i].pMultisampleState->rasterizationSamples, &pCreateInfos[i].pMultisampleState->pSampleMask, true, false, kVUIDUndefined, kVUIDUndefined); skip |= validate_flags( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pMultisampleState->rasterizationSamples", ParameterName::IndexVector{i}), "VkSampleCountFlagBits", AllVkSampleCountFlagBits, pCreateInfos[i].pMultisampleState->rasterizationSamples, kRequiredSingleBit, "VUID-VkPipelineMultisampleStateCreateInfo-rasterizationSamples-parameter"); skip |= validate_bool32( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pMultisampleState->alphaToCoverageEnable", ParameterName::IndexVector{i}), pCreateInfos[i].pMultisampleState->alphaToCoverageEnable); skip |= validate_bool32( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pMultisampleState->alphaToOneEnable", ParameterName::IndexVector{i}), pCreateInfos[i].pMultisampleState->alphaToOneEnable); if (pCreateInfos[i].pMultisampleState->sType != VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO) { skip |= LogError(device, "VUID-VkPipelineMultisampleStateCreateInfo-sType-sType", "vkCreateGraphicsPipelines: parameter pCreateInfos[%d].pMultisampleState->sType must be " "VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO", i); } if (pCreateInfos[i].pMultisampleState->sampleShadingEnable == VK_TRUE) { if (!physical_device_features.sampleRateShading) { skip |= LogError(device, "VUID-VkPipelineMultisampleStateCreateInfo-sampleShadingEnable-00784", "vkCreateGraphicsPipelines(): parameter " "pCreateInfos[%d].pMultisampleState->sampleShadingEnable.", i); } // TODO Add documentation issue about when minSampleShading must be in range and when it is ignored // For now a "least noise" test *only* when sampleShadingEnable is VK_TRUE. 
if (!in_inclusive_range(pCreateInfos[i].pMultisampleState->minSampleShading, 0.F, 1.0F)) {
                        skip |= LogError(device, "VUID-VkPipelineMultisampleStateCreateInfo-minSampleShading-00786",
                                         "vkCreateGraphicsPipelines(): parameter "
                                         "pCreateInfos[%d].pMultisampleState->minSampleShading (%f) is not in the range "
                                         "[0.0, 1.0].",
                                         i, pCreateInfos[i].pMultisampleState->minSampleShading);
                    }
                }

                const auto *line_state =
                    LvlFindInChain<VkPipelineRasterizationLineStateCreateInfoEXT>(pCreateInfos[i].pRasterizationState->pNext);

                if (line_state) {
                    if ((line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT ||
                         line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT)) {
                        if (pCreateInfos[i].pMultisampleState->alphaToCoverageEnable) {
                            skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-lineRasterizationMode-02766",
                                             "vkCreateGraphicsPipelines(): Bresenham/Smooth line rasterization not supported with "
                                             "pCreateInfos[%d].pMultisampleState->alphaToCoverageEnable == VK_TRUE.",
                                             i);
                        }
                        if (pCreateInfos[i].pMultisampleState->alphaToOneEnable) {
                            skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-lineRasterizationMode-02766",
                                             "vkCreateGraphicsPipelines(): Bresenham/Smooth line rasterization not supported with "
                                             "pCreateInfos[%d].pMultisampleState->alphaToOneEnable == VK_TRUE.",
                                             i);
                        }
                        if (pCreateInfos[i].pMultisampleState->sampleShadingEnable) {
                            skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-lineRasterizationMode-02766",
                                             "vkCreateGraphicsPipelines(): Bresenham/Smooth line rasterization not supported with "
                                             "pCreateInfos[%d].pMultisampleState->sampleShadingEnable == VK_TRUE.",
                                             i);
                        }
                    }

                    if (line_state->stippledLineEnable && !has_dynamic_line_stipple) {
                        if (line_state->lineStippleFactor < 1 || line_state->lineStippleFactor > 256) {
                            skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stippledLineEnable-02767",
                                             "vkCreateGraphicsPipelines(): pCreateInfos[%d] lineStippleFactor = %d must be in the "
                                             "range [1,256].",
                                             i, line_state->lineStippleFactor);
                        }
                    }

                    const auto *line_features =
                        LvlFindInChain<VkPhysicalDeviceLineRasterizationFeaturesEXT>(device_createinfo_pnext);
                    if (line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT &&
                        (!line_features || !line_features->rectangularLines)) {
                        skip |= LogError(device, "VUID-VkPipelineRasterizationLineStateCreateInfoEXT-lineRasterizationMode-02768",
                                         "vkCreateGraphicsPipelines(): pCreateInfos[%d] lineRasterizationMode = "
                                         "VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT requires the rectangularLines feature.",
                                         i);
                    }
                    if (line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT &&
                        (!line_features || !line_features->bresenhamLines)) {
                        skip |= LogError(device, "VUID-VkPipelineRasterizationLineStateCreateInfoEXT-lineRasterizationMode-02769",
                                         "vkCreateGraphicsPipelines(): pCreateInfos[%d] lineRasterizationMode = "
                                         "VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT requires the bresenhamLines feature.",
                                         i);
                    }
                    if (line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT &&
                        (!line_features || !line_features->smoothLines)) {
                        skip |= LogError(device, "VUID-VkPipelineRasterizationLineStateCreateInfoEXT-lineRasterizationMode-02770",
                                         "vkCreateGraphicsPipelines(): pCreateInfos[%d] lineRasterizationMode = "
                                         "VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT requires the smoothLines feature.",
                                         i);
                    }
                    if (line_state->stippledLineEnable) {
                        if (line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT &&
                            (!line_features || !line_features->stippledRectangularLines)) {
                            skip |= LogError(device,
"VUID-VkPipelineRasterizationLineStateCreateInfoEXT-stippledLineEnable-02771", "vkCreateGraphicsPipelines(): pCreateInfos[%d] lineRasterizationMode = " "VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT with stipple requires the " "stippledRectangularLines feature.", i); } if (line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT && (!line_features || !line_features->stippledBresenhamLines)) { skip |= LogError(device, "VUID-VkPipelineRasterizationLineStateCreateInfoEXT-stippledLineEnable-02772", "vkCreateGraphicsPipelines(): pCreateInfos[%d] lineRasterizationMode = " "VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT with stipple requires the " "stippledBresenhamLines feature.", i); } if (line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT && (!line_features || !line_features->stippledSmoothLines)) { skip |= LogError(device, "VUID-VkPipelineRasterizationLineStateCreateInfoEXT-stippledLineEnable-02773", "vkCreateGraphicsPipelines(): pCreateInfos[%d] lineRasterizationMode = " "VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT with stipple requires the " "stippledSmoothLines feature.", i); } if (line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT && (!line_features || !line_features->stippledSmoothLines || !device_limits.strictLines)) { skip |= LogError(device, "VUID-VkPipelineRasterizationLineStateCreateInfoEXT-stippledLineEnable-02774", "vkCreateGraphicsPipelines(): pCreateInfos[%d] lineRasterizationMode = " "VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT with stipple requires the " "stippledRectangularLines and strictLines features.", i); } } } } bool uses_color_attachment = false; bool uses_depthstencil_attachment = false; { std::unique_lock<std::mutex> lock(renderpass_map_mutex); const auto subpasses_uses_it = renderpasses_states.find(pCreateInfos[i].renderPass); if (subpasses_uses_it != renderpasses_states.end()) { const auto &subpasses_uses = subpasses_uses_it->second; if (subpasses_uses.subpasses_using_color_attachment.count(pCreateInfos[i].subpass)) { uses_color_attachment = true; } if (subpasses_uses.subpasses_using_depthstencil_attachment.count(pCreateInfos[i].subpass)) { uses_depthstencil_attachment = true; } } lock.unlock(); } if (pCreateInfos[i].pDepthStencilState != nullptr && uses_depthstencil_attachment) { skip |= validate_struct_pnext( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pDepthStencilState->pNext", ParameterName::IndexVector{i}), NULL, pCreateInfos[i].pDepthStencilState->pNext, 0, NULL, GeneratedVulkanHeaderVersion, "VUID-VkPipelineDepthStencilStateCreateInfo-pNext-pNext", nullptr); skip |= validate_reserved_flags( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pDepthStencilState->flags", ParameterName::IndexVector{i}), pCreateInfos[i].pDepthStencilState->flags, "VUID-VkPipelineDepthStencilStateCreateInfo-flags-zerobitmask"); skip |= validate_bool32( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pDepthStencilState->depthTestEnable", ParameterName::IndexVector{i}), pCreateInfos[i].pDepthStencilState->depthTestEnable); skip |= validate_bool32( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pDepthStencilState->depthWriteEnable", ParameterName::IndexVector{i}), pCreateInfos[i].pDepthStencilState->depthWriteEnable); skip |= validate_ranged_enum( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pDepthStencilState->depthCompareOp", ParameterName::IndexVector{i}), "VkCompareOp", AllVkCompareOpEnums, 
pCreateInfos[i].pDepthStencilState->depthCompareOp,
                    "VUID-VkPipelineDepthStencilStateCreateInfo-depthCompareOp-parameter");

                skip |= validate_bool32(
                    "vkCreateGraphicsPipelines",
                    ParameterName("pCreateInfos[%i].pDepthStencilState->depthBoundsTestEnable", ParameterName::IndexVector{i}),
                    pCreateInfos[i].pDepthStencilState->depthBoundsTestEnable);

                skip |= validate_bool32(
                    "vkCreateGraphicsPipelines",
                    ParameterName("pCreateInfos[%i].pDepthStencilState->stencilTestEnable", ParameterName::IndexVector{i}),
                    pCreateInfos[i].pDepthStencilState->stencilTestEnable);

                skip |= validate_ranged_enum(
                    "vkCreateGraphicsPipelines",
                    ParameterName("pCreateInfos[%i].pDepthStencilState->front.failOp", ParameterName::IndexVector{i}),
                    "VkStencilOp", AllVkStencilOpEnums, pCreateInfos[i].pDepthStencilState->front.failOp,
                    "VUID-VkStencilOpState-failOp-parameter");

                skip |= validate_ranged_enum(
                    "vkCreateGraphicsPipelines",
                    ParameterName("pCreateInfos[%i].pDepthStencilState->front.passOp", ParameterName::IndexVector{i}),
                    "VkStencilOp", AllVkStencilOpEnums, pCreateInfos[i].pDepthStencilState->front.passOp,
                    "VUID-VkStencilOpState-passOp-parameter");

                skip |= validate_ranged_enum(
                    "vkCreateGraphicsPipelines",
                    ParameterName("pCreateInfos[%i].pDepthStencilState->front.depthFailOp", ParameterName::IndexVector{i}),
                    "VkStencilOp", AllVkStencilOpEnums, pCreateInfos[i].pDepthStencilState->front.depthFailOp,
                    "VUID-VkStencilOpState-depthFailOp-parameter");

                skip |= validate_ranged_enum(
                    "vkCreateGraphicsPipelines",
                    ParameterName("pCreateInfos[%i].pDepthStencilState->front.compareOp", ParameterName::IndexVector{i}),
                    "VkCompareOp", AllVkCompareOpEnums, pCreateInfos[i].pDepthStencilState->front.compareOp,
                    "VUID-VkStencilOpState-compareOp-parameter");

                skip |= validate_ranged_enum(
                    "vkCreateGraphicsPipelines",
                    ParameterName("pCreateInfos[%i].pDepthStencilState->back.failOp", ParameterName::IndexVector{i}),
                    "VkStencilOp", AllVkStencilOpEnums, pCreateInfos[i].pDepthStencilState->back.failOp,
                    "VUID-VkStencilOpState-failOp-parameter");

                skip |= validate_ranged_enum(
                    "vkCreateGraphicsPipelines",
                    ParameterName("pCreateInfos[%i].pDepthStencilState->back.passOp", ParameterName::IndexVector{i}),
                    "VkStencilOp", AllVkStencilOpEnums, pCreateInfos[i].pDepthStencilState->back.passOp,
                    "VUID-VkStencilOpState-passOp-parameter");

                skip |= validate_ranged_enum(
                    "vkCreateGraphicsPipelines",
                    ParameterName("pCreateInfos[%i].pDepthStencilState->back.depthFailOp", ParameterName::IndexVector{i}),
                    "VkStencilOp", AllVkStencilOpEnums, pCreateInfos[i].pDepthStencilState->back.depthFailOp,
                    "VUID-VkStencilOpState-depthFailOp-parameter");

                skip |= validate_ranged_enum(
                    "vkCreateGraphicsPipelines",
                    ParameterName("pCreateInfos[%i].pDepthStencilState->back.compareOp", ParameterName::IndexVector{i}),
                    "VkCompareOp", AllVkCompareOpEnums, pCreateInfos[i].pDepthStencilState->back.compareOp,
                    "VUID-VkStencilOpState-compareOp-parameter");

                if (pCreateInfos[i].pDepthStencilState->sType != VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO) {
                    skip |= LogError(device, "VUID-VkPipelineDepthStencilStateCreateInfo-sType-sType",
                                     "vkCreateGraphicsPipelines: parameter pCreateInfos[%d].pDepthStencilState->sType must be "
                                     "VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO",
                                     i);
                }
            }

            const VkStructureType allowed_structs_vk_pipeline_color_blend_state_create_info[] = {
                VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT};

            if (pCreateInfos[i].pColorBlendState != nullptr && uses_color_attachment) {
                skip |=
validate_struct_type("vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pColorBlendState", ParameterName::IndexVector{i}), "VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO", pCreateInfos[i].pColorBlendState, VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, false, kVUIDUndefined, "VUID-VkPipelineColorBlendStateCreateInfo-sType-sType"); skip |= validate_struct_pnext( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pColorBlendState->pNext", ParameterName::IndexVector{i}), "VkPipelineColorBlendAdvancedStateCreateInfoEXT", pCreateInfos[i].pColorBlendState->pNext, ARRAY_SIZE(allowed_structs_vk_pipeline_color_blend_state_create_info), allowed_structs_vk_pipeline_color_blend_state_create_info, GeneratedVulkanHeaderVersion, "VUID-VkPipelineColorBlendStateCreateInfo-pNext-pNext", "VUID-VkPipelineColorBlendStateCreateInfo-sType-unique"); skip |= validate_reserved_flags( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pColorBlendState->flags", ParameterName::IndexVector{i}), pCreateInfos[i].pColorBlendState->flags, "VUID-VkPipelineColorBlendStateCreateInfo-flags-zerobitmask"); skip |= validate_bool32( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pColorBlendState->logicOpEnable", ParameterName::IndexVector{i}), pCreateInfos[i].pColorBlendState->logicOpEnable); skip |= validate_array( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pColorBlendState->attachmentCount", ParameterName::IndexVector{i}), ParameterName("pCreateInfos[%i].pColorBlendState->pAttachments", ParameterName::IndexVector{i}), pCreateInfos[i].pColorBlendState->attachmentCount, &pCreateInfos[i].pColorBlendState->pAttachments, false, true, kVUIDUndefined, kVUIDUndefined); if (pCreateInfos[i].pColorBlendState->pAttachments != NULL) { for (uint32_t attachment_index = 0; attachment_index < pCreateInfos[i].pColorBlendState->attachmentCount; ++attachment_index) { skip |= validate_bool32("vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pColorBlendState->pAttachments[%i].blendEnable", ParameterName::IndexVector{i, attachment_index}), pCreateInfos[i].pColorBlendState->pAttachments[attachment_index].blendEnable); skip |= validate_ranged_enum( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pColorBlendState->pAttachments[%i].srcColorBlendFactor", ParameterName::IndexVector{i, attachment_index}), "VkBlendFactor", AllVkBlendFactorEnums, pCreateInfos[i].pColorBlendState->pAttachments[attachment_index].srcColorBlendFactor, "VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-parameter"); skip |= validate_ranged_enum( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pColorBlendState->pAttachments[%i].dstColorBlendFactor", ParameterName::IndexVector{i, attachment_index}), "VkBlendFactor", AllVkBlendFactorEnums, pCreateInfos[i].pColorBlendState->pAttachments[attachment_index].dstColorBlendFactor, "VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-parameter"); skip |= validate_ranged_enum( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pColorBlendState->pAttachments[%i].colorBlendOp", ParameterName::IndexVector{i, attachment_index}), "VkBlendOp", AllVkBlendOpEnums, pCreateInfos[i].pColorBlendState->pAttachments[attachment_index].colorBlendOp, "VUID-VkPipelineColorBlendAttachmentState-colorBlendOp-parameter"); skip |= validate_ranged_enum( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pColorBlendState->pAttachments[%i].srcAlphaBlendFactor", ParameterName::IndexVector{i, attachment_index}), 
"VkBlendFactor", AllVkBlendFactorEnums, pCreateInfos[i].pColorBlendState->pAttachments[attachment_index].srcAlphaBlendFactor, "VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-parameter"); skip |= validate_ranged_enum( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pColorBlendState->pAttachments[%i].dstAlphaBlendFactor", ParameterName::IndexVector{i, attachment_index}), "VkBlendFactor", AllVkBlendFactorEnums, pCreateInfos[i].pColorBlendState->pAttachments[attachment_index].dstAlphaBlendFactor, "VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-parameter"); skip |= validate_ranged_enum( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pColorBlendState->pAttachments[%i].alphaBlendOp", ParameterName::IndexVector{i, attachment_index}), "VkBlendOp", AllVkBlendOpEnums, pCreateInfos[i].pColorBlendState->pAttachments[attachment_index].alphaBlendOp, "VUID-VkPipelineColorBlendAttachmentState-alphaBlendOp-parameter"); skip |= validate_flags("vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pColorBlendState->pAttachments[%i].colorWriteMask", ParameterName::IndexVector{i, attachment_index}), "VkColorComponentFlagBits", AllVkColorComponentFlagBits, pCreateInfos[i].pColorBlendState->pAttachments[attachment_index].colorWriteMask, kOptionalFlags, "VUID-VkPipelineColorBlendAttachmentState-colorWriteMask-parameter"); } } if (pCreateInfos[i].pColorBlendState->sType != VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO) { skip |= LogError(device, "VUID-VkPipelineColorBlendStateCreateInfo-sType-sType", "vkCreateGraphicsPipelines: parameter pCreateInfos[%d].pColorBlendState->sType must be " "VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO", i); } // If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value if (pCreateInfos[i].pColorBlendState->logicOpEnable == VK_TRUE) { skip |= validate_ranged_enum( "vkCreateGraphicsPipelines", ParameterName("pCreateInfos[%i].pColorBlendState->logicOp", ParameterName::IndexVector{i}), "VkLogicOp", AllVkLogicOpEnums, pCreateInfos[i].pColorBlendState->logicOp, "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00607"); } } } const VkPipelineCreateFlags flags = pCreateInfos[i].flags; if (flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) { if (pCreateInfos[i].basePipelineIndex != -1) { if (pCreateInfos[i].basePipelineHandle != VK_NULL_HANDLE) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-flags-00724", "vkCreateGraphicsPipelines parameter, pCreateInfos[%u]->basePipelineHandle, must be " "VK_NULL_HANDLE if pCreateInfos->flags contains the VK_PIPELINE_CREATE_DERIVATIVE_BIT flag " "and pCreateInfos->basePipelineIndex is not -1.", i); } } if (pCreateInfos[i].basePipelineHandle != VK_NULL_HANDLE) { if (pCreateInfos[i].basePipelineIndex != -1) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-flags-00725", "vkCreateGraphicsPipelines parameter, pCreateInfos[%u]->basePipelineIndex, must be -1 if " "pCreateInfos->flags contains the VK_PIPELINE_CREATE_DERIVATIVE_BIT flag and " "pCreateInfos->basePipelineHandle is not VK_NULL_HANDLE.", i); } } else { if (static_cast<uint32_t>(pCreateInfos[i].basePipelineIndex) >= createInfoCount) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-flags-00723", "vkCreateGraphicsPipelines parameter pCreateInfos[%u]->basePipelineIndex (%d) must be a valid" "index into the pCreateInfos array, of size %d.", i, pCreateInfos[i].basePipelineIndex, createInfoCount); } } } if (pCreateInfos[i].pRasterizationState) { if 
(!device_extensions.vk_nv_fill_rectangle) { if (pCreateInfos[i].pRasterizationState->polygonMode == VK_POLYGON_MODE_FILL_RECTANGLE_NV) { skip |= LogError(device, "VUID-VkPipelineRasterizationStateCreateInfo-polygonMode-01414", "vkCreateGraphicsPipelines parameter, VkPolygonMode " "pCreateInfos->pRasterizationState->polygonMode cannot be VK_POLYGON_MODE_FILL_RECTANGLE_NV " "if the extension VK_NV_fill_rectangle is not enabled."); } else if ((pCreateInfos[i].pRasterizationState->polygonMode != VK_POLYGON_MODE_FILL) && (physical_device_features.fillModeNonSolid == false)) { skip |= LogError(device, "VUID-VkPipelineRasterizationStateCreateInfo-polygonMode-01413", "vkCreateGraphicsPipelines parameter, VkPolygonMode " "pCreateInfos[%u]->pRasterizationState->polygonMode cannot be VK_POLYGON_MODE_POINT or " "VK_POLYGON_MODE_LINE if VkPhysicalDeviceFeatures->fillModeNonSolid is false.", i); } } else { if ((pCreateInfos[i].pRasterizationState->polygonMode != VK_POLYGON_MODE_FILL) && (pCreateInfos[i].pRasterizationState->polygonMode != VK_POLYGON_MODE_FILL_RECTANGLE_NV) && (physical_device_features.fillModeNonSolid == false)) { skip |= LogError(device, "VUID-VkPipelineRasterizationStateCreateInfo-polygonMode-01507", "vkCreateGraphicsPipelines parameter, VkPolygonMode " "pCreateInfos[%u]->pRasterizationState->polygonMode must be VK_POLYGON_MODE_FILL or " "VK_POLYGON_MODE_FILL_RECTANGLE_NV if VkPhysicalDeviceFeatures->fillModeNonSolid is false.", i); } } if (!has_dynamic_line_width && !physical_device_features.wideLines && (pCreateInfos[i].pRasterizationState->lineWidth != 1.0f)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00749", "The line width state is static (pCreateInfos[%" PRIu32 "].pDynamicState->pDynamicStates does not contain VK_DYNAMIC_STATE_LINE_WIDTH) and " "VkPhysicalDeviceFeatures::wideLines is disabled, but pCreateInfos[%" PRIu32 "].pRasterizationState->lineWidth (=%f) is not 1.0.", i, i, pCreateInfos[i].pRasterizationState->lineWidth); } } // Validate no flags not allowed are used if ((flags & VK_PIPELINE_CREATE_DISPATCH_BASE) != 0) { skip |= LogError( device, "VUID-VkGraphicsPipelineCreateInfo-flags-00764", "vkCreateGraphicsPipelines(): pCreateInfos[%u]->flags must not include VK_PIPELINE_CREATE_DISPATCH_BASE", i); } if ((flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) != 0) { skip |= LogError( device, "VUID-VkGraphicsPipelineCreateInfo-flags-03371", "vkCreateGraphicsPipelines(): pCreateInfos[%u]->flags must not include VK_PIPELINE_CREATE_LIBRARY_BIT_KHR", i); } if ((flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR) != 0) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-flags-03372", "vkCreateGraphicsPipelines(): pCreateInfos[%u]->flags must not include " "VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR", i); } if ((flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR) != 0) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-flags-03373", "vkCreateGraphicsPipelines(): pCreateInfos[%u]->flags must not include " "VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR", i); } if ((flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR) != 0) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-flags-03374", "vkCreateGraphicsPipelines(): pCreateInfos[%u]->flags must not include " "VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR", i); } if ((flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR) != 0) { skip |= 
LogError(device, "VUID-VkGraphicsPipelineCreateInfo-flags-03375", "vkCreateGraphicsPipelines(): pCreateInfos[%u]->flags must not include " "VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR", i); } if ((flags & VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR) != 0) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-flags-03376", "vkCreateGraphicsPipelines(): pCreateInfos[%u]->flags must not include " "VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR", i); } if ((flags & VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR) != 0) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-flags-03377", "vkCreateGraphicsPipelines(): pCreateInfos[%u]->flags must not include " "VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR", i); } if ((flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR) != 0) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-flags-03577", "vkCreateGraphicsPipelines(): pCreateInfos[%u]->flags must not include " "VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR", i); } } } return skip; } bool StatelessValidation::manual_PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) const { bool skip = false; for (uint32_t i = 0; i < createInfoCount; i++) { skip |= validate_string("vkCreateComputePipelines", ParameterName("pCreateInfos[%i].stage.pName", ParameterName::IndexVector{i}), "VUID-VkPipelineShaderStageCreateInfo-pName-parameter", pCreateInfos[i].stage.pName); auto feedback_struct = LvlFindInChain<VkPipelineCreationFeedbackCreateInfoEXT>(pCreateInfos[i].pNext); if ((feedback_struct != nullptr) && (feedback_struct->pipelineStageCreationFeedbackCount != 1)) { skip |= LogError(device, "VUID-VkPipelineCreationFeedbackCreateInfoEXT-pipelineStageCreationFeedbackCount-02669", "vkCreateComputePipelines(): in pCreateInfo[%" PRIu32 "], VkPipelineCreationFeedbackEXT::pipelineStageCreationFeedbackCount must equal 1, found %" PRIu32 ".", i, feedback_struct->pipelineStageCreationFeedbackCount); } // Make sure compute stage is selected if (pCreateInfos[i].stage.stage != VK_SHADER_STAGE_COMPUTE_BIT) { skip |= LogError(device, "VUID-VkComputePipelineCreateInfo-stage-00701", "vkCreateComputePipelines(): the pCreateInfo[%u].stage.stage (%s) is not VK_SHADER_STAGE_COMPUTE_BIT", i, string_VkShaderStageFlagBits(pCreateInfos[i].stage.stage)); } if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR) { skip |= LogError(device, "VUID-VkComputePipelineCreateInfo-flags-03370", "vkCreateComputePipelines(): flags must not include VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR"); } } return skip; } bool StatelessValidation::manual_PreCallValidateCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) const { bool skip = false; if (pCreateInfo != nullptr) { const auto &features = physical_device_features; const auto &limits = device_limits; if (pCreateInfo->anisotropyEnable == VK_TRUE) { if (!in_inclusive_range(pCreateInfo->maxAnisotropy, 1.0F, limits.maxSamplerAnisotropy)) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-anisotropyEnable-01071", "vkCreateSampler(): value of %s must be in range [1.0, %f] %s, but %f found.", "pCreateInfo->maxAnisotropy", limits.maxSamplerAnisotropy, "VkPhysicalDeviceLimits::maxSamplerAnistropy", 
pCreateInfo->maxAnisotropy);
            }

            // Anisotropy cannot be enabled in the sampler unless enabled as a feature
            if (features.samplerAnisotropy == VK_FALSE) {
                skip |= LogError(device, "VUID-VkSamplerCreateInfo-anisotropyEnable-01070",
                                 "vkCreateSampler(): Anisotropic sampling feature is not enabled, %s must be VK_FALSE.",
                                 "pCreateInfo->anisotropyEnable");
            }
        }

        if (pCreateInfo->unnormalizedCoordinates == VK_TRUE) {
            if (pCreateInfo->minFilter != pCreateInfo->magFilter) {
                skip |= LogError(device, "VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01072",
                                 "vkCreateSampler(): when pCreateInfo->unnormalizedCoordinates is VK_TRUE, "
                                 "pCreateInfo->minFilter (%s) and pCreateInfo->magFilter (%s) must be equal.",
                                 string_VkFilter(pCreateInfo->minFilter), string_VkFilter(pCreateInfo->magFilter));
            }
            if (pCreateInfo->mipmapMode != VK_SAMPLER_MIPMAP_MODE_NEAREST) {
                skip |= LogError(device, "VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01073",
                                 "vkCreateSampler(): when pCreateInfo->unnormalizedCoordinates is VK_TRUE, "
                                 "pCreateInfo->mipmapMode (%s) must be VK_SAMPLER_MIPMAP_MODE_NEAREST.",
                                 string_VkSamplerMipmapMode(pCreateInfo->mipmapMode));
            }
            if (pCreateInfo->minLod != 0.0f || pCreateInfo->maxLod != 0.0f) {
                skip |= LogError(device, "VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01074",
                                 "vkCreateSampler(): when pCreateInfo->unnormalizedCoordinates is VK_TRUE, "
                                 "pCreateInfo->minLod (%f) and pCreateInfo->maxLod (%f) must both be zero.",
                                 pCreateInfo->minLod, pCreateInfo->maxLod);
            }
            if ((pCreateInfo->addressModeU != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE &&
                 pCreateInfo->addressModeU != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER) ||
                (pCreateInfo->addressModeV != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE &&
                 pCreateInfo->addressModeV != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER)) {
                skip |= LogError(device, "VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01075",
                                 "vkCreateSampler(): when pCreateInfo->unnormalizedCoordinates is VK_TRUE, "
                                 "pCreateInfo->addressModeU (%s) and pCreateInfo->addressModeV (%s) must both be "
                                 "VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE or VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER.",
                                 string_VkSamplerAddressMode(pCreateInfo->addressModeU),
                                 string_VkSamplerAddressMode(pCreateInfo->addressModeV));
            }
            if (pCreateInfo->anisotropyEnable == VK_TRUE) {
                skip |= LogError(device, "VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01076",
                                 "vkCreateSampler(): pCreateInfo->anisotropyEnable and pCreateInfo->unnormalizedCoordinates must "
                                 "not both be VK_TRUE.");
            }
            if (pCreateInfo->compareEnable == VK_TRUE) {
                skip |= LogError(device, "VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01077",
                                 "vkCreateSampler(): pCreateInfo->compareEnable and pCreateInfo->unnormalizedCoordinates must "
                                 "not both be VK_TRUE.");
            }
        }

        // If compareEnable is VK_TRUE, compareOp must be a valid VkCompareOp value
        if (pCreateInfo->compareEnable == VK_TRUE) {
            skip |= validate_ranged_enum("vkCreateSampler", "pCreateInfo->compareOp", "VkCompareOp", AllVkCompareOpEnums,
                                         pCreateInfo->compareOp, "VUID-VkSamplerCreateInfo-compareEnable-01080");
            const auto *sampler_reduction = LvlFindInChain<VkSamplerReductionModeCreateInfo>(pCreateInfo->pNext);
            if (sampler_reduction != nullptr) {
                if (sampler_reduction->reductionMode != VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE) {
                    skip |= LogError(
                        device, "VUID-VkSamplerCreateInfo-compareEnable-01423",
                        "vkCreateSampler(): compareEnable is VK_TRUE, so the sampler reduction mode must be "
                        "VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE.");
                }
            }
        }

        // If any of addressModeU, addressModeV or addressModeW are VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, borderColor must
be a // valid VkBorderColor value if ((pCreateInfo->addressModeU == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER) || (pCreateInfo->addressModeV == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER) || (pCreateInfo->addressModeW == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER)) { skip |= validate_ranged_enum("vkCreateSampler", "pCreateInfo->borderColor", "VkBorderColor", AllVkBorderColorEnums, pCreateInfo->borderColor, "VUID-VkSamplerCreateInfo-addressModeU-01078"); } // If any of addressModeU, addressModeV or addressModeW are VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE, the // VK_KHR_sampler_mirror_clamp_to_edge extension must be enabled if (!device_extensions.vk_khr_sampler_mirror_clamp_to_edge && ((pCreateInfo->addressModeU == VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE) || (pCreateInfo->addressModeV == VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE) || (pCreateInfo->addressModeW == VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE))) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-addressModeU-01079", "vkCreateSampler(): A VkSamplerAddressMode value is set to VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE " "but the VK_KHR_sampler_mirror_clamp_to_edge extension has not been enabled."); } // Checks for the IMG cubic filtering extension if (device_extensions.vk_img_filter_cubic) { if ((pCreateInfo->anisotropyEnable == VK_TRUE) && ((pCreateInfo->minFilter == VK_FILTER_CUBIC_IMG) || (pCreateInfo->magFilter == VK_FILTER_CUBIC_IMG))) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-magFilter-01081", "vkCreateSampler(): Anisotropic sampling must not be VK_TRUE when either minFilter or magFilter " "are VK_FILTER_CUBIC_IMG."); } } // Check for valid Lod range if (pCreateInfo->minLod > pCreateInfo->maxLod) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-maxLod-01973", "vkCreateSampler(): minLod (%f) is greater than maxLod (%f)", pCreateInfo->minLod, pCreateInfo->maxLod); } // Check mipLodBias to device limit if (pCreateInfo->mipLodBias > limits.maxSamplerLodBias) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-mipLodBias-01069", "vkCreateSampler(): mipLodBias (%f) is greater than VkPhysicalDeviceLimits::maxSamplerLodBias (%f)", pCreateInfo->mipLodBias, limits.maxSamplerLodBias); } const auto *sampler_conversion = LvlFindInChain<VkSamplerYcbcrConversionInfo>(pCreateInfo->pNext); if (sampler_conversion != nullptr) { if ((pCreateInfo->addressModeU != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE) || (pCreateInfo->addressModeV != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE) || (pCreateInfo->addressModeW != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE) || (pCreateInfo->anisotropyEnable != VK_FALSE) || (pCreateInfo->unnormalizedCoordinates != VK_FALSE)) { skip |= LogError( device, "VUID-VkSamplerCreateInfo-addressModeU-01646", "vkCreateSampler(): SamplerYCbCrConversion is enabled: " "addressModeU (%s), addressModeV (%s), addressModeW (%s) must be CLAMP_TO_EDGE, and anisotropyEnable (%s) " "and unnormalizedCoordinates (%s) must be VK_FALSE.", string_VkSamplerAddressMode(pCreateInfo->addressModeU), string_VkSamplerAddressMode(pCreateInfo->addressModeV), string_VkSamplerAddressMode(pCreateInfo->addressModeW), pCreateInfo->anisotropyEnable ? "VK_TRUE" : "VK_FALSE", pCreateInfo->unnormalizedCoordinates ? 
"VK_TRUE" : "VK_FALSE"); } } if (pCreateInfo->flags & VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT) { if (pCreateInfo->minFilter != pCreateInfo->magFilter) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-flags-02574", "vkCreateSampler(): when flags includes VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT, " "pCreateInfo->minFilter (%s) and pCreateInfo->magFilter (%s) must be equal.", string_VkFilter(pCreateInfo->minFilter), string_VkFilter(pCreateInfo->magFilter)); } if (pCreateInfo->mipmapMode != VK_SAMPLER_MIPMAP_MODE_NEAREST) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-flags-02575", "vkCreateSampler(): when flags includes VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT, " "pCreateInfo->mipmapMode (%s) must be VK_SAMPLER_MIPMAP_MODE_NEAREST.", string_VkSamplerMipmapMode(pCreateInfo->mipmapMode)); } if (pCreateInfo->minLod != 0.0 || pCreateInfo->maxLod != 0.0) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-flags-02576", "vkCreateSampler(): when flags includes VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT, " "pCreateInfo->minLod (%f) and pCreateInfo->maxLod (%f) must be zero.", pCreateInfo->minLod, pCreateInfo->maxLod); } if (((pCreateInfo->addressModeU != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE) && (pCreateInfo->addressModeU != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER)) || ((pCreateInfo->addressModeV != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE) && (pCreateInfo->addressModeV != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER))) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-flags-02577", "vkCreateSampler(): when flags includes VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT, " "pCreateInfo->addressModeU (%s) and pCreateInfo->addressModeV (%s) must be " "VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE or VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER", string_VkSamplerAddressMode(pCreateInfo->addressModeU), string_VkSamplerAddressMode(pCreateInfo->addressModeV)); } if (pCreateInfo->anisotropyEnable) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-flags-02578", "vkCreateSampler(): when flags includes VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT, " "pCreateInfo->anisotropyEnable must be VK_FALSE"); } if (pCreateInfo->compareEnable) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-flags-02579", "vkCreateSampler(): when flags includes VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT, " "pCreateInfo->compareEnable must be VK_FALSE"); } if (pCreateInfo->unnormalizedCoordinates) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-flags-02580", "vkCreateSampler(): when flags includes VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT, " "pCreateInfo->unnormalizedCoordinates must be VK_FALSE"); } } } if (pCreateInfo->borderColor == VK_BORDER_COLOR_INT_CUSTOM_EXT || pCreateInfo->borderColor == VK_BORDER_COLOR_FLOAT_CUSTOM_EXT) { if (!device_extensions.vk_ext_custom_border_color) { skip |= LogError(device, kVUID_PVError_ExtensionNotEnabled, "VkSamplerCreateInfo->borderColor is %s but %s is not enabled.\n", string_VkBorderColor(pCreateInfo->borderColor), VK_EXT_CUSTOM_BORDER_COLOR_EXTENSION_NAME); } auto custom_create_info = LvlFindInChain<VkSamplerCustomBorderColorCreateInfoEXT>(pCreateInfo->pNext); if (!custom_create_info) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-borderColor-04011", "VkSamplerCreateInfo->borderColor is set to %s but there is no VkSamplerCustomBorderColorCreateInfoEXT " "struct in pNext chain.\n", string_VkBorderColor(pCreateInfo->borderColor)); } else { if ((custom_create_info->format != VK_FORMAT_UNDEFINED) && ((pCreateInfo->borderColor == VK_BORDER_COLOR_INT_CUSTOM_EXT && !FormatIsSampledInt(custom_create_info->format)) || 
(pCreateInfo->borderColor == VK_BORDER_COLOR_FLOAT_CUSTOM_EXT &&
                     !FormatIsSampledFloat(custom_create_info->format)))) {
                    skip |= LogError(device, "VUID-VkSamplerCustomBorderColorCreateInfoEXT-format-04013",
                                     "VkSamplerCreateInfo->borderColor is %s but VkSamplerCustomBorderColorCreateInfoEXT.format = %s, "
                                     "whose type does not match.",
                                     string_VkBorderColor(pCreateInfo->borderColor), string_VkFormat(custom_create_info->format));
                }
            }
        }
    }

    return skip;
}

bool StatelessValidation::manual_PreCallValidateCreateDescriptorSetLayout(VkDevice device,
                                                                          const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                                                                          const VkAllocationCallbacks *pAllocator,
                                                                          VkDescriptorSetLayout *pSetLayout) const {
    bool skip = false;

    // Validation for parameters excluded from the generated validation code due to a 'noautovalidity' tag in vk.xml
    if ((pCreateInfo != nullptr) && (pCreateInfo->pBindings != nullptr)) {
        for (uint32_t i = 0; i < pCreateInfo->bindingCount; ++i) {
            if (pCreateInfo->pBindings[i].descriptorCount != 0) {
                if (((pCreateInfo->pBindings[i].descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) ||
                     (pCreateInfo->pBindings[i].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)) &&
                    (pCreateInfo->pBindings[i].pImmutableSamplers != nullptr)) {
                    for (uint32_t descriptor_index = 0; descriptor_index < pCreateInfo->pBindings[i].descriptorCount;
                         ++descriptor_index) {
                        if (pCreateInfo->pBindings[i].pImmutableSamplers[descriptor_index] == VK_NULL_HANDLE) {
                            skip |= LogError(device, "VUID-VkDescriptorSetLayoutBinding-descriptorType-00282",
                                             "vkCreateDescriptorSetLayout: required parameter "
                                             "pCreateInfo->pBindings[%d].pImmutableSamplers[%d] specified as VK_NULL_HANDLE",
                                             i, descriptor_index);
                        }
                    }
                }

                // If descriptorCount is not 0, stageFlags must be a valid combination of VkShaderStageFlagBits values
                if ((pCreateInfo->pBindings[i].stageFlags != 0) &&
                    ((pCreateInfo->pBindings[i].stageFlags & (~AllVkShaderStageFlagBits)) != 0)) {
                    skip |= LogError(device, "VUID-VkDescriptorSetLayoutBinding-descriptorCount-00283",
                                     "vkCreateDescriptorSetLayout(): if pCreateInfo->pBindings[%d].descriptorCount is not 0, "
                                     "pCreateInfo->pBindings[%d].stageFlags must be a valid combination of VkShaderStageFlagBits "
                                     "values.",
                                     i, i);
                }

                if ((pCreateInfo->pBindings[i].descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) &&
                    (pCreateInfo->pBindings[i].stageFlags != 0) &&
                    (pCreateInfo->pBindings[i].stageFlags != VK_SHADER_STAGE_FRAGMENT_BIT)) {
                    skip |= LogError(device, "VUID-VkDescriptorSetLayoutBinding-descriptorType-01510",
                                     "vkCreateDescriptorSetLayout(): if pCreateInfo->pBindings[%d].descriptorCount is not 0 and "
                                     "descriptorType is VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT then pCreateInfo->pBindings[%d].stageFlags "
                                     "must be 0 or VK_SHADER_STAGE_FRAGMENT_BIT but is currently %s",
                                     i, i, string_VkShaderStageFlags(pCreateInfo->pBindings[i].stageFlags).c_str());
                }
            }
        }
    }
    return skip;
}

bool StatelessValidation::manual_PreCallValidateFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool,
                                                                   uint32_t descriptorSetCount,
                                                                   const VkDescriptorSet *pDescriptorSets) const {
    // Validation for parameters excluded from the generated validation code due to a 'noautovalidity' tag in vk.xml
    // This is an array of handles, where the elements are allowed to be VK_NULL_HANDLE, and does not require any validation beyond
    // validate_array()
    return validate_array("vkFreeDescriptorSets", "descriptorSetCount", "pDescriptorSets", descriptorSetCount,
                          &pDescriptorSets, true, true, kVUIDUndefined, kVUIDUndefined);
}

bool StatelessValidation::validate_WriteDescriptorSet(const char
*vkCallingFunction, const uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites, const bool validateDstSet) const { bool skip = false; if (pDescriptorWrites != NULL) { for (uint32_t i = 0; i < descriptorWriteCount; ++i) { // descriptorCount must be greater than 0 if (pDescriptorWrites[i].descriptorCount == 0) { skip |= LogError(device, "VUID-VkWriteDescriptorSet-descriptorCount-arraylength", "%s(): parameter pDescriptorWrites[%d].descriptorCount must be greater than 0.", vkCallingFunction, i); } // If called from vkCmdPushDescriptorSetKHR, the dstSet member is ignored. if (validateDstSet) { // dstSet must be a valid VkDescriptorSet handle skip |= validate_required_handle(vkCallingFunction, ParameterName("pDescriptorWrites[%i].dstSet", ParameterName::IndexVector{i}), pDescriptorWrites[i].dstSet); } if ((pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) || (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) || (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) || (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) || (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)) { // If descriptorType is VK_DESCRIPTOR_TYPE_SAMPLER, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, // VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE or VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, // pImageInfo must be a pointer to an array of descriptorCount valid VkDescriptorImageInfo structures. // Valid imageView handles are checked in ObjectLifetimes::ValidateDescriptorWrite. if (pDescriptorWrites[i].pImageInfo == nullptr) { skip |= LogError(device, "VUID-VkWriteDescriptorSet-descriptorType-00322", "%s(): if pDescriptorWrites[%d].descriptorType is " "VK_DESCRIPTOR_TYPE_SAMPLER, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, " "VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE or " "VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, pDescriptorWrites[%d].pImageInfo must not be NULL.", vkCallingFunction, i, i); } else if (pDescriptorWrites[i].descriptorType != VK_DESCRIPTOR_TYPE_SAMPLER) { // If descriptorType is VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, // VK_DESCRIPTOR_TYPE_STORAGE_IMAGE or VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, the imageLayout // member of any given element of pImageInfo must be a valid VkImageLayout for (uint32_t descriptor_index = 0; descriptor_index < pDescriptorWrites[i].descriptorCount; ++descriptor_index) { skip |= validate_ranged_enum(vkCallingFunction, ParameterName("pDescriptorWrites[%i].pImageInfo[%i].imageLayout", ParameterName::IndexVector{i, descriptor_index}), "VkImageLayout", AllVkImageLayoutEnums, pDescriptorWrites[i].pImageInfo[descriptor_index].imageLayout, kVUIDUndefined); } } } else if ((pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) || (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) || (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) || (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { // If descriptorType is VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, // VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC or VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, pBufferInfo must be a // pointer to an array of descriptorCount valid VkDescriptorBufferInfo structures // Valid buffer handles are checked in ObjectLifetimes::ValidateDescriptorWrite. 
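                // Buffer-type descriptors must supply a pBufferInfo array; when the robustness2
                // nullDescriptor feature is enabled, a VK_NULL_HANDLE buffer is tolerated, but only
                // with offset 0 and range VK_WHOLE_SIZE (VUID-VkDescriptorBufferInfo-buffer-02999).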
if (pDescriptorWrites[i].pBufferInfo == nullptr) { skip |= LogError(device, "VUID-VkWriteDescriptorSet-descriptorType-00324", "%s(): if pDescriptorWrites[%d].descriptorType is " "VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, " "VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC or VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, " "pDescriptorWrites[%d].pBufferInfo must not be NULL.", vkCallingFunction, i, i); } else { const auto *robustness2_features = LvlFindInChain<VkPhysicalDeviceRobustness2FeaturesEXT>(device_createinfo_pnext); if (robustness2_features && robustness2_features->nullDescriptor) { for (uint32_t descriptor_index = 0; descriptor_index < pDescriptorWrites[i].descriptorCount; ++descriptor_index) { if (pDescriptorWrites[i].pBufferInfo[descriptor_index].buffer == VK_NULL_HANDLE && (pDescriptorWrites[i].pBufferInfo[descriptor_index].offset != 0 || pDescriptorWrites[i].pBufferInfo[descriptor_index].range != VK_WHOLE_SIZE)) { skip |= LogError(device, "VUID-VkDescriptorBufferInfo-buffer-02999", "%s(): if pDescriptorWrites[%d].pBufferInfo[%d].buffer is VK_NULL_HANDLE, " "offset (%" PRIu64 ") must be zero and range (%" PRIu64 ") must be VK_WHOLE_SIZE.", vkCallingFunction, i, descriptor_index, pDescriptorWrites[i].pBufferInfo[descriptor_index].offset, pDescriptorWrites[i].pBufferInfo[descriptor_index].range); } } } } } else if ((pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) || (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)) { // Valid bufferView handles are checked in ObjectLifetimes::ValidateDescriptorWrite.
} if ((pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) || (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)) { VkDeviceSize uniform_alignment = device_limits.minUniformBufferOffsetAlignment; for (uint32_t j = 0; j < pDescriptorWrites[i].descriptorCount; j++) { if (pDescriptorWrites[i].pBufferInfo != NULL) { if (SafeModulo(pDescriptorWrites[i].pBufferInfo[j].offset, uniform_alignment) != 0) { skip |= LogError(device, "VUID-VkWriteDescriptorSet-descriptorType-00327", "%s(): pDescriptorWrites[%d].pBufferInfo[%d].offset (0x%" PRIxLEAST64 ") must be a multiple of device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ".", vkCallingFunction, i, j, pDescriptorWrites[i].pBufferInfo[j].offset, uniform_alignment); } } } } else if ((pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) || (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { VkDeviceSize storage_alignment = device_limits.minStorageBufferOffsetAlignment; for (uint32_t j = 0; j < pDescriptorWrites[i].descriptorCount; j++) { if (pDescriptorWrites[i].pBufferInfo != NULL) { if (SafeModulo(pDescriptorWrites[i].pBufferInfo[j].offset, storage_alignment) != 0) { skip |= LogError(device, "VUID-VkWriteDescriptorSet-descriptorType-00328", "%s(): pDescriptorWrites[%d].pBufferInfo[%d].offset (0x%" PRIxLEAST64 ") must be a multiple of device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ".", vkCallingFunction, i, j, pDescriptorWrites[i].pBufferInfo[j].offset, storage_alignment); } } } } // pNext chain must be either NULL or a pointer to a valid instance of VkWriteDescriptorSetAccelerationStructureKHR // or VkWriteDescriptorSetInlineUniformBlockEXT
if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR) { const auto *pnext_struct = LvlFindInChain<VkWriteDescriptorSetAccelerationStructureKHR>(pDescriptorWrites[i].pNext); if (!pnext_struct || (pnext_struct->accelerationStructureCount != pDescriptorWrites[i].descriptorCount)) { skip |= LogError(device, "VUID-VkWriteDescriptorSet-descriptorType-02382", "%s(): If descriptorType is VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, the pNext " "chain must include a VkWriteDescriptorSetAccelerationStructureKHR structure whose " "accelerationStructureCount %d member equals descriptorCount %d.", vkCallingFunction, pnext_struct ? pnext_struct->accelerationStructureCount : -1, pDescriptorWrites[i].descriptorCount); } // further checks only if we have the right struct type
if (pnext_struct) { if (pnext_struct->accelerationStructureCount != pDescriptorWrites[i].descriptorCount) { skip |= LogError( device, "VUID-VkWriteDescriptorSetAccelerationStructureKHR-accelerationStructureCount-02236", "%s(): accelerationStructureCount %d must be equal to descriptorCount %d in the extended structure.", vkCallingFunction, pnext_struct->accelerationStructureCount, pDescriptorWrites[i].descriptorCount); } if (pnext_struct->accelerationStructureCount == 0) { skip |= LogError(device, "VUID-VkWriteDescriptorSetAccelerationStructureKHR-accelerationStructureCount-arraylength", "%s(): accelerationStructureCount must be greater than 0.", vkCallingFunction); } const auto *robustness2_features = LvlFindInChain<VkPhysicalDeviceRobustness2FeaturesEXT>(device_createinfo_pnext); if (robustness2_features && robustness2_features->nullDescriptor == VK_FALSE) { for (uint32_t j = 0; j < pnext_struct->accelerationStructureCount; ++j) { if (pnext_struct->pAccelerationStructures[j] == VK_NULL_HANDLE) { skip |= LogError(device, "VUID-VkWriteDescriptorSetAccelerationStructureKHR-pAccelerationStructures-03580", "%s(): If the nullDescriptor feature is not enabled, each member of " "pAccelerationStructures must not be VK_NULL_HANDLE.", vkCallingFunction); } } } } } else if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV) { const auto *pnext_struct = LvlFindInChain<VkWriteDescriptorSetAccelerationStructureNV>(pDescriptorWrites[i].pNext); if (!pnext_struct || (pnext_struct->accelerationStructureCount != pDescriptorWrites[i].descriptorCount)) { skip |= LogError(device, "VUID-VkWriteDescriptorSet-descriptorType-03817", "%s(): If descriptorType is VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV, the pNext " "chain must include a VkWriteDescriptorSetAccelerationStructureNV structure whose " "accelerationStructureCount %d member equals descriptorCount %d.", vkCallingFunction, pnext_struct ? pnext_struct->accelerationStructureCount : -1, pDescriptorWrites[i].descriptorCount); } // further checks only if we have the right struct type
if (pnext_struct) { if (pnext_struct->accelerationStructureCount != pDescriptorWrites[i].descriptorCount) { skip |= LogError( device, "VUID-VkWriteDescriptorSetAccelerationStructureNV-accelerationStructureCount-03747", "%s(): accelerationStructureCount %d must be equal to descriptorCount %d in the extended structure.", vkCallingFunction, pnext_struct->accelerationStructureCount, pDescriptorWrites[i].descriptorCount); } if (pnext_struct->accelerationStructureCount == 0) { skip |= LogError(device, "VUID-VkWriteDescriptorSetAccelerationStructureNV-accelerationStructureCount-arraylength", "%s(): accelerationStructureCount must be greater than 0.", vkCallingFunction); } const auto *robustness2_features = LvlFindInChain<VkPhysicalDeviceRobustness2FeaturesEXT>(device_createinfo_pnext); if (robustness2_features && robustness2_features->nullDescriptor == VK_FALSE) { for (uint32_t j = 0; j < pnext_struct->accelerationStructureCount; ++j) { if (pnext_struct->pAccelerationStructures[j] == VK_NULL_HANDLE) { skip |= LogError(device, "VUID-VkWriteDescriptorSetAccelerationStructureNV-pAccelerationStructures-03749", "%s(): If the nullDescriptor feature is not enabled, each member of " "pAccelerationStructures must not be VK_NULL_HANDLE.", vkCallingFunction); } } } } } } } return skip; }
bool StatelessValidation::manual_PreCallValidateUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) const { return validate_WriteDescriptorSet("vkUpdateDescriptorSets", descriptorWriteCount, pDescriptorWrites); }
bool StatelessValidation::manual_PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const { return CreateRenderPassGeneric(device, pCreateInfo, pAllocator, pRenderPass, RENDER_PASS_VERSION_1); }
bool StatelessValidation::manual_PreCallValidateCreateRenderPass2(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const { return CreateRenderPassGeneric(device, pCreateInfo, pAllocator, pRenderPass, RENDER_PASS_VERSION_2); }
bool StatelessValidation::manual_PreCallValidateCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const { return CreateRenderPassGeneric(device, pCreateInfo, pAllocator, pRenderPass, RENDER_PASS_VERSION_2); }
bool StatelessValidation::manual_PreCallValidateFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) const { bool skip = false; // Validation for parameters excluded from the generated validation code due to a 'noautovalidity' tag in vk.xml // This is an array of handles, where the elements are allowed to be VK_NULL_HANDLE, and does not require any validation beyond // validate_array()
skip |= validate_array("vkFreeCommandBuffers", "commandBufferCount", "pCommandBuffers", commandBufferCount, &pCommandBuffers, true, true, kVUIDUndefined, kVUIDUndefined); return skip; }
bool StatelessValidation::manual_PreCallValidateBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) const { bool skip = false; // VkCommandBufferInheritanceInfo validation, due to a
'noautovalidity' of pBeginInfo->pInheritanceInfo in vkBeginCommandBuffer const char *cmd_name = "vkBeginCommandBuffer"; bool cb_is_secondary; { auto lock = cb_read_lock(); cb_is_secondary = (secondary_cb_map.find(commandBuffer) != secondary_cb_map.end()); } if (cb_is_secondary) { // Implicit VUs // validate only sType here; pointer has to be validated in core_validation const bool k_not_required = false; const char *k_no_vuid = nullptr; const VkCommandBufferInheritanceInfo *info = pBeginInfo->pInheritanceInfo; skip |= validate_struct_type(cmd_name, "pBeginInfo->pInheritanceInfo", "VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO", info, VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO, k_not_required, k_no_vuid, "VUID-VkCommandBufferInheritanceInfo-sType-sType"); if (info) { const VkStructureType allowed_structs_vk_command_buffer_inheritance_info[] = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT}; skip |= validate_struct_pnext( cmd_name, "pBeginInfo->pInheritanceInfo->pNext", "VkCommandBufferInheritanceConditionalRenderingInfoEXT", info->pNext, ARRAY_SIZE(allowed_structs_vk_command_buffer_inheritance_info), allowed_structs_vk_command_buffer_inheritance_info, GeneratedVulkanHeaderVersion, "VUID-VkCommandBufferInheritanceInfo-pNext-pNext", "VUID-VkCommandBufferInheritanceInfo-sType-unique"); skip |= validate_bool32(cmd_name, "pBeginInfo->pInheritanceInfo->occlusionQueryEnable", info->occlusionQueryEnable); // Explicit VUs if (!physical_device_features.inheritedQueries && info->occlusionQueryEnable == VK_TRUE) { skip |= LogError( commandBuffer, "VUID-VkCommandBufferInheritanceInfo-occlusionQueryEnable-00056", "%s: Inherited queries feature is disabled, but pBeginInfo->pInheritanceInfo->occlusionQueryEnable is VK_TRUE.", cmd_name); } if (physical_device_features.inheritedQueries) { skip |= validate_flags(cmd_name, "pBeginInfo->pInheritanceInfo->queryFlags", "VkQueryControlFlagBits", AllVkQueryControlFlagBits, info->queryFlags, kOptionalFlags, "VUID-VkCommandBufferInheritanceInfo-queryFlags-00057"); } else { // !inheritedQueries skip |= validate_reserved_flags(cmd_name, "pBeginInfo->pInheritanceInfo->queryFlags", info->queryFlags, "VUID-VkCommandBufferInheritanceInfo-queryFlags-02788"); } if (physical_device_features.pipelineStatisticsQuery) { skip |= validate_flags(cmd_name, "pBeginInfo->pInheritanceInfo->pipelineStatistics", "VkQueryPipelineStatisticFlagBits", AllVkQueryPipelineStatisticFlagBits, info->pipelineStatistics, kOptionalFlags, "VUID-VkCommandBufferInheritanceInfo-pipelineStatistics-02789"); } else { // !pipelineStatisticsQuery skip |= validate_reserved_flags(cmd_name, "pBeginInfo->pInheritanceInfo->pipelineStatistics", info->pipelineStatistics, "VUID-VkCommandBufferInheritanceInfo-pipelineStatistics-00058"); } const auto *conditional_rendering = LvlFindInChain<VkCommandBufferInheritanceConditionalRenderingInfoEXT>(info->pNext); if (conditional_rendering) { const auto *cr_features = LvlFindInChain<VkPhysicalDeviceConditionalRenderingFeaturesEXT>(device_createinfo_pnext); const auto inherited_conditional_rendering = cr_features && cr_features->inheritedConditionalRendering; if (!inherited_conditional_rendering && conditional_rendering->conditionalRenderingEnable == VK_TRUE) { skip |= LogError( commandBuffer, "VUID-VkCommandBufferInheritanceConditionalRenderingInfoEXT-conditionalRenderingEnable-01977", "vkBeginCommandBuffer: Inherited conditional rendering is disabled, but " 
"pBeginInfo->pInheritanceInfo->pNext<VkCommandBufferInheritanceConditionalRenderingInfoEXT> is VK_TRUE."); } } } } return skip; } bool StatelessValidation::manual_PreCallValidateCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) const { bool skip = false; if (!physical_device_features.multiViewport) { if (firstViewport != 0) { skip |= LogError(commandBuffer, "VUID-vkCmdSetViewport-firstViewport-01224", "vkCmdSetViewport: The multiViewport feature is disabled, but firstViewport (=%" PRIu32 ") is not 0.", firstViewport); } if (viewportCount > 1) { skip |= LogError(commandBuffer, "VUID-vkCmdSetViewport-viewportCount-01225", "vkCmdSetViewport: The multiViewport feature is disabled, but viewportCount (=%" PRIu32 ") is not 1.", viewportCount); } } else { // multiViewport enabled const uint64_t sum = static_cast<uint64_t>(firstViewport) + static_cast<uint64_t>(viewportCount); if (sum > device_limits.maxViewports) { skip |= LogError(commandBuffer, "VUID-vkCmdSetViewport-firstViewport-01223", "vkCmdSetViewport: firstViewport + viewportCount (=%" PRIu32 " + %" PRIu32 " = %" PRIu64 ") is greater than VkPhysicalDeviceLimits::maxViewports (=%" PRIu32 ").", firstViewport, viewportCount, sum, device_limits.maxViewports); } } if (pViewports) { for (uint32_t viewport_i = 0; viewport_i < viewportCount; ++viewport_i) { const auto &viewport = pViewports[viewport_i]; // will crash on invalid ptr const char *fn_name = "vkCmdSetViewport"; skip |= manual_PreCallValidateViewport( viewport, fn_name, ParameterName("pViewports[%i]", ParameterName::IndexVector{viewport_i}), commandBuffer); } } return skip; } bool StatelessValidation::manual_PreCallValidateCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) const { bool skip = false; if (!physical_device_features.multiViewport) { if (firstScissor != 0) { skip |= LogError(commandBuffer, "VUID-vkCmdSetScissor-firstScissor-00593", "vkCmdSetScissor: The multiViewport feature is disabled, but firstScissor (=%" PRIu32 ") is not 0.", firstScissor); } if (scissorCount > 1) { skip |= LogError(commandBuffer, "VUID-vkCmdSetScissor-scissorCount-00594", "vkCmdSetScissor: The multiViewport feature is disabled, but scissorCount (=%" PRIu32 ") is not 1.", scissorCount); } } else { // multiViewport enabled const uint64_t sum = static_cast<uint64_t>(firstScissor) + static_cast<uint64_t>(scissorCount); if (sum > device_limits.maxViewports) { skip |= LogError(commandBuffer, "VUID-vkCmdSetScissor-firstScissor-00592", "vkCmdSetScissor: firstScissor + scissorCount (=%" PRIu32 " + %" PRIu32 " = %" PRIu64 ") is greater than VkPhysicalDeviceLimits::maxViewports (=%" PRIu32 ").", firstScissor, scissorCount, sum, device_limits.maxViewports); } } if (pScissors) { for (uint32_t scissor_i = 0; scissor_i < scissorCount; ++scissor_i) { const auto &scissor = pScissors[scissor_i]; // will crash on invalid ptr if (scissor.offset.x < 0) { skip |= LogError(commandBuffer, "VUID-vkCmdSetScissor-x-00595", "vkCmdSetScissor: pScissors[%" PRIu32 "].offset.x (=%" PRIi32 ") is negative.", scissor_i, scissor.offset.x); } if (scissor.offset.y < 0) { skip |= LogError(commandBuffer, "VUID-vkCmdSetScissor-x-00595", "vkCmdSetScissor: pScissors[%" PRIu32 "].offset.y (=%" PRIi32 ") is negative.", scissor_i, scissor.offset.y); } const int64_t x_sum = static_cast<int64_t>(scissor.offset.x) + static_cast<int64_t>(scissor.extent.width); if (x_sum > INT32_MAX) { skip |= 
LogError(commandBuffer, "VUID-vkCmdSetScissor-offset-00596", "vkCmdSetScissor: offset.x + extent.width (=%" PRIi32 " + %" PRIu32 " = %" PRIi64 ") of pScissors[%" PRIu32 "] will overflow int32_t.", scissor.offset.x, scissor.extent.width, x_sum, scissor_i); } const int64_t y_sum = static_cast<int64_t>(scissor.offset.y) + static_cast<int64_t>(scissor.extent.height); if (y_sum > INT32_MAX) { skip |= LogError(commandBuffer, "VUID-vkCmdSetScissor-offset-00597", "vkCmdSetScissor: offset.y + extent.height (=%" PRIi32 " + %" PRIu32 " = %" PRIi64 ") of pScissors[%" PRIu32 "] will overflow int32_t.", scissor.offset.y, scissor.extent.height, y_sum, scissor_i); } } } return skip; } bool StatelessValidation::manual_PreCallValidateCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) const { bool skip = false; if (!physical_device_features.wideLines && (lineWidth != 1.0f)) { skip |= LogError(commandBuffer, "VUID-vkCmdSetLineWidth-lineWidth-00788", "VkPhysicalDeviceFeatures::wideLines is disabled, but lineWidth (=%f) is not 1.0.", lineWidth); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride) const { bool skip = false; if (!physical_device_features.multiDrawIndirect && ((drawCount > 1))) { skip |= LogError(device, "VUID-vkCmdDrawIndirect-drawCount-02718", "CmdDrawIndirect(): Device feature multiDrawIndirect disabled: count must be 0 or 1 but is %d", drawCount); } if (drawCount > device_limits.maxDrawIndirectCount) { skip |= LogError(commandBuffer, "VUID-vkCmdDrawIndirect-drawCount-02719", "CmdDrawIndirect(): drawCount (%u) is not less than or equal to the maximum allowed (%u).", drawCount, device_limits.maxDrawIndirectCount); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride) const { bool skip = false; if (!physical_device_features.multiDrawIndirect && ((drawCount > 1))) { skip |= LogError(device, "VUID-vkCmdDrawIndexedIndirect-drawCount-02718", "CmdDrawIndexedIndirect(): Device feature multiDrawIndirect disabled: count must be 0 or 1 but is %d", drawCount); } if (drawCount > device_limits.maxDrawIndirectCount) { skip |= LogError(commandBuffer, "VUID-vkCmdDrawIndexedIndirect-drawCount-02719", "CmdDrawIndexedIndirect(): drawCount (%u) is not less than or equal to the maximum allowed (%u).", drawCount, device_limits.maxDrawIndirectCount); } return skip; } bool StatelessValidation::ValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkDeviceSize offset, VkDeviceSize countBufferOffset, bool khr) const { bool skip = false; const char *api_name = khr ? 
"vkCmdDrawIndirectCountKHR()" : "vkCmdDrawIndirectCount()"; if (offset & 3) { skip |= LogError(commandBuffer, "VUID-vkCmdDrawIndirectCount-offset-02710", "%s: parameter, VkDeviceSize offset (0x%" PRIxLEAST64 "), is not a multiple of 4.", api_name, offset); } if (countBufferOffset & 3) { skip |= LogError(commandBuffer, "VUID-vkCmdDrawIndirectCount-countBufferOffset-02716", "%s: parameter, VkDeviceSize countBufferOffset (0x%" PRIxLEAST64 "), is not a multiple of 4.", api_name, countBufferOffset); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) const { return ValidateCmdDrawIndirectCount(commandBuffer, offset, countBufferOffset, false); } bool StatelessValidation::manual_PreCallValidateCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) const { return ValidateCmdDrawIndirectCount(commandBuffer, offset, countBufferOffset, true); } bool StatelessValidation::ValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkDeviceSize offset, VkDeviceSize countBufferOffset, bool khr) const { bool skip = false; const char *api_name = khr ? "vkCmdDrawIndexedIndirectCountKHR()" : "vkCmdDrawIndexedIndirectCount()"; if (offset & 3) { skip |= LogError(commandBuffer, "VUID-vkCmdDrawIndexedIndirectCount-offset-02710", "%s: parameter, VkDeviceSize offset (0x%" PRIxLEAST64 "), is not a multiple of 4.", api_name, offset); } if (countBufferOffset & 3) { skip |= LogError(commandBuffer, "VUID-vkCmdDrawIndexedIndirectCount-countBufferOffset-02716", "%s: parameter, VkDeviceSize countBufferOffset (0x%" PRIxLEAST64 "), is not a multiple of 4.", api_name, countBufferOffset); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) const { return ValidateCmdDrawIndexedIndirectCount(commandBuffer, offset, countBufferOffset, false); } bool StatelessValidation::manual_PreCallValidateCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) const { return ValidateCmdDrawIndexedIndirectCount(commandBuffer, offset, countBufferOffset, true); } bool StatelessValidation::manual_PreCallValidateCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment *pAttachments, uint32_t rectCount, const VkClearRect *pRects) const { bool skip = false; for (uint32_t rect = 0; rect < rectCount; rect++) { if (pRects[rect].layerCount == 0) { skip |= LogError(commandBuffer, "VUID-vkCmdClearAttachments-layerCount-01934", "CmdClearAttachments(): pRects[%d].layerCount is zero.", rect); } if (pRects[rect].rect.extent.width == 0) { skip |= LogError(commandBuffer, "VUID-vkCmdClearAttachments-rect-02682", "CmdClearAttachments(): pRects[%d].rect.extent.width is zero.", rect); } if (pRects[rect].rect.extent.height == 0) { skip |= LogError(commandBuffer, "VUID-vkCmdClearAttachments-rect-02683", "CmdClearAttachments(): pRects[%d].rect.extent.height is zero.", rect); } } return skip; } bool 
StatelessValidation::ValidateGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, VkImageFormatProperties2 *pImageFormatProperties, const char *apiName) const { bool skip = false; if (pImageFormatInfo != nullptr) { const auto image_stencil_struct = LvlFindInChain<VkImageStencilUsageCreateInfo>(pImageFormatInfo->pNext); if (image_stencil_struct != nullptr) { if ((image_stencil_struct->stencilUsage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT) != 0) { VkImageUsageFlags legal_flags = (VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT); // No flags other than the legal attachment bits may be set legal_flags |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT; if ((image_stencil_struct->stencilUsage & ~legal_flags) != 0) { skip |= LogError(physicalDevice, "VUID-VkImageStencilUsageCreateInfo-stencilUsage-02539", "%s(): in pNext chain, VkImageStencilUsageCreateInfo::stencilUsage " "includes VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT, it must not include bits other than " "VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT or VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT", apiName); } } } } return skip; } bool StatelessValidation::manual_PreCallValidateGetPhysicalDeviceImageFormatProperties2( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, VkImageFormatProperties2 *pImageFormatProperties) const { return ValidateGetPhysicalDeviceImageFormatProperties2(physicalDevice, pImageFormatInfo, pImageFormatProperties, "vkGetPhysicalDeviceImageFormatProperties2"); } bool StatelessValidation::manual_PreCallValidateGetPhysicalDeviceImageFormatProperties2KHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, VkImageFormatProperties2 *pImageFormatProperties) const { return ValidateGetPhysicalDeviceImageFormatProperties2(physicalDevice, pImageFormatInfo, pImageFormatProperties, "vkGetPhysicalDeviceImageFormatProperties2KHR"); } bool StatelessValidation::manual_PreCallValidateGetPhysicalDeviceImageFormatProperties( VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties *pImageFormatProperties) const { bool skip = false; if (tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) { skip |= LogError(physicalDevice, "VUID-vkGetPhysicalDeviceImageFormatProperties-tiling-02248", "vkGetPhysicalDeviceImageFormatProperties(): tiling must not be VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT."); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy *pRegions) const { bool skip = false; if (pRegions != nullptr) { for (uint32_t i = 0; i < regionCount; i++) { if (pRegions[i].size == 0) { skip |= LogError(device, "VUID-VkBufferCopy-size-01988", "vkCmdCopyBuffer() pRegions[%u].size must be greater than zero", i); } } } return skip; } bool StatelessValidation::manual_PreCallValidateCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR *pCopyBufferInfo) const { bool skip = false; if (pCopyBufferInfo->pRegions != nullptr) { for (uint32_t i = 0; i < pCopyBufferInfo->regionCount; i++) { if (pCopyBufferInfo->pRegions[i].size == 0) { skip |= LogError(device, "VUID-VkBufferCopy2KHR-size-01988", "vkCmdCopyBuffer2KHR() pCopyBufferInfo->pRegions[%u].size must be greater than zero", i); } } } return skip; } bool 
StatelessValidation::manual_PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void *pData) const { bool skip = false; if (dstOffset & 3) { skip |= LogError(device, "VUID-vkCmdUpdateBuffer-dstOffset-00036", "vkCmdUpdateBuffer() parameter, VkDeviceSize dstOffset (0x%" PRIxLEAST64 "), is not a multiple of 4.", dstOffset); } if ((dataSize <= 0) || (dataSize > 65536)) { skip |= LogError(device, "VUID-vkCmdUpdateBuffer-dataSize-00037", "vkCmdUpdateBuffer() parameter, VkDeviceSize dataSize (0x%" PRIxLEAST64 "), must be greater than zero and less than or equal to 65536.", dataSize); } else if (dataSize & 3) { skip |= LogError(device, "VUID-vkCmdUpdateBuffer-dataSize-00038", "vkCmdUpdateBuffer() parameter, VkDeviceSize dataSize (0x%" PRIxLEAST64 "), is not a multiple of 4.", dataSize); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) const { bool skip = false; if (dstOffset & 3) { skip |= LogError(device, "VUID-vkCmdFillBuffer-dstOffset-00025", "vkCmdFillBuffer() parameter, VkDeviceSize dstOffset (0x%" PRIxLEAST64 "), is not a multiple of 4.", dstOffset); } if (size != VK_WHOLE_SIZE) { if (size <= 0) { skip |= LogError(device, "VUID-vkCmdFillBuffer-size-00026", "vkCmdFillBuffer() parameter, VkDeviceSize size (0x%" PRIxLEAST64 "), must be greater than zero.", size); } else if (size & 3) { skip |= LogError(device, "VUID-vkCmdFillBuffer-size-00028", "vkCmdFillBuffer() parameter, VkDeviceSize size (0x%" PRIxLEAST64 "), is not a multiple of 4.", size); } } return skip; } bool StatelessValidation::manual_PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) const { bool skip = false; if (pCreateInfo != nullptr) { // Validation for parameters excluded from the generated validation code due to a 'noautovalidity' tag in vk.xml if (pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT) { // If imageSharingMode is VK_SHARING_MODE_CONCURRENT, queueFamilyIndexCount must be greater than 1 if (pCreateInfo->queueFamilyIndexCount <= 1) { skip |= LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01278", "vkCreateSwapchainKHR(): if pCreateInfo->imageSharingMode is VK_SHARING_MODE_CONCURRENT, " "pCreateInfo->queueFamilyIndexCount must be greater than 1."); } // If imageSharingMode is VK_SHARING_MODE_CONCURRENT, pQueueFamilyIndices must be a pointer to an array of // queueFamilyIndexCount uint32_t values if (pCreateInfo->pQueueFamilyIndices == nullptr) { skip |= LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01277", "vkCreateSwapchainKHR(): if pCreateInfo->imageSharingMode is VK_SHARING_MODE_CONCURRENT, " "pCreateInfo->pQueueFamilyIndices must be a pointer to an array of " "pCreateInfo->queueFamilyIndexCount uint32_t values."); } } skip |= ValidateGreaterThanZero(pCreateInfo->imageArrayLayers, "pCreateInfo->imageArrayLayers", "VUID-VkSwapchainCreateInfoKHR-imageArrayLayers-01275", "vkCreateSwapchainKHR"); } return skip; } bool StatelessValidation::manual_PreCallValidateQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) const { bool skip = false; if (pPresentInfo && pPresentInfo->pNext) { const auto *present_regions = LvlFindInChain<VkPresentRegionsKHR>(pPresentInfo->pNext); if (present_regions) { // TODO: This and all other pNext 
extension dependencies should be added to code-generation skip |= require_device_extension(IsExtEnabled(device_extensions.vk_khr_incremental_present), "vkQueuePresentKHR", VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME); if (present_regions->swapchainCount != pPresentInfo->swapchainCount) { skip |= LogError(device, "VUID-VkPresentRegionsKHR-swapchainCount-01260", "QueuePresentKHR(): pPresentInfo->swapchainCount has a value of %i but VkPresentRegionsKHR " "extension swapchainCount is %i. These values must be equal.", pPresentInfo->swapchainCount, present_regions->swapchainCount); } skip |= validate_struct_pnext("QueuePresentKHR", "pCreateInfo->pNext->pNext", NULL, present_regions->pNext, 0, NULL, GeneratedVulkanHeaderVersion, "VUID-VkPresentInfoKHR-pNext-pNext", "VUID-VkPresentInfoKHR-sType-unique"); skip |= validate_array("QueuePresentKHR", "pCreateInfo->pNext->swapchainCount", "pCreateInfo->pNext->pRegions", present_regions->swapchainCount, &present_regions->pRegions, true, false, kVUIDUndefined, kVUIDUndefined); for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) { skip |= validate_array("QueuePresentKHR", "pCreateInfo->pNext->pRegions[].rectangleCount", "pCreateInfo->pNext->pRegions[].pRectangles", present_regions->pRegions[i].rectangleCount, &present_regions->pRegions[i].pRectangles, true, false, kVUIDUndefined, kVUIDUndefined); } } } return skip; } bool StatelessValidation::manual_PreCallValidateCreateDisplayModeKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, const VkDisplayModeCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDisplayModeKHR *pMode) const { bool skip = false; const VkDisplayModeParametersKHR display_mode_parameters = pCreateInfo->parameters; if (display_mode_parameters.visibleRegion.width == 0) { skip |= LogError(device, "VUID-VkDisplayModeParametersKHR-width-01990", "vkCreateDisplayModeKHR(): pCreateInfo->parameters.visibleRegion.width must be greater than 0."); } if (display_mode_parameters.visibleRegion.height == 0) { skip |= LogError(device, "VUID-VkDisplayModeParametersKHR-height-01991", "vkCreateDisplayModeKHR(): pCreateInfo->parameters.visibleRegion.height must be greater than 0."); } if (display_mode_parameters.refreshRate == 0) { skip |= LogError(device, "VUID-VkDisplayModeParametersKHR-refreshRate-01992", "vkCreateDisplayModeKHR(): pCreateInfo->parameters.refreshRate must be greater than 0."); } return skip; } #ifdef VK_USE_PLATFORM_WIN32_KHR bool StatelessValidation::manual_PreCallValidateCreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) const { bool skip = false; if (pCreateInfo->hwnd == nullptr) { skip |= LogError(device, "VUID-VkWin32SurfaceCreateInfoKHR-hwnd-01308", "vkCreateWin32SurfaceKHR(): hwnd must be a valid Win32 HWND but hwnd is NULL."); } return skip; } #endif // VK_USE_PLATFORM_WIN32_KHR bool StatelessValidation::manual_PreCallValidateCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool) const { bool skip = false; if (pCreateInfo) { if (pCreateInfo->maxSets <= 0) { skip |= LogError(device, "VUID-VkDescriptorPoolCreateInfo-maxSets-00301", "vkCreateDescriptorPool(): pCreateInfo->maxSets is not greater than 0."); } if (pCreateInfo->pPoolSizes) { for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; ++i) { if (pCreateInfo->pPoolSizes[i].descriptorCount <= 0) { skip |= LogError( device, 
"VUID-VkDescriptorPoolSize-descriptorCount-00302", "vkCreateDescriptorPool(): pCreateInfo->pPoolSizes[%" PRIu32 "].descriptorCount is not greater than 0.", i); } if (pCreateInfo->pPoolSizes[i].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT && (pCreateInfo->pPoolSizes[i].descriptorCount % 4) != 0) { skip |= LogError(device, "VUID-VkDescriptorPoolSize-type-02218", "vkCreateDescriptorPool(): pCreateInfo->pPoolSizes[%" PRIu32 "].type is VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT " " and pCreateInfo->pPoolSizes[%" PRIu32 "].descriptorCount is not a multiple of 4.", i, i); } } } } return skip; } bool StatelessValidation::manual_PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ) const { bool skip = false; if (groupCountX > device_limits.maxComputeWorkGroupCount[0]) { skip |= LogError(commandBuffer, "VUID-vkCmdDispatch-groupCountX-00386", "vkCmdDispatch(): groupCountX (%" PRIu32 ") exceeds device limit maxComputeWorkGroupCount[0] (%" PRIu32 ").", groupCountX, device_limits.maxComputeWorkGroupCount[0]); } if (groupCountY > device_limits.maxComputeWorkGroupCount[1]) { skip |= LogError(commandBuffer, "VUID-vkCmdDispatch-groupCountY-00387", "vkCmdDispatch(): groupCountY (%" PRIu32 ") exceeds device limit maxComputeWorkGroupCount[1] (%" PRIu32 ").", groupCountY, device_limits.maxComputeWorkGroupCount[1]); } if (groupCountZ > device_limits.maxComputeWorkGroupCount[2]) { skip |= LogError(commandBuffer, "VUID-vkCmdDispatch-groupCountZ-00388", "vkCmdDispatch(): groupCountZ (%" PRIu32 ") exceeds device limit maxComputeWorkGroupCount[2] (%" PRIu32 ").", groupCountZ, device_limits.maxComputeWorkGroupCount[2]); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) const { bool skip = false; if ((offset % 4) != 0) { skip |= LogError(commandBuffer, "VUID-vkCmdDispatchIndirect-offset-02710", "vkCmdDispatchIndirect(): offset (%" PRIu64 ") must be a multiple of 4.", offset); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdDispatchBaseKHR(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ) const { bool skip = false; // Paired if {} else if {} tests used to avoid any possible uint underflow uint32_t limit = device_limits.maxComputeWorkGroupCount[0]; if (baseGroupX >= limit) { skip |= LogError(commandBuffer, "VUID-vkCmdDispatchBase-baseGroupX-00421", "vkCmdDispatch(): baseGroupX (%" PRIu32 ") equals or exceeds device limit maxComputeWorkGroupCount[0] (%" PRIu32 ").", baseGroupX, limit); } else if (groupCountX > (limit - baseGroupX)) { skip |= LogError(commandBuffer, "VUID-vkCmdDispatchBase-groupCountX-00424", "vkCmdDispatchBaseKHR(): baseGroupX (%" PRIu32 ") + groupCountX (%" PRIu32 ") exceeds device limit maxComputeWorkGroupCount[0] (%" PRIu32 ").", baseGroupX, groupCountX, limit); } limit = device_limits.maxComputeWorkGroupCount[1]; if (baseGroupY >= limit) { skip |= LogError(commandBuffer, "VUID-vkCmdDispatchBase-baseGroupX-00422", "vkCmdDispatch(): baseGroupY (%" PRIu32 ") equals or exceeds device limit maxComputeWorkGroupCount[1] (%" PRIu32 ").", baseGroupY, limit); } else if (groupCountY > (limit - baseGroupY)) { skip |= LogError(commandBuffer, "VUID-vkCmdDispatchBase-groupCountY-00425", "vkCmdDispatchBaseKHR(): baseGroupY (%" PRIu32 ") + groupCountY (%" PRIu32 ") exceeds device limit 
maxComputeWorkGroupCount[1] (%" PRIu32 ").", baseGroupY, groupCountY, limit); } limit = device_limits.maxComputeWorkGroupCount[2]; if (baseGroupZ >= limit) { skip |= LogError(commandBuffer, "VUID-vkCmdDispatchBase-baseGroupZ-00423", "vkCmdDispatch(): baseGroupZ (%" PRIu32 ") equals or exceeds device limit maxComputeWorkGroupCount[2] (%" PRIu32 ").", baseGroupZ, limit); } else if (groupCountZ > (limit - baseGroupZ)) { skip |= LogError(commandBuffer, "VUID-vkCmdDispatchBase-groupCountZ-00426", "vkCmdDispatchBaseKHR(): baseGroupZ (%" PRIu32 ") + groupCountZ (%" PRIu32 ") exceeds device limit maxComputeWorkGroupCount[2] (%" PRIu32 ").", baseGroupZ, groupCountZ, limit); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) const { return validate_WriteDescriptorSet("vkCmdPushDescriptorSetKHR", descriptorWriteCount, pDescriptorWrites, false); } bool StatelessValidation::manual_PreCallValidateCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) const { bool skip = false; if (!physical_device_features.multiViewport) { if (firstExclusiveScissor != 0) { skip |= LogError(commandBuffer, "VUID-vkCmdSetExclusiveScissorNV-firstExclusiveScissor-02035", "vkCmdSetExclusiveScissorNV: The multiViewport feature is disabled, but firstExclusiveScissor (=%" PRIu32 ") is not 0.", firstExclusiveScissor); } if (exclusiveScissorCount > 1) { skip |= LogError(commandBuffer, "VUID-vkCmdSetExclusiveScissorNV-exclusiveScissorCount-02036", "vkCmdSetExclusiveScissorNV: The multiViewport feature is disabled, but exclusiveScissorCount (=%" PRIu32 ") is not 1.", exclusiveScissorCount); } } else { // multiViewport enabled const uint64_t sum = static_cast<uint64_t>(firstExclusiveScissor) + static_cast<uint64_t>(exclusiveScissorCount); if (sum > device_limits.maxViewports) { skip |= LogError(commandBuffer, "VUID-vkCmdSetExclusiveScissorNV-firstExclusiveScissor-02034", "vkCmdSetExclusiveScissorNV: firstExclusiveScissor + exclusiveScissorCount (=%" PRIu32 " + %" PRIu32 " = %" PRIu64 ") is greater than VkPhysicalDeviceLimits::maxViewports (=%" PRIu32 ").", firstExclusiveScissor, exclusiveScissorCount, sum, device_limits.maxViewports); } } if (pExclusiveScissors) { for (uint32_t scissor_i = 0; scissor_i < exclusiveScissorCount; ++scissor_i) { const auto &scissor = pExclusiveScissors[scissor_i]; // will crash on invalid ptr if (scissor.offset.x < 0) { skip |= LogError(commandBuffer, "VUID-vkCmdSetExclusiveScissorNV-x-02037", "vkCmdSetExclusiveScissorNV: pScissors[%" PRIu32 "].offset.x (=%" PRIi32 ") is negative.", scissor_i, scissor.offset.x); } if (scissor.offset.y < 0) { skip |= LogError(commandBuffer, "VUID-vkCmdSetExclusiveScissorNV-x-02037", "vkCmdSetExclusiveScissorNV: pScissors[%" PRIu32 "].offset.y (=%" PRIi32 ") is negative.", scissor_i, scissor.offset.y); } const int64_t x_sum = static_cast<int64_t>(scissor.offset.x) + static_cast<int64_t>(scissor.extent.width); if (x_sum > INT32_MAX) { skip |= LogError(commandBuffer, "VUID-vkCmdSetExclusiveScissorNV-offset-02038", "vkCmdSetExclusiveScissorNV: offset.x + extent.width (=%" PRIi32 " + %" PRIu32 " = %" PRIi64 ") of pScissors[%" PRIu32 "] will overflow int32_t.", scissor.offset.x, scissor.extent.width, x_sum, scissor_i); } const int64_t y_sum = 
static_cast<int64_t>(scissor.offset.y) + static_cast<int64_t>(scissor.extent.height); if (y_sum > INT32_MAX) { skip |= LogError(commandBuffer, "VUID-vkCmdSetExclusiveScissorNV-offset-02039", "vkCmdSetExclusiveScissorNV: offset.y + extent.height (=%" PRIi32 " + %" PRIu32 " = %" PRIi64 ") of pExclusiveScissors[%" PRIu32 "] will overflow int32_t.", scissor.offset.y, scissor.extent.height, y_sum, scissor_i); } } } return skip; }
bool StatelessValidation::manual_PreCallValidateCmdSetViewportWScalingNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV *pViewportWScalings) const { bool skip = false; const uint64_t sum = static_cast<uint64_t>(firstViewport) + static_cast<uint64_t>(viewportCount); if ((sum < 1) || (sum > device_limits.maxViewports)) { skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportWScalingNV-firstViewport-01324", "vkCmdSetViewportWScalingNV: firstViewport + viewportCount (=%" PRIu32 " + %" PRIu32 " = %" PRIu64 ") must be between 1 and VkPhysicalDeviceLimits::maxViewports (=%" PRIu32 "), inclusive.", firstViewport, viewportCount, sum, device_limits.maxViewports); } return skip; }
bool StatelessValidation::manual_PreCallValidateCmdSetViewportShadingRatePaletteNV( VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV *pShadingRatePalettes) const { bool skip = false; if (!physical_device_features.multiViewport) { if (firstViewport != 0) { skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportShadingRatePaletteNV-firstViewport-02068", "vkCmdSetViewportShadingRatePaletteNV: The multiViewport feature is disabled, but firstViewport (=%" PRIu32 ") is not 0.", firstViewport); } if (viewportCount > 1) { skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportShadingRatePaletteNV-viewportCount-02069", "vkCmdSetViewportShadingRatePaletteNV: The multiViewport feature is disabled, but viewportCount (=%" PRIu32 ") is not 1.", viewportCount); } } const uint64_t sum = static_cast<uint64_t>(firstViewport) + static_cast<uint64_t>(viewportCount); if (sum > device_limits.maxViewports) { skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportShadingRatePaletteNV-firstViewport-02067", "vkCmdSetViewportShadingRatePaletteNV: firstViewport + viewportCount (=%" PRIu32 " + %" PRIu32 " = %" PRIu64 ") is greater than VkPhysicalDeviceLimits::maxViewports (=%" PRIu32 ").", firstViewport, viewportCount, sum, device_limits.maxViewports); } return skip; }
bool StatelessValidation::manual_PreCallValidateCmdSetCoarseSampleOrderNV( VkCommandBuffer commandBuffer, VkCoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VkCoarseSampleOrderCustomNV *pCustomSampleOrders) const { bool skip = false; if (sampleOrderType != VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV && customSampleOrderCount != 0) { skip |= LogError(commandBuffer, "VUID-vkCmdSetCoarseSampleOrderNV-sampleOrderType-02081", "vkCmdSetCoarseSampleOrderNV: If sampleOrderType is not VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV, " "customSampleOrderCount must be 0."); } for (uint32_t order_i = 0; order_i < customSampleOrderCount; ++order_i) { skip |= ValidateCoarseSampleOrderCustomNV(&pCustomSampleOrders[order_i]); } return skip; }
bool StatelessValidation::manual_PreCallValidateCmdDrawMeshTasksNV(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask) const { bool skip = false; if (taskCount > phys_dev_ext_props.mesh_shader_props.maxDrawMeshTasksCount) { skip |= LogError( commandBuffer, "VUID-vkCmdDrawMeshTasksNV-taskCount-02119",
"vkCmdDrawMeshTasksNV() parameter, uint32_t taskCount (0x%" PRIxLEAST32 "), must be less than or equal to VkPhysicalDeviceMeshShaderPropertiesNV::maxDrawMeshTasksCount (0x%" PRIxLEAST32 ").", taskCount, phys_dev_ext_props.mesh_shader_props.maxDrawMeshTasksCount); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdDrawMeshTasksIndirectNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride) const { bool skip = false; static const int condition_multiples = 0b0011; if (offset & condition_multiples) { skip |= LogError( commandBuffer, "VUID-vkCmdDrawMeshTasksIndirectNV-offset-02710", "vkCmdDrawMeshTasksIndirectNV() parameter, VkDeviceSize offset (0x%" PRIxLEAST64 "), is not a multiple of 4.", offset); } if (drawCount > 1 && ((stride & condition_multiples) || stride < sizeof(VkDrawMeshTasksIndirectCommandNV))) { skip |= LogError(commandBuffer, "VUID-vkCmdDrawMeshTasksIndirectNV-drawCount-02146", "vkCmdDrawMeshTasksIndirectNV() parameter, uint32_t stride (0x%" PRIxLEAST32 "), is not a multiple of 4 or smaller than sizeof (VkDrawMeshTasksIndirectCommandNV).", stride); } if (!physical_device_features.multiDrawIndirect && ((drawCount > 1))) { skip |= LogError( commandBuffer, "VUID-vkCmdDrawMeshTasksIndirectNV-drawCount-02718", "vkCmdDrawMeshTasksIndirectNV(): Device feature multiDrawIndirect disabled: count must be 0 or 1 but is %d", drawCount); } if (drawCount > device_limits.maxDrawIndirectCount) { skip |= LogError(commandBuffer, "VUID-vkCmdDrawMeshTasksIndirectNV-drawCount-02719", "vkCmdDrawMeshTasksIndirectNV: drawCount (%u) is not less than or equal to the maximum allowed (%u).", drawCount, device_limits.maxDrawIndirectCount); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdDrawMeshTasksIndirectCountNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) const { bool skip = false; if (offset & 3) { skip |= LogError(commandBuffer, "VUID-vkCmdDrawMeshTasksIndirectCountNV-offset-02710", "vkCmdDrawMeshTasksIndirectCountNV() parameter, VkDeviceSize offset (0x%" PRIxLEAST64 "), is not a multiple of 4.", offset); } if (countBufferOffset & 3) { skip |= LogError(commandBuffer, "VUID-vkCmdDrawMeshTasksIndirectCountNV-countBufferOffset-02716", "vkCmdDrawMeshTasksIndirectCountNV() parameter, VkDeviceSize countBufferOffset (0x%" PRIxLEAST64 "), is not a multiple of 4.", countBufferOffset); } return skip; } bool StatelessValidation::manual_PreCallValidateCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) const { bool skip = false; // Validation for parameters excluded from the generated validation code due to a 'noautovalidity' tag in vk.xml if (pCreateInfo != nullptr) { // If queryType is VK_QUERY_TYPE_PIPELINE_STATISTICS, pipelineStatistics must be a valid combination of // VkQueryPipelineStatisticFlagBits values if ((pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) && (pCreateInfo->pipelineStatistics != 0) && ((pCreateInfo->pipelineStatistics & (~AllVkQueryPipelineStatisticFlagBits)) != 0)) { skip |= LogError(device, "VUID-VkQueryPoolCreateInfo-queryType-00792", "vkCreateQueryPool(): if pCreateInfo->queryType is VK_QUERY_TYPE_PIPELINE_STATISTICS, " "pCreateInfo->pipelineStatistics must be a valid combination of VkQueryPipelineStatisticFlagBits " "values."); } if (pCreateInfo->queryCount == 0) { skip |= 
LogError(device, "VUID-VkQueryPoolCreateInfo-queryCount-02763", "vkCreateQueryPool(): queryCount must be greater than zero."); } } return skip; } bool StatelessValidation::manual_PreCallValidateEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName, uint32_t *pPropertyCount, VkExtensionProperties *pProperties) const { return validate_array("vkEnumerateDeviceExtensionProperties", "pPropertyCount", "pProperties", pPropertyCount, &pProperties, true, false, false, kVUIDUndefined, "VUID-vkEnumerateDeviceExtensionProperties-pProperties-parameter"); } void StatelessValidation::PostCallRecordCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass, VkResult result) { if (result != VK_SUCCESS) return; RecordRenderPass(*pRenderPass, pCreateInfo); } void StatelessValidation::PostCallRecordCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass, VkResult result) { // Track the state necessary for checking vkCreateGraphicsPipeline (subpass usage of depth and color attachments) if (result != VK_SUCCESS) return; RecordRenderPass(*pRenderPass, pCreateInfo); } void StatelessValidation::PostCallRecordDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) { // Track the state necessary for checking vkCreateGraphicsPipeline (subpass usage of depth and color attachments) std::unique_lock<std::mutex> lock(renderpass_map_mutex); renderpasses_states.erase(renderPass); } void StatelessValidation::PostCallRecordAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo, VkCommandBuffer *pCommandBuffers, VkResult result) { if ((result == VK_SUCCESS) && pAllocateInfo && (pAllocateInfo->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)) { auto lock = cb_write_lock(); for (uint32_t cb_index = 0; cb_index < pAllocateInfo->commandBufferCount; cb_index++) { secondary_cb_map.insert({pCommandBuffers[cb_index], pAllocateInfo->commandPool}); } } } void StatelessValidation::PostCallRecordFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) { auto lock = cb_write_lock(); for (uint32_t cb_index = 0; cb_index < commandBufferCount; cb_index++) { secondary_cb_map.erase(pCommandBuffers[cb_index]); } } void StatelessValidation::PostCallRecordDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) { auto lock = cb_write_lock(); for (auto item = secondary_cb_map.begin(); item != secondary_cb_map.end();) { if (item->second == commandPool) { item = secondary_cb_map.erase(item); } else { ++item; } } } bool StatelessValidation::manual_PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo, const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) const { bool skip = false; if (pAllocateInfo) { auto chained_prio_struct = LvlFindInChain<VkMemoryPriorityAllocateInfoEXT>(pAllocateInfo->pNext); if (chained_prio_struct && (chained_prio_struct->priority < 0.0f || chained_prio_struct->priority > 1.0f)) { skip |= LogError(device, "VUID-VkMemoryPriorityAllocateInfoEXT-priority-02602", "priority (=%f) must be between `0` and `1`, inclusive.", chained_prio_struct->priority); } VkMemoryAllocateFlags flags = 0; auto flags_info = LvlFindInChain<VkMemoryAllocateFlagsInfo>(pAllocateInfo->pNext); if 
(flags_info) { flags = flags_info->flags; } auto opaque_alloc_info = LvlFindInChain<VkMemoryOpaqueCaptureAddressAllocateInfo>(pAllocateInfo->pNext); if (opaque_alloc_info && opaque_alloc_info->opaqueCaptureAddress != 0) { if (!(flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT)) { skip |= LogError(device, "VUID-VkMemoryAllocateInfo-opaqueCaptureAddress-03329", "If opaqueCaptureAddress is non-zero, VkMemoryAllocateFlagsInfo::flags must include " "VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT."); } #ifdef VK_USE_PLATFORM_WIN32_KHR auto import_memory_win32_handle = LvlFindInChain<VkImportMemoryWin32HandleInfoKHR>(pAllocateInfo->pNext); #endif auto import_memory_fd = LvlFindInChain<VkImportMemoryFdInfoKHR>(pAllocateInfo->pNext); auto import_memory_host_pointer = LvlFindInChain<VkImportMemoryHostPointerInfoEXT>(pAllocateInfo->pNext); #ifdef VK_USE_PLATFORM_ANDROID_KHR auto import_memory_ahb = LvlFindInChain<VkImportAndroidHardwareBufferInfoANDROID>(pAllocateInfo->pNext); #endif if (import_memory_host_pointer) { skip |= LogError( device, "VUID-VkMemoryAllocateInfo-pNext-03332", "If the pNext chain includes a VkImportMemoryHostPointerInfoEXT structure, opaqueCaptureAddress must be zero."); } if ( #ifdef VK_USE_PLATFORM_WIN32_KHR (import_memory_win32_handle && import_memory_win32_handle->handleType) || #endif (import_memory_fd && import_memory_fd->handleType) || #ifdef VK_USE_PLATFORM_ANDROID_KHR (import_memory_ahb && import_memory_ahb->buffer) || #endif (import_memory_host_pointer && import_memory_host_pointer->handleType)) { skip |= LogError(device, "VUID-VkMemoryAllocateInfo-opaqueCaptureAddress-03333", "If the parameters define an import operation, opaqueCaptureAddress must be zero."); } } if (flags) { VkBool32 capture_replay = false; VkBool32 buffer_device_address = false; const auto *vulkan_12_features = LvlFindInChain<VkPhysicalDeviceVulkan12Features>(device_createinfo_pnext); if (vulkan_12_features) { capture_replay = vulkan_12_features->bufferDeviceAddressCaptureReplay; buffer_device_address = vulkan_12_features->bufferDeviceAddress; } else { const auto *bda_features = LvlFindInChain<VkPhysicalDeviceBufferDeviceAddressFeatures>(device_createinfo_pnext); if (bda_features) { capture_replay = bda_features->bufferDeviceAddressCaptureReplay; buffer_device_address = bda_features->bufferDeviceAddress; } } if ((flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT) && !capture_replay) { skip |= LogError(device, "VUID-VkMemoryAllocateInfo-flags-03330", "If VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT is set, " "bufferDeviceAddressCaptureReplay must be enabled."); } if ((flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT) && !buffer_device_address) { skip |= LogError(device, "VUID-VkMemoryAllocateInfo-flags-03331", "If VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT is set, bufferDeviceAddress must be enabled."); } } } return skip; } bool StatelessValidation::ValidateGeometryTrianglesNV(const VkGeometryTrianglesNV &triangles, VkAccelerationStructureNV object_handle, const char *func_name) const { bool skip = false; if (triangles.vertexFormat != VK_FORMAT_R32G32B32_SFLOAT && triangles.vertexFormat != VK_FORMAT_R16G16B16_SFLOAT && triangles.vertexFormat != VK_FORMAT_R16G16B16_SNORM && triangles.vertexFormat != VK_FORMAT_R32G32_SFLOAT && triangles.vertexFormat != VK_FORMAT_R16G16_SFLOAT && triangles.vertexFormat != VK_FORMAT_R16G16_SNORM) { skip |= LogError(object_handle, "VUID-VkGeometryTrianglesNV-vertexFormat-02430", "%s", func_name); } else { uint32_t vertex_component_size = 0; 
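// Map the accepted vertex formats to their per-component size in bytes (R32* components are 4 bytes, R16* components are 2 bytes) so the vertexOffset alignment requirement below can be checked.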
if (triangles.vertexFormat == VK_FORMAT_R32G32B32_SFLOAT || triangles.vertexFormat == VK_FORMAT_R32G32_SFLOAT) { vertex_component_size = 4; } else if (triangles.vertexFormat == VK_FORMAT_R16G16B16_SFLOAT || triangles.vertexFormat == VK_FORMAT_R16G16B16_SNORM || triangles.vertexFormat == VK_FORMAT_R16G16_SFLOAT || triangles.vertexFormat == VK_FORMAT_R16G16_SNORM) { vertex_component_size = 2; } if (vertex_component_size > 0 && SafeModulo(triangles.vertexOffset, vertex_component_size) != 0) { skip |= LogError(object_handle, "VUID-VkGeometryTrianglesNV-vertexOffset-02429", "%s", func_name); } } if (triangles.indexType != VK_INDEX_TYPE_UINT32 && triangles.indexType != VK_INDEX_TYPE_UINT16 && triangles.indexType != VK_INDEX_TYPE_NONE_NV) { skip |= LogError(object_handle, "VUID-VkGeometryTrianglesNV-indexType-02433", "%s", func_name); } else { uint32_t index_element_size = 0; if (triangles.indexType == VK_INDEX_TYPE_UINT32) { index_element_size = 4; } else if (triangles.indexType == VK_INDEX_TYPE_UINT16) { index_element_size = 2; } if (index_element_size > 0 && SafeModulo(triangles.indexOffset, index_element_size) != 0) { skip |= LogError(object_handle, "VUID-VkGeometryTrianglesNV-indexOffset-02432", "%s", func_name); } } if (triangles.indexType == VK_INDEX_TYPE_NONE_NV) { if (triangles.indexCount != 0) { skip |= LogError(object_handle, "VUID-VkGeometryTrianglesNV-indexCount-02436", "%s", func_name); } if (triangles.indexData != VK_NULL_HANDLE) { skip |= LogError(object_handle, "VUID-VkGeometryTrianglesNV-indexData-02434", "%s", func_name); } } if (SafeModulo(triangles.transformOffset, 16) != 0) { skip |= LogError(object_handle, "VUID-VkGeometryTrianglesNV-transformOffset-02438", "%s", func_name); } return skip; } bool StatelessValidation::ValidateGeometryAABBNV(const VkGeometryAABBNV &aabbs, VkAccelerationStructureNV object_handle, const char *func_name) const { bool skip = false; if (SafeModulo(aabbs.offset, 8) != 0) { skip |= LogError(object_handle, "VUID-VkGeometryAABBNV-offset-02440", "%s", func_name); } if (SafeModulo(aabbs.stride, 8) != 0) { skip |= LogError(object_handle, "VUID-VkGeometryAABBNV-stride-02441", "%s", func_name); } return skip; } bool StatelessValidation::ValidateGeometryNV(const VkGeometryNV &geometry, VkAccelerationStructureNV object_handle, const char *func_name) const { bool skip = false; if (geometry.geometryType == VK_GEOMETRY_TYPE_TRIANGLES_NV) { skip = ValidateGeometryTrianglesNV(geometry.geometry.triangles, object_handle, func_name); } else if (geometry.geometryType == VK_GEOMETRY_TYPE_AABBS_NV) { skip = ValidateGeometryAABBNV(geometry.geometry.aabbs, object_handle, func_name); } return skip; } bool StatelessValidation::ValidateAccelerationStructureInfoNV(const VkAccelerationStructureInfoNV &info, VkAccelerationStructureNV object_handle, const char *func_name, bool is_cmd) const { bool skip = false; if (info.type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV && info.geometryCount != 0) { skip |= LogError(object_handle, "VUID-VkAccelerationStructureInfoNV-type-02425", "VkAccelerationStructureInfoNV: If type is VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV then " "geometryCount must be 0."); } if (info.type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV && info.instanceCount != 0) { skip |= LogError(object_handle, "VUID-VkAccelerationStructureInfoNV-type-02426", "VkAccelerationStructureInfoNV: If type is VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV then " "instanceCount must be 0."); } if (info.flags & VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NV && 
info.flags & VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NV) { skip |= LogError(object_handle, "VUID-VkAccelerationStructureInfoNV-flags-02592", "VkAccelerationStructureInfoNV: If flags has the VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NV " "bit set, then it must not have the VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NV bit set."); } if (info.geometryCount > phys_dev_ext_props.ray_tracing_propsNV.maxGeometryCount) { skip |= LogError(object_handle, is_cmd ? "VUID-vkCmdBuildAccelerationStructureNV-geometryCount-02241" : "VUID-VkAccelerationStructureInfoNV-geometryCount-02422", "VkAccelerationStructureInfoNV: geometryCount must be less than or equal to " "VkPhysicalDeviceRayTracingPropertiesNV::maxGeometryCount."); } if (info.instanceCount > phys_dev_ext_props.ray_tracing_propsNV.maxInstanceCount) { skip |= LogError(object_handle, "VUID-VkAccelerationStructureInfoNV-instanceCount-02423", "VkAccelerationStructureInfoNV: instanceCount must be less than or equal to " "VkPhysicalDeviceRayTracingPropertiesNV::maxInstanceCount."); } if (info.type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV && info.geometryCount > 0) { uint64_t total_triangle_count = 0; for (uint32_t i = 0; i < info.geometryCount; i++) { const VkGeometryNV &geometry = info.pGeometries[i]; skip |= ValidateGeometryNV(geometry, object_handle, func_name); if (geometry.geometryType != VK_GEOMETRY_TYPE_TRIANGLES_NV) { continue; } total_triangle_count += geometry.geometry.triangles.indexCount / 3; } if (total_triangle_count > phys_dev_ext_props.ray_tracing_propsNV.maxTriangleCount) { skip |= LogError(object_handle, "VUID-VkAccelerationStructureInfoNV-maxTriangleCount-02424", "VkAccelerationStructureInfoNV: The total number of triangles in all geometries must be less than " "or equal to VkPhysicalDeviceRayTracingPropertiesNV::maxTriangleCount."); } } if (info.type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV && info.geometryCount > 1) { const VkGeometryTypeNV first_geometry_type = info.pGeometries[0].geometryType; for (uint32_t i = 1; i < info.geometryCount; i++) { const VkGeometryNV &geometry = info.pGeometries[i]; if (geometry.geometryType != first_geometry_type) { skip |= LogError(device, "VUID-VkAccelerationStructureInfoNV-type-02786", "VkAccelerationStructureInfoNV: info.pGeometries[%d].geometryType does not match " "info.pGeometries[0].geometryType.", i); } } } for (uint32_t geometry_index = 0; geometry_index < info.geometryCount; ++geometry_index) { if (!(info.pGeometries[geometry_index].geometryType == VK_GEOMETRY_TYPE_TRIANGLES_NV || info.pGeometries[geometry_index].geometryType == VK_GEOMETRY_TYPE_AABBS_NV)) { skip |= LogError(device, "VUID-VkGeometryNV-geometryType-03503", "VkGeometryNV: geometryType must be VK_GEOMETRY_TYPE_TRIANGLES_NV " "or VK_GEOMETRY_TYPE_AABBS_NV."); } } skip |= validate_flags(func_name, "info.flags", "VkBuildAccelerationStructureFlagBitsNV", AllVkBuildAccelerationStructureFlagBitsNV, info.flags, kOptionalFlags, "VUID-VkAccelerationStructureInfoNV-flags-parameter"); return skip; } bool StatelessValidation::manual_PreCallValidateCreateAccelerationStructureNV( VkDevice device, const VkAccelerationStructureCreateInfoNV *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkAccelerationStructureNV *pAccelerationStructure) const { bool skip = false; if (pCreateInfo) { if ((pCreateInfo->compactedSize != 0) && ((pCreateInfo->info.geometryCount != 0) || (pCreateInfo->info.instanceCount != 0))) { skip |= LogError(device,
"VUID-VkAccelerationStructureCreateInfoNV-compactedSize-02421", "vkCreateAccelerationStructureNV(): pCreateInfo->compactedSize nonzero (%" PRIu64 ") with info.geometryCount (%" PRIu32 ") or info.instanceCount (%" PRIu32 ") nonzero.", pCreateInfo->compactedSize, pCreateInfo->info.geometryCount, pCreateInfo->info.instanceCount); } skip |= ValidateAccelerationStructureInfoNV(pCreateInfo->info, VkAccelerationStructureNV(0), "vkCreateAccelerationStructureNV()", false); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdBuildAccelerationStructureNV(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV *pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset) const { bool skip = false; if (pInfo != nullptr) { skip |= ValidateAccelerationStructureInfoNV(*pInfo, dst, "vkCmdBuildAccelerationStructureNV()", true); } return skip; } bool StatelessValidation::manual_PreCallValidateCreateAccelerationStructureKHR( VkDevice device, const VkAccelerationStructureCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkAccelerationStructureKHR *pAccelerationStructure) const { bool skip = false; const auto *acceleration_structure_features = LvlFindInChain<VkPhysicalDeviceAccelerationStructureFeaturesKHR>(device_createinfo_pnext); if (!acceleration_structure_features || (acceleration_structure_features && acceleration_structure_features->accelerationStructure == VK_FALSE)) { skip |= LogError(device, "VUID-vkCreateAccelerationStructureKHR-accelerationStructure-03611", "vkCreateAccelerationStructureKHR(): The accelerationStructure feature must be enabled"); } if (pCreateInfo) { if (pCreateInfo->createFlags & VK_ACCELERATION_STRUCTURE_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR && (!acceleration_structure_features || (acceleration_structure_features && acceleration_structure_features->accelerationStructureCaptureReplay == VK_FALSE))) { skip |= LogError(device, "VUID-VkAccelerationStructureCreateInfoKHR-createFlags-03613", "vkCreateAccelerationStructureKHR(): If createFlags includes " "VK_ACCELERATION_STRUCTURE_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR, " "VkPhysicalDeviceAccelerationStructureFeaturesKHR::accelerationStructureCaptureReplay must be VK_TRUE"); } if (pCreateInfo->deviceAddress && !(pCreateInfo->createFlags & VK_ACCELERATION_STRUCTURE_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR)) { skip |= LogError(device, "VUID-VkAccelerationStructureCreateInfoKHR-deviceAddress-03612", "vkCreateAccelerationStructureKHR(): If deviceAddress is not zero, createFlags must include " "VK_ACCELERATION_STRUCTURE_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR"); } if (SafeModulo(pCreateInfo->offset, 256) != 0) { skip |= LogError(device, "VUID-VkAccelerationStructureCreateInfoKHR-offset-03734", "vkCreateAccelerationStructureKHR(): offset must be a multiple of 256 bytes", pCreateInfo->offset); } } return skip; } bool StatelessValidation::manual_PreCallValidateGetAccelerationStructureHandleNV(VkDevice device, VkAccelerationStructureNV accelerationStructure, size_t dataSize, void *pData) const { bool skip = false; if (dataSize < 8) { skip = LogError(accelerationStructure, "VUID-vkGetAccelerationStructureHandleNV-dataSize-02240", "vkGetAccelerationStructureHandleNV(): dataSize must be greater than or equal to 8."); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdWriteAccelerationStructuresPropertiesNV( VkCommandBuffer commandBuffer, uint32_t 
accelerationStructureCount, const VkAccelerationStructureNV *pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) const { bool skip = false; if (queryType != VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV) { skip |= LogError(device, "VUID-vkCmdWriteAccelerationStructuresPropertiesNV-queryType-03432", "vkCmdWriteAccelerationStructuresPropertiesNV: queryType must be " "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV."); } return skip; } bool StatelessValidation::manual_PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) const { bool skip = false; for (uint32_t i = 0; i < createInfoCount; i++) { auto feedback_struct = LvlFindInChain<VkPipelineCreationFeedbackCreateInfoEXT>(pCreateInfos[i].pNext); if ((feedback_struct != nullptr) && (feedback_struct->pipelineStageCreationFeedbackCount != pCreateInfos[i].stageCount)) { skip |= LogError(device, "VUID-VkPipelineCreationFeedbackCreateInfoEXT-pipelineStageCreationFeedbackCount-02969", "vkCreateRayTracingPipelinesNV(): in pCreateInfo[%" PRIu32 "], VkPipelineCreationFeedbackEXT::pipelineStageCreationFeedbackCount" "(=%" PRIu32 ") must equal VkRayTracingPipelineCreateInfoNV::stageCount(=%" PRIu32 ").", i, feedback_struct->pipelineStageCreationFeedbackCount, pCreateInfos[i].stageCount); } const auto *pipeline_cache_control_features = LvlFindInChain<VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT>(device_createinfo_pnext); if (!pipeline_cache_control_features || pipeline_cache_control_features->pipelineCreationCacheControl == VK_FALSE) { if (pCreateInfos[i].flags & (VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT | VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT)) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoNV-pipelineCreationCacheControl-02905", "vkCreateRayTracingPipelinesNV(): If the pipelineCreationCacheControl feature is not enabled, " "flags must not include VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT or " "VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT."); } } if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoNV-flags-02904", "vkCreateRayTracingPipelinesNV(): flags must not include VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV."); } if ((pCreateInfos[i].flags & VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV) && (pCreateInfos[i].flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT)) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoNV-flags-02957", "vkCreateRayTracingPipelinesNV(): flags must not include both VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV and " "VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT at the same time."); } if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) { if (pCreateInfos[i].basePipelineIndex != -1) { if (pCreateInfos[i].basePipelineHandle != VK_NULL_HANDLE) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03423", "vkCreateRayTracingPipelinesNV parameter, pCreateInfos->basePipelineHandle, must be " "VK_NULL_HANDLE if pCreateInfos->flags contains the VK_PIPELINE_CREATE_DERIVATIVE_BIT flag " "and pCreateInfos->basePipelineIndex is not -1."); } if (pCreateInfos[i].basePipelineIndex > static_cast<int32_t>(i)) { skip |= LogError(device, "VUID-vkCreateRayTracingPipelinesNV-flags-03415",
"vkCreateRayTracingPipelinesNV: If the flags member of any element of pCreateInfos contains the" "VK_PIPELINE_CREATE_DERIVATIVE_BIT flag, and the basePipelineIndex member of that same element" "is not -1, basePipelineIndex must be less than the index into pCreateInfos that corresponds to " "that element."); } } if (pCreateInfos[i].basePipelineHandle == VK_NULL_HANDLE) { if (static_cast<uint32_t>(pCreateInfos[i].basePipelineIndex) >= createInfoCount) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03422", "vkCreateRayTracingPipelinesNV if flags contains the VK_PIPELINE_CREATE_DERIVATIVE_BIT and" "basePipelineHandle is VK_NULL_HANDLE, basePipelineIndex must be a valid index into the calling" "commands pCreateInfos parameter."); } } else { if (pCreateInfos[i].basePipelineIndex != -1) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03424", "vkCreateRayTracingPipelinesNV if flags contains the VK_PIPELINE_CREATE_DERIVATIVE_BIT and" "basePipelineHandle is not VK_NULL_HANDLE, basePipelineIndex must be -1."); } } } if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03456", "vkCreateRayTracingPipelinesNV: flags must not include VK_PIPELINE_CREATE_LIBRARY_BIT_KHR."); } if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03458", "vkCreateRayTracingPipelinesNV: flags must not include " "VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR."); } if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03459", "vkCreateRayTracingPipelinesNV: flags must not include " "VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR."); } if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03460", "vkCreateRayTracingPipelinesNV: flags must not include " "VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR."); } if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03461", "vkCreateRayTracingPipelinesNV: flags must not include " "VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR."); } if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR) { skip |= LogError( device, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03462", "vkCreateRayTracingPipelinesNV: flags must not include VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR."); } if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR) { skip |= LogError( device, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03463", "vkCreateRayTracingPipelinesNV: flags must not include VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR ."); } if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03588", "vkCreateRayTracingPipelinesNV: flags must not include " "VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR."); } if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_DISPATCH_BASE) { skip |= LogError(device, "VUID-vkCreateRayTracingPipelinesNV-flags-03816", 
"vkCreateRayTracingPipelinesNV: flags must not contain the VK_PIPELINE_CREATE_DISPATCH_BASE flag."); } } return skip; } bool StatelessValidation::manual_PreCallValidateCreateRayTracingPipelinesKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) const { bool skip = false; const auto *raytracing_features = LvlFindInChain<VkPhysicalDeviceRayTracingPipelineFeaturesKHR>(device_createinfo_pnext); if (!raytracing_features || raytracing_features->rayTracingPipeline == VK_FALSE) { skip |= LogError(device, "VUID-vkCreateRayTracingPipelinesKHR-rayTracingPipeline-03586", "vkCreateRayTracingPipelinesKHR: The rayTracingPipeline feature must be enabled."); } for (uint32_t i = 0; i < createInfoCount; i++) { if (!raytracing_features || (raytracing_features && raytracing_features->rayTraversalPrimitiveCulling == VK_FALSE)) { if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-rayTraversalPrimitiveCulling-03596", "vkCreateRayTracingPipelinesKHR: If the rayTraversalPrimitiveCulling feature is not enabled, " "flags must not include VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR."); } if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-rayTraversalPrimitiveCulling-03597", "vkCreateRayTracingPipelinesKHR: If the rayTraversalPrimitiveCulling feature is not enabled, " "flags must not include VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR."); } } auto feedback_struct = LvlFindInChain<VkPipelineCreationFeedbackCreateInfoEXT>(pCreateInfos[i].pNext); if ((feedback_struct != nullptr) && (feedback_struct->pipelineStageCreationFeedbackCount != pCreateInfos[i].stageCount)) { skip |= LogError(device, "VUID-VkPipelineCreationFeedbackCreateInfoEXT-pipelineStageCreationFeedbackCount-02670", "vkCreateRayTracingPipelinesKHR: in pCreateInfo[%" PRIu32 "], When chained to VkRayTracingPipelineCreateInfoKHR, " "VkPipelineCreationFeedbackEXT::pipelineStageCreationFeedbackCount" "(=%" PRIu32 ") must equal VkRayTracingPipelineCreateInfoKHR::stageCount(=%" PRIu32 ").", i, feedback_struct->pipelineStageCreationFeedbackCount, pCreateInfos[i].stageCount); } const auto *pipeline_cache_contol_features = LvlFindInChain<VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT>(device_createinfo_pnext); if (!pipeline_cache_contol_features || pipeline_cache_contol_features->pipelineCreationCacheControl == VK_FALSE) { if (pCreateInfos[i].flags & (VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT | VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT)) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-pipelineCreationCacheControl-02905", "vkCreateRayTracingPipelinesKHR: If the pipelineCreationCacheControl feature is not enabled," "flags must not include VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT or" "VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT."); } } if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-02904", "vkCreateRayTracingPipelinesKHR: flags must not include VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV."); } if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) { if (pCreateInfos[i].pLibraryInterface == 
NULL) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-03465", "vkCreateRayTracingPipelinesKHR: If flags includes VK_PIPELINE_CREATE_LIBRARY_BIT_KHR, " "pLibraryInterface must not be NULL."); } } if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_DISPATCH_BASE) { skip |= LogError(device, "VUID-vkCreateRayTracingPipelinesKHR-flags-03816", "vkCreateRayTracingPipelinesKHR: flags must not contain the VK_PIPELINE_CREATE_DISPATCH_BASE flag."); } for (uint32_t group_index = 0; group_index < pCreateInfos[i].groupCount; ++group_index) { if ((pCreateInfos[i].pGroups[group_index].type == VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR) || (pCreateInfos[i].pGroups[group_index].type == VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR)) { if ((pCreateInfos[i].flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR) && (pCreateInfos[i].pGroups[group_index].anyHitShader == VK_SHADER_UNUSED_KHR)) { skip |= LogError( device, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-03470", "vkCreateRayTracingPipelinesKHR: If flags includes " "VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR, " "for any element of pGroups with a type of VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR " "or VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR, the anyHitShader of that element " "must not be VK_SHADER_UNUSED_KHR"); } if ((pCreateInfos[i].flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR) && (pCreateInfos[i].pGroups[group_index].closestHitShader == VK_SHADER_UNUSED_KHR)) { skip |= LogError( device, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-03471", "vkCreateRayTracingPipelinesKHR: If flags includes " "VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR, " "for any element of pGroups with a type of VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR " "or VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR, the closestHitShader of that " "element must not be VK_SHADER_UNUSED_KHR"); } } if (raytracing_features && raytracing_features->rayTracingPipelineShaderGroupHandleCaptureReplay == VK_TRUE && pCreateInfos[i].pGroups[group_index].pShaderGroupCaptureReplayHandle) { if (!(pCreateInfos[i].flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR)) { skip |= LogError( device, "VUID-VkRayTracingPipelineCreateInfoKHR-rayTracingPipelineShaderGroupHandleCaptureReplay-03599", "vkCreateRayTracingPipelinesKHR: If " "VkPhysicalDeviceRayTracingPipelineFeaturesKHR::rayTracingPipelineShaderGroupHandleCaptureReplay is " "VK_TRUE and the pShaderGroupCaptureReplayHandle member of any element of pGroups is not NULL, flags must " "include VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR."); } } } if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) { if (pCreateInfos[i].basePipelineIndex != -1) { if (pCreateInfos[i].basePipelineHandle != VK_NULL_HANDLE) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-03423", "vkCreateRayTracingPipelinesKHR: parameter, pCreateInfos->basePipelineHandle, must be " "VK_NULL_HANDLE if pCreateInfos->flags contains the VK_PIPELINE_CREATE_DERIVATIVE_BIT flag " "and pCreateInfos->basePipelineIndex is not -1."); } if (pCreateInfos[i].basePipelineIndex > static_cast<int32_t>(i)) { skip |= LogError(device, "VUID-vkCreateRayTracingPipelinesKHR-flags-03415", "vkCreateRayTracingPipelinesKHR: If the flags member of any element of pCreateInfos contains the " "VK_PIPELINE_CREATE_DERIVATIVE_BIT flag, and the
basePipelineIndex member of that same element is" "not -1, basePipelineIndex must be less than the index into pCreateInfos that corresponds to that " "element."); } } if (pCreateInfos[i].basePipelineHandle == VK_NULL_HANDLE) { if (static_cast<uint32_t>(pCreateInfos[i].basePipelineIndex) >= createInfoCount) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-03422", "vkCreateRayTracingPipelinesKHR: if flags contains the VK_PIPELINE_CREATE_DERIVATIVE_BIT and " "basePipelineHandle is VK_NULL_HANDLE, basePipelineIndex (%d) must be a valid index into the calling " "command's pCreateInfos parameter %d.", pCreateInfos[i].basePipelineIndex, createInfoCount); } } else { if (pCreateInfos[i].basePipelineIndex != -1) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-03424", "vkCreateRayTracingPipelinesKHR: if flags contains the VK_PIPELINE_CREATE_DERIVATIVE_BIT and " "basePipelineHandle is not VK_NULL_HANDLE, basePipelineIndex must be -1."); } } } if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR && (raytracing_features && raytracing_features->rayTracingPipelineShaderGroupHandleCaptureReplay == VK_FALSE)) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-03598", "vkCreateRayTracingPipelinesKHR: If flags includes " "VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR, " "rayTracingPipelineShaderGroupHandleCaptureReplay must be enabled."); } bool library_enabled = IsExtEnabled(device_extensions.vk_khr_pipeline_library); if (!library_enabled && (pCreateInfos[i].pLibraryInfo || pCreateInfos[i].pLibraryInterface)) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-pLibraryInfo-03595", "vkCreateRayTracingPipelinesKHR: If the VK_KHR_pipeline_library extension is not enabled, " "pLibraryInfo and pLibraryInterface must be NULL."); } if (pCreateInfos[i].pLibraryInfo) { if (pCreateInfos[i].pLibraryInfo->libraryCount == 0) { if (pCreateInfos[i].stageCount == 0) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-pLibraryInfo-03600", "vkCreateRayTracingPipelinesKHR: If pLibraryInfo is not NULL and its libraryCount is 0, " "stageCount must not be 0."); } if (pCreateInfos[i].groupCount == 0) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-pLibraryInfo-03601", "vkCreateRayTracingPipelinesKHR: If pLibraryInfo is not NULL and its libraryCount is 0, " "groupCount must not be 0."); } } else { if (pCreateInfos[i].pLibraryInterface == NULL) { skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-pLibraryInfo-03590", "vkCreateRayTracingPipelinesKHR: If pLibraryInfo is not NULL and its libraryCount member " "is greater than 0, its " "pLibraryInterface member must not be NULL."); } } } if (pCreateInfos[i].pLibraryInterface) { if (pCreateInfos[i].pLibraryInterface->maxPipelineRayHitAttributeSize > phys_dev_ext_props.ray_tracing_propsKHR.maxRayHitAttributeSize) { skip |= LogError(device, "VUID-VkRayTracingPipelineInterfaceCreateInfoKHR-maxPipelineRayHitAttributeSize-03605", "vkCreateRayTracingPipelinesKHR: maxPipelineRayHitAttributeSize must be less than or equal to " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::maxRayHitAttributeSize."); } } if (deferredOperation != VK_NULL_HANDLE) { if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT) { skip |= LogError(device, "VUID-vkCreateRayTracingPipelinesKHR-deferredOperation-03587", "vkCreateRayTracingPipelinesKHR: If deferredOperation is not VK_NULL_HANDLE, the
flags member of " "elements of pCreateInfos must not include VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT."); } } } return skip; } #ifdef VK_USE_PLATFORM_WIN32_KHR bool StatelessValidation::PreCallValidateGetDeviceGroupSurfacePresentModes2EXT(VkDevice device, const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo, VkDeviceGroupPresentModeFlagsKHR *pModes) const { bool skip = false; if (!device_extensions.vk_khr_swapchain) skip |= OutputExtensionError("vkGetDeviceGroupSurfacePresentModes2EXT", VK_KHR_SWAPCHAIN_EXTENSION_NAME); if (!device_extensions.vk_khr_get_surface_capabilities_2) skip |= OutputExtensionError("vkGetDeviceGroupSurfacePresentModes2EXT", VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME); if (!device_extensions.vk_khr_surface) skip |= OutputExtensionError("vkGetDeviceGroupSurfacePresentModes2EXT", VK_KHR_SURFACE_EXTENSION_NAME); if (!device_extensions.vk_khr_get_physical_device_properties_2) skip |= OutputExtensionError("vkGetDeviceGroupSurfacePresentModes2EXT", VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); if (!device_extensions.vk_ext_full_screen_exclusive) skip |= OutputExtensionError("vkGetDeviceGroupSurfacePresentModes2EXT", VK_EXT_FULL_SCREEN_EXCLUSIVE_EXTENSION_NAME); skip |= validate_struct_type( "vkGetDeviceGroupSurfacePresentModes2EXT", "pSurfaceInfo", "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR", pSurfaceInfo, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR, true, "VUID-vkGetDeviceGroupSurfacePresentModes2EXT-pSurfaceInfo-parameter", "VUID-VkPhysicalDeviceSurfaceInfo2KHR-sType-sType"); if (pSurfaceInfo != NULL) { const VkStructureType allowed_structs_VkPhysicalDeviceSurfaceInfo2KHR[] = { VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT, VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT}; skip |= validate_struct_pnext("vkGetDeviceGroupSurfacePresentModes2EXT", "pSurfaceInfo->pNext", "VkSurfaceFullScreenExclusiveInfoEXT, VkSurfaceFullScreenExclusiveWin32InfoEXT", pSurfaceInfo->pNext, ARRAY_SIZE(allowed_structs_VkPhysicalDeviceSurfaceInfo2KHR), allowed_structs_VkPhysicalDeviceSurfaceInfo2KHR, GeneratedVulkanHeaderVersion, "VUID-VkPhysicalDeviceSurfaceInfo2KHR-pNext-pNext", "VUID-VkPhysicalDeviceSurfaceInfo2KHR-sType-unique"); skip |= validate_required_handle("vkGetDeviceGroupSurfacePresentModes2EXT", "pSurfaceInfo->surface", pSurfaceInfo->surface); } return skip; } #endif bool StatelessValidation::manual_PreCallValidateCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) const { // Validation for pAttachments which is excluded from the generated validation code due to a 'noautovalidity' tag in vk.xml bool skip = false; if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) { skip |= validate_array("vkCreateFramebuffer", "attachmentCount", "pAttachments", pCreateInfo->attachmentCount, &pCreateInfo->pAttachments, false, true, kVUIDUndefined, kVUIDUndefined); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdSetLineStippleEXT(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern) const { bool skip = false; if (lineStippleFactor < 1 || lineStippleFactor > 256) { skip |= LogError(commandBuffer, "VUID-vkCmdSetLineStippleEXT-lineStippleFactor-02776", "vkCmdSetLineStippleEXT::lineStippleFactor=%d is not in [1,256].", lineStippleFactor); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, 
VkDeviceSize offset, VkIndexType indexType) const { bool skip = false; if (indexType == VK_INDEX_TYPE_NONE_NV) { skip |= LogError(commandBuffer, "VUID-vkCmdBindIndexBuffer-indexType-02507", "vkCmdBindIndexBuffer() indexType must not be VK_INDEX_TYPE_NONE_NV."); } const auto *index_type_uint8_features = LvlFindInChain<VkPhysicalDeviceIndexTypeUint8FeaturesEXT>(device_createinfo_pnext); if (indexType == VK_INDEX_TYPE_UINT8_EXT && (!index_type_uint8_features || !index_type_uint8_features->indexTypeUint8)) { skip |= LogError(commandBuffer, "VUID-vkCmdBindIndexBuffer-indexType-02765", "vkCmdBindIndexBuffer() indexType is VK_INDEX_TYPE_UINT8_EXT but indexTypeUint8 feature is not enabled."); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) const { bool skip = false; if (firstBinding > device_limits.maxVertexInputBindings) { skip |= LogError(commandBuffer, "VUID-vkCmdBindVertexBuffers-firstBinding-00624", "vkCmdBindVertexBuffers() firstBinding (%u) must be less than maxVertexInputBindings (%u)", firstBinding, device_limits.maxVertexInputBindings); } else if ((firstBinding + bindingCount) > device_limits.maxVertexInputBindings) { skip |= LogError(commandBuffer, "VUID-vkCmdBindVertexBuffers-firstBinding-00625", "vkCmdBindVertexBuffers() sum of firstBinding (%u) and bindingCount (%u) must be less than " "maxVertexInputBindings (%u)", firstBinding, bindingCount, device_limits.maxVertexInputBindings); } for (uint32_t i = 0; i < bindingCount; ++i) { if (pBuffers[i] == VK_NULL_HANDLE) { const auto *robustness2_features = LvlFindInChain<VkPhysicalDeviceRobustness2FeaturesEXT>(device_createinfo_pnext); if (!(robustness2_features && robustness2_features->nullDescriptor)) { skip |= LogError(commandBuffer, "VUID-vkCmdBindVertexBuffers-pBuffers-04001", "vkCmdBindVertexBuffers() required parameter pBuffers[%d] specified as VK_NULL_HANDLE", i); } else { if (pOffsets[i] != 0) { skip |= LogError(commandBuffer, "VUID-vkCmdBindVertexBuffers-pBuffers-04002", "vkCmdBindVertexBuffers() pBuffers[%d] is VK_NULL_HANDLE, but pOffsets[%d] is not 0", i, i); } } } } return skip; } bool StatelessValidation::manual_PreCallValidateSetDebugUtilsObjectNameEXT(VkDevice device, const VkDebugUtilsObjectNameInfoEXT *pNameInfo) const { bool skip = false; if (pNameInfo->objectType == VK_OBJECT_TYPE_UNKNOWN) { skip |= LogError(device, "VUID-VkDebugUtilsObjectNameInfoEXT-objectType-02589", "vkSetDebugUtilsObjectNameEXT() pNameInfo->objectType cannot be VK_OBJECT_TYPE_UNKNOWN."); } return skip; } bool StatelessValidation::manual_PreCallValidateSetDebugUtilsObjectTagEXT(VkDevice device, const VkDebugUtilsObjectTagInfoEXT *pTagInfo) const { bool skip = false; if (pTagInfo->objectType == VK_OBJECT_TYPE_UNKNOWN) { skip |= LogError(device, "VUID-VkDebugUtilsObjectTagInfoEXT-objectType-01908", "vkSetDebugUtilsObjectTagEXT() pTagInfo->objectType cannot be VK_OBJECT_TYPE_UNKNOWN."); } return skip; } bool StatelessValidation::manual_PreCallValidateAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) const { bool skip = false; if (semaphore == VK_NULL_HANDLE && fence == VK_NULL_HANDLE) { skip |= LogError(swapchain, "VUID-vkAcquireNextImageKHR-semaphore-01780", "vkAcquireNextImageKHR: semaphore and fence are both VK_NULL_HANDLE."); } return skip; } bool 
StatelessValidation::manual_PreCallValidateAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo, uint32_t *pImageIndex) const { bool skip = false; if (pAcquireInfo->semaphore == VK_NULL_HANDLE && pAcquireInfo->fence == VK_NULL_HANDLE) { skip |= LogError(pAcquireInfo->swapchain, "VUID-VkAcquireNextImageInfoKHR-semaphore-01782", "vkAcquireNextImage2KHR: pAcquireInfo->semaphore and pAcquireInfo->fence are both VK_NULL_HANDLE."); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers, const VkDeviceSize *pOffsets, const VkDeviceSize *pSizes) const { bool skip = false; char const *const cmd_name = "CmdBindTransformFeedbackBuffersEXT"; for (uint32_t i = 0; i < bindingCount; ++i) { if (pOffsets[i] & 3) { skip |= LogError(commandBuffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pOffsets-02359", "%s: pOffsets[%" PRIu32 "](0x%" PRIxLEAST64 ") is not a multiple of 4.", cmd_name, i, pOffsets[i]); } } if (firstBinding >= phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBuffers) { skip |= LogError(commandBuffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-firstBinding-02356", "%s: The firstBinding(%" PRIu32 ") index is greater than or equal to " "VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackBuffers(%" PRIu32 ").", cmd_name, firstBinding, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBuffers); } if (firstBinding + bindingCount > phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBuffers) { skip |= LogError(commandBuffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-firstBinding-02357", "%s: The sum of firstBinding(%" PRIu32 ") and bindCount(%" PRIu32 ") is greater than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackBuffers(%" PRIu32 ").", cmd_name, firstBinding, bindingCount, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBuffers); } for (uint32_t i = 0; i < bindingCount; ++i) { // pSizes is optional and may be nullptr. 
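// When pSizes is provided, each entry must be either VK_WHOLE_SIZE or no larger than maxTransformFeedbackBufferSize; this is checked below.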
if (pSizes != nullptr) { if (pSizes[i] != VK_WHOLE_SIZE && pSizes[i] > phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBufferSize) { skip |= LogError(commandBuffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pSize-02361", "%s: pSizes[%" PRIu32 "] (0x%" PRIxLEAST64 ") is not VK_WHOLE_SIZE and is greater than " "VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackBufferSize.", cmd_name, i, pSizes[i]); } } } return skip; } bool StatelessValidation::manual_PreCallValidateCmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer *pCounterBuffers, const VkDeviceSize *pCounterBufferOffsets) const { bool skip = false; char const *const cmd_name = "CmdBeginTransformFeedbackEXT"; if (firstCounterBuffer >= phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBuffers) { skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-firstCounterBuffer-02368", "%s: The firstCounterBuffer(%" PRIu32 ") index is greater than or equal to " "VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackBuffers(%" PRIu32 ").", cmd_name, firstCounterBuffer, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBuffers); } if (firstCounterBuffer + counterBufferCount > phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBuffers) { skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-firstCounterBuffer-02369", "%s: The sum of firstCounterBuffer(%" PRIu32 ") and counterBufferCount(%" PRIu32 ") is greater than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackBuffers(%" PRIu32 ").", cmd_name, firstCounterBuffer, counterBufferCount, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBuffers); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer *pCounterBuffers, const VkDeviceSize *pCounterBufferOffsets) const { bool skip = false; char const *const cmd_name = "CmdEndTransformFeedbackEXT"; if (firstCounterBuffer >= phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBuffers) { skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-firstCounterBuffer-02376", "%s: The firstCounterBuffer(%" PRIu32 ") index is greater than or equal to " "VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackBuffers(%" PRIu32 ").", cmd_name, firstCounterBuffer, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBuffers); } if (firstCounterBuffer + counterBufferCount > phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBuffers) { skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-firstCounterBuffer-02377", "%s: The sum of firstCounterBuffer(%" PRIu32 ") and counterBufferCount(%" PRIu32 ") is greater than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackBuffers(%" PRIu32 ").", cmd_name, firstCounterBuffer, counterBufferCount, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBuffers); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer, uint32_t instanceCount, uint32_t firstInstance, VkBuffer counterBuffer, VkDeviceSize counterBufferOffset, uint32_t counterOffset, uint32_t vertexStride) const { bool skip = false; if ((vertexStride <= 0) || (vertexStride > 
phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBufferDataStride)) { skip |= LogError( counterBuffer, "VUID-vkCmdDrawIndirectByteCountEXT-vertexStride-02289", "vkCmdDrawIndirectByteCountEXT: vertexStride (%d) must be greater than 0 and less than or equal to maxTransformFeedbackBufferDataStride (%d).", vertexStride, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBufferDataStride); } if ((counterOffset % 4) != 0) { skip |= LogError(commandBuffer, "VUID-vkCmdDrawIndirectByteCountEXT-counterBufferOffset-04568", "vkCmdDrawIndirectByteCountEXT(): counterOffset (%" PRIu32 ") must be a multiple of 4.", counterOffset); } return skip; } bool StatelessValidation::ValidateCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion, const char *apiName) const { bool skip = false;
// Check samplerYcbcrConversion feature is set
const auto *ycbcr_features = LvlFindInChain<VkPhysicalDeviceSamplerYcbcrConversionFeatures>(device_createinfo_pnext); if ((ycbcr_features == nullptr) || (ycbcr_features->samplerYcbcrConversion == VK_FALSE)) { const auto *vulkan_11_features = LvlFindInChain<VkPhysicalDeviceVulkan11Features>(device_createinfo_pnext); if ((vulkan_11_features == nullptr) || (vulkan_11_features->samplerYcbcrConversion == VK_FALSE)) { skip |= LogError(device, "VUID-vkCreateSamplerYcbcrConversion-None-01648", "%s: samplerYcbcrConversion must be enabled.", apiName); } }
#ifdef VK_USE_PLATFORM_ANDROID_KHR
const VkExternalFormatANDROID *external_format_android = LvlFindInChain<VkExternalFormatANDROID>(pCreateInfo); const bool is_external_format = external_format_android != nullptr && external_format_android->externalFormat != 0;
#else
const bool is_external_format = false;
#endif
const VkFormat format = pCreateInfo->format;
// If there is a VkExternalFormatANDROID with externalFormat != 0, the value of components is ignored.
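// For non-external formats, the component swizzles below are validated against the chroma-subsampling and ycbcrModel rules.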
if (!is_external_format) { const VkComponentMapping components = pCreateInfo->components;
// XChroma Subsampled is same as "the format has a _422 or _420 suffix" from spec
if (FormatIsXChromaSubsampled(format) == true) { if ((components.g != VK_COMPONENT_SWIZZLE_G) && (components.g != VK_COMPONENT_SWIZZLE_IDENTITY)) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-components-02581", "%s: When using a XChroma subsampled format (%s) the components.g needs to be VK_COMPONENT_SWIZZLE_G " "or VK_COMPONENT_SWIZZLE_IDENTITY, but is %s.", apiName, string_VkFormat(format), string_VkComponentSwizzle(components.g)); } if ((components.a != VK_COMPONENT_SWIZZLE_A) && (components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (components.a != VK_COMPONENT_SWIZZLE_ONE) && (components.a != VK_COMPONENT_SWIZZLE_ZERO)) { skip |= LogError( device, "VUID-VkSamplerYcbcrConversionCreateInfo-components-02582", "%s: When using a XChroma subsampled format (%s) the components.a needs to be VK_COMPONENT_SWIZZLE_A or " "VK_COMPONENT_SWIZZLE_IDENTITY or VK_COMPONENT_SWIZZLE_ONE or VK_COMPONENT_SWIZZLE_ZERO, but is %s.", apiName, string_VkFormat(format), string_VkComponentSwizzle(components.a)); } if ((components.r != VK_COMPONENT_SWIZZLE_R) && (components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (components.r != VK_COMPONENT_SWIZZLE_B)) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-components-02583", "%s: When using a XChroma subsampled format (%s) the components.r needs to be VK_COMPONENT_SWIZZLE_R " "or VK_COMPONENT_SWIZZLE_IDENTITY or VK_COMPONENT_SWIZZLE_B, but is %s.", apiName, string_VkFormat(format), string_VkComponentSwizzle(components.r)); } if ((components.b != VK_COMPONENT_SWIZZLE_B) && (components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (components.b != VK_COMPONENT_SWIZZLE_R)) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-components-02584", "%s: When using a XChroma subsampled format (%s) the components.b needs to be VK_COMPONENT_SWIZZLE_B " "or VK_COMPONENT_SWIZZLE_IDENTITY or VK_COMPONENT_SWIZZLE_R, but is %s.", apiName, string_VkFormat(format), string_VkComponentSwizzle(components.b)); }
// If one is identity, both need to be
const bool r_identity = ((components.r == VK_COMPONENT_SWIZZLE_R) || (components.r == VK_COMPONENT_SWIZZLE_IDENTITY)); const bool b_identity = ((components.b == VK_COMPONENT_SWIZZLE_B) || (components.b == VK_COMPONENT_SWIZZLE_IDENTITY)); if ((r_identity != b_identity) && ((r_identity == true) || (b_identity == true))) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-components-02585", "%s: When using a XChroma subsampled format (%s) if either the components.r (%s) or components.b (%s) " "are an identity swizzle, then both need to be an identity swizzle.", apiName, string_VkFormat(format), string_VkComponentSwizzle(components.r), string_VkComponentSwizzle(components.b)); } } if (pCreateInfo->ycbcrModel != VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY) {
// Checks same VU multiple ways in order to give a more useful error message
const char *vuid = "VUID-VkSamplerYcbcrConversionCreateInfo-ycbcrModel-01655"; if ((components.r == VK_COMPONENT_SWIZZLE_ONE) || (components.r == VK_COMPONENT_SWIZZLE_ZERO) || (components.g == VK_COMPONENT_SWIZZLE_ONE) || (components.g == VK_COMPONENT_SWIZZLE_ZERO) || (components.b == VK_COMPONENT_SWIZZLE_ONE) || (components.b == VK_COMPONENT_SWIZZLE_ZERO)) { skip |= LogError( device, vuid, "%s: The ycbcrModel is not VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY so components.r (%s), "
"components.g (%s), nor components.b (%s) can't be VK_COMPONENT_SWIZZLE_ZERO or VK_COMPONENT_SWIZZLE_ONE.", apiName, string_VkComponentSwizzle(components.r), string_VkComponentSwizzle(components.g), string_VkComponentSwizzle(components.b)); } // "must not correspond to a channel which contains zero or one as a consequence of conversion to RGBA" // 4 channel format = no issue // 3 = no [a] // 2 = no [b,a] // 1 = no [g,b,a] // depth/stencil = no [g,b,a] (shouldn't ever occur, but no VU preventing it) const uint32_t channels = (FormatIsDepthOrStencil(format) == true) ? 1 : FormatChannelCount(format); if ((channels < 4) && ((components.r == VK_COMPONENT_SWIZZLE_A) || (components.g == VK_COMPONENT_SWIZZLE_A) || (components.b == VK_COMPONENT_SWIZZLE_A))) { skip |= LogError(device, vuid, "%s: The ycbcrModel is not VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY so components.r (%s), " "components.g (%s), or components.b (%s) can't be VK_COMPONENT_SWIZZLE_A.", apiName, string_VkComponentSwizzle(components.r), string_VkComponentSwizzle(components.g), string_VkComponentSwizzle(components.b)); } else if ((channels < 3) && ((components.r == VK_COMPONENT_SWIZZLE_B) || (components.g == VK_COMPONENT_SWIZZLE_B) || (components.b == VK_COMPONENT_SWIZZLE_B) || (components.b == VK_COMPONENT_SWIZZLE_IDENTITY))) { skip |= LogError(device, vuid, "%s: The ycbcrModel is not VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY so components.r (%s), " "components.g (%s), or components.b (%s) can't be VK_COMPONENT_SWIZZLE_B " "(components.b also can't be VK_COMPONENT_SWIZZLE_IDENTITY).", apiName, string_VkComponentSwizzle(components.r), string_VkComponentSwizzle(components.g), string_VkComponentSwizzle(components.b)); } else if ((channels < 2) && ((components.r == VK_COMPONENT_SWIZZLE_G) || (components.g == VK_COMPONENT_SWIZZLE_G) || (components.g == VK_COMPONENT_SWIZZLE_IDENTITY) || (components.b == VK_COMPONENT_SWIZZLE_G))) { skip |= LogError(device, vuid, "%s: The ycbcrModel is not VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY so components.r (%s), " "components.g (%s), or components.b (%s) can't be VK_COMPONENT_SWIZZLE_G " "(components.g also can't be VK_COMPONENT_SWIZZLE_IDENTITY).", apiName, string_VkComponentSwizzle(components.r), string_VkComponentSwizzle(components.g), string_VkComponentSwizzle(components.b)); } } } return skip; } bool StatelessValidation::manual_PreCallValidateCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion) const { return ValidateCreateSamplerYcbcrConversion(device, pCreateInfo, pAllocator, pYcbcrConversion, "vkCreateSamplerYcbcrConversion"); } bool StatelessValidation::manual_PreCallValidateCreateSamplerYcbcrConversionKHR( VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion) const { return ValidateCreateSamplerYcbcrConversion(device, pCreateInfo, pAllocator, pYcbcrConversion, "vkCreateSamplerYcbcrConversionKHR"); } bool StatelessValidation::manual_PreCallValidateImportSemaphoreFdKHR( VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) const { bool skip = false; VkExternalSemaphoreHandleTypeFlags supported_handle_types = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT; if (0 == (pImportSemaphoreFdInfo->handleType & supported_handle_types)) { skip |= LogError(device, 
"VUID-VkImportSemaphoreFdInfoKHR-handleType-01143", "vkImportSemaphoreFdKHR() to semaphore %s handleType %s is not one of the supported handleTypes (%s).", report_data->FormatHandle(pImportSemaphoreFdInfo->semaphore).c_str(), string_VkExternalSemaphoreHandleTypeFlagBits(pImportSemaphoreFdInfo->handleType), string_VkExternalSemaphoreHandleTypeFlags(supported_handle_types).c_str()); } return skip; } bool StatelessValidation::manual_PreCallValidateCopyAccelerationStructureToMemoryKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureToMemoryInfoKHR *pInfo) const { bool skip = false; if (pInfo->mode != VK_COPY_ACCELERATION_STRUCTURE_MODE_SERIALIZE_KHR) { skip |= LogError(device, "VUID-VkCopyAccelerationStructureToMemoryInfoKHR-mode-03412", "vkCopyAccelerationStructureToMemoryKHR: mode must be VK_COPY_ACCELERATION_STRUCTURE_MODE_SERIALIZE_KHR."); } const auto *acc_struct_features = LvlFindInChain<VkPhysicalDeviceAccelerationStructureFeaturesKHR>(device_createinfo_pnext); if (!acc_struct_features || acc_struct_features->accelerationStructureHostCommands == VK_FALSE) { skip |= LogError( device, "VUID-vkCopyAccelerationStructureToMemoryKHR-accelerationStructureHostCommands-03584", "vkCopyAccelerationStructureToMemoryKHR: The " "VkPhysicalDeviceAccelerationStructureFeaturesKHR::accelerationStructureHostCommands feature must be enabled."); } skip |= validate_required_pointer("vkCopyAccelerationStructureToMemoryKHR", "pInfo->dst.hostAddress", pInfo->dst.hostAddress, "VUID-vkCopyAccelerationStructureToMemoryKHR-pInfo-03732"); if (SafeModulo((VkDeviceSize)pInfo->dst.hostAddress, 16) != 0) { skip |= LogError(device, "VUID-vkCopyAccelerationStructureToMemoryKHR-pInfo-03751", "vkCopyAccelerationStructureToMemoryKHR(): pInfo->dst.hostAddress must be aligned to 16 bytes."); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdCopyAccelerationStructureToMemoryKHR( VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR *pInfo) const { bool skip = false; if (pInfo->mode != VK_COPY_ACCELERATION_STRUCTURE_MODE_SERIALIZE_KHR) { skip |= // to update VUID to VkCmdCopyAccelerationStructureToMemoryInfoKHR after spec update LogError(commandBuffer, "VUID-VkCopyAccelerationStructureToMemoryInfoKHR-mode-03412", "vkCmdCopyAccelerationStructureToMemoryKHR: mode must be VK_COPY_ACCELERATION_STRUCTURE_MODE_SERIALIZE_KHR."); } if (SafeModulo(pInfo->dst.deviceAddress, 256) != 0) { skip |= LogError(device, "VUID-vkCmdCopyAccelerationStructureToMemoryKHR-pInfo-03740", "vkCmdCopyAccelerationStructureToMemoryKHR(): pInfo->dst.deviceAddress must be aligned to 256 bytes.", pInfo->dst.deviceAddress); } return skip; } bool StatelessValidation::ValidateCopyAccelerationStructureInfoKHR(const VkCopyAccelerationStructureInfoKHR *pInfo, const char *api_name) const { bool skip = false; if (!(pInfo->mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR || pInfo->mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR)) { skip |= LogError(device, "VUID-VkCopyAccelerationStructureInfoKHR-mode-03410", "(%s): mode must be VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR" "or VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR.", api_name); } return skip; } bool StatelessValidation::manual_PreCallValidateCopyAccelerationStructureKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureInfoKHR *pInfo) const { bool skip = false; skip |= ValidateCopyAccelerationStructureInfoKHR(pInfo, "vkCopyAccelerationStructureKHR()"); const auto 
*acc_struct_features = LvlFindInChain<VkPhysicalDeviceAccelerationStructureFeaturesKHR>(device_createinfo_pnext); if (!acc_struct_features || acc_struct_features->accelerationStructureHostCommands == VK_FALSE) { skip |= LogError( device, "VUID-vkCopyAccelerationStructureKHR-accelerationStructureHostCommands-03582", "vkCopyAccelerationStructureKHR: The " "VkPhysicalDeviceAccelerationStructureFeaturesKHR::accelerationStructureHostCommands feature must be enabled."); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdCopyAccelerationStructureKHR( VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR *pInfo) const { bool skip = false; skip |= ValidateCopyAccelerationStructureInfoKHR(pInfo, "vkCmdCopyAccelerationStructureKHR()"); return skip; } bool StatelessValidation::ValidateCopyMemoryToAccelerationStructureInfoKHR(const VkCopyMemoryToAccelerationStructureInfoKHR *pInfo, const char *api_name, bool is_cmd) const { bool skip = false; if (pInfo->mode != VK_COPY_ACCELERATION_STRUCTURE_MODE_DESERIALIZE_KHR) { skip |= LogError(device, "VUID-VkCopyMemoryToAccelerationStructureInfoKHR-mode-03413", "(%s): mode must be VK_COPY_ACCELERATION_STRUCTURE_MODE_DESERIALIZE_KHR.", api_name); } return skip; } bool StatelessValidation::manual_PreCallValidateCopyMemoryToAccelerationStructureKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyMemoryToAccelerationStructureInfoKHR *pInfo) const { bool skip = false; skip |= ValidateCopyMemoryToAccelerationStructureInfoKHR(pInfo, "vkCopyMemoryToAccelerationStructureKHR()", true); const auto *acc_struct_features = LvlFindInChain<VkPhysicalDeviceAccelerationStructureFeaturesKHR>(device_createinfo_pnext); if (!acc_struct_features || acc_struct_features->accelerationStructureHostCommands == VK_FALSE) { skip |= LogError( device, "VUID-vkCopyMemoryToAccelerationStructureKHR-accelerationStructureHostCommands-03583", "vkCopyMemoryToAccelerationStructureKHR: The " "VkPhysicalDeviceAccelerationStructureFeaturesKHR::accelerationStructureHostCommands feature must be enabled."); } skip |= validate_required_pointer("vkCopyMemoryToAccelerationStructureKHR", "pInfo->src.hostAddress", pInfo->src.hostAddress, "VUID-vkCopyMemoryToAccelerationStructureKHR-pInfo-03729"); return skip; } bool StatelessValidation::manual_PreCallValidateCmdCopyMemoryToAccelerationStructureKHR( VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR *pInfo) const { bool skip = false; skip |= ValidateCopyMemoryToAccelerationStructureInfoKHR(pInfo, "vkCmdCopyMemoryToAccelerationStructureKHR()", false); if (SafeModulo(pInfo->src.deviceAddress, 256) != 0) { skip |= LogError(device, "VUID-vkCmdCopyMemoryToAccelerationStructureKHR-pInfo-03743", "vkCmdCopyMemoryToAccelerationStructureKHR(): pInfo->src.deviceAddress (0x%" PRIx64 ") must be aligned to 256 bytes.", pInfo->src.deviceAddress); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdWriteAccelerationStructuresPropertiesKHR( VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR *pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) const { bool skip = false; if (!(queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR || queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR)) { skip |= LogError(device, "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-queryType-03432", "vkCmdWriteAccelerationStructuresPropertiesKHR: queryType must be "
"VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR or " "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR."); } return skip; } bool StatelessValidation::manual_PreCallValidateWriteAccelerationStructuresPropertiesKHR( VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR *pAccelerationStructures, VkQueryType queryType, size_t dataSize, void *pData, size_t stride) const { bool skip = false; const auto *acc_structure_features = LvlFindInChain<VkPhysicalDeviceAccelerationStructureFeaturesKHR>(device_createinfo_pnext); if (!acc_structure_features || acc_structure_features->accelerationStructureHostCommands == VK_FALSE) { skip |= LogError( device, "VUID-vkWriteAccelerationStructuresPropertiesKHR-accelerationStructureHostCommands-03585", "vkCmdWriteAccelerationStructuresPropertiesKHR: The " "VkPhysicalDeviceAccelerationStructureFeaturesKHR::accelerationStructureHostCommands feature must be enabled."); } if (dataSize < accelerationStructureCount * stride) { skip |= LogError(device, "VUID-vkWriteAccelerationStructuresPropertiesKHR-dataSize-03452", "vkWriteAccelerationStructuresPropertiesKHR: dataSize (%zu) must be greater than or equal to " "accelerationStructureCount (%d) *stride(%zu).", dataSize, accelerationStructureCount, stride); } if (!(queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR || queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR)) { skip |= LogError(device, "VUID-vkWriteAccelerationStructuresPropertiesKHR-queryType-03432", "vkWriteAccelerationStructuresPropertiesKHR: queryType must be " "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR or " "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR."); } if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR) { if (SafeModulo(stride, sizeof(VkDeviceSize)) != 0) { skip |= LogError(device, "VUID-vkWriteAccelerationStructuresPropertiesKHR-queryType-03448", "vkWriteAccelerationStructuresPropertiesKHR: If queryType is " "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR," "then stride (%zu) must be a multiple of the size of VkDeviceSize", stride); } } if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR) { if (SafeModulo(stride, sizeof(VkDeviceSize)) != 0) { skip |= LogError(device, "VUID-vkWriteAccelerationStructuresPropertiesKHR-queryType-03450", "vkWriteAccelerationStructuresPropertiesKHR: If queryType is " "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR," "then stride (%zu) must be a multiple of the size of VkDeviceSize", stride); } } return skip; } bool StatelessValidation::manual_PreCallValidateGetRayTracingCaptureReplayShaderGroupHandlesKHR( VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void *pData) const { bool skip = false; const auto *raytracing_features = LvlFindInChain<VkPhysicalDeviceRayTracingPipelineFeaturesKHR>(device_createinfo_pnext); if (!raytracing_features || raytracing_features->rayTracingPipelineShaderGroupHandleCaptureReplay == VK_FALSE) { skip |= LogError( device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-rayTracingPipelineShaderGroupHandleCaptureReplay-03606", "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR:VkPhysicalDeviceRayTracingPipelineFeaturesKHR::" "rayTracingPipelineShaderGroupHandleCaptureReplay must be enabled to call this function."); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdTraceRaysKHR(VkCommandBuffer commandBuffer, const 
VkStridedDeviceAddressRegionKHR *pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR *pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR *pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR *pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth) const { bool skip = false; // RayGen if (pRaygenShaderBindingTable->size != pRaygenShaderBindingTable->stride) { skip |= LogError(device, "VUID-vkCmdTraceRaysKHR-size-04023", "vkCmdTraceRaysKHR: The size member of pRayGenShaderBindingTable must be equal to its stride member"); } if (SafeModulo(pRaygenShaderBindingTable->deviceAddress, phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupBaseAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysKHR-pRayGenShaderBindingTable-03682", "vkCmdTraceRaysKHR: pRaygenShaderBindingTable->deviceAddress must be a multiple of " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupBaseAlignment."); } // Callable if (SafeModulo(pCallableShaderBindingTable->stride, phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysKHR-stride-03694", "vkCmdTraceRaysKHR: The stride member of pCallableShaderBindingTable must be a multiple of " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupHandleAlignment."); } if (pCallableShaderBindingTable->stride > phys_dev_ext_props.ray_tracing_propsKHR.maxShaderGroupStride) { skip |= LogError(device, "VUID-vkCmdTraceRaysKHR-stride-04041", "vkCmdTraceRaysKHR: The stride member of pCallableShaderBindingTable must be " "less than or equal to VkPhysicalDeviceRayTracingPipelinePropertiesKHR::maxShaderGroupStride."); } if (SafeModulo(pCallableShaderBindingTable->deviceAddress, phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupBaseAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysKHR-pCallableShaderBindingTable-03693", "vkCmdTraceRaysKHR: pCallableShaderBindingTable->deviceAddress must be a multiple of " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupBaseAlignment."); } // hitShader if (SafeModulo(pHitShaderBindingTable->stride, phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysKHR-stride-03690", "vkCmdTraceRaysKHR: The stride member of pHitShaderBindingTable must be a multiple of " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupHandleAlignment."); } if (pHitShaderBindingTable->stride > phys_dev_ext_props.ray_tracing_propsKHR.maxShaderGroupStride) { skip |= LogError(device, "VUID-vkCmdTraceRaysKHR-stride-04035", "vkCmdTraceRaysKHR: The stride member of pHitShaderBindingTable must be less than or equal to " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::maxShaderGroupStride"); } if (SafeModulo(pHitShaderBindingTable->deviceAddress, phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupBaseAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysKHR-pHitShaderBindingTable-03689", "vkCmdTraceRaysKHR: pHitShaderBindingTable->deviceAddress must be a multiple of " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupBaseAlignment."); } // missShader if (SafeModulo(pMissShaderBindingTable->stride, phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysKHR-stride-03686", "vkCmdTraceRaysKHR: The stride member of pMissShaderBindingTable must be a multiple of " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupHandleAlignment"); } if
(pMissShaderBindingTable->stride > phys_dev_ext_props.ray_tracing_propsKHR.maxShaderGroupStride) { skip |= LogError(device, "VUID-vkCmdTraceRaysKHR-stride-04029", "vkCmdTraceRaysKHR: The stride member of pMissShaderBindingTable must be " "less than or equal to VkPhysicalDeviceRayTracingPipelinePropertiesKHR::maxShaderGroupStride."); } if (SafeModulo(pMissShaderBindingTable->deviceAddress, phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupBaseAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysKHR-pMissShaderBindingTable-03685", "vkCmdTraceRaysKHR: pMissShaderBindingTable->deviceAddress must be a multiple of " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupBaseAlignment."); } if (width * depth * height > phys_dev_ext_props.ray_tracing_propsKHR.maxRayDispatchInvocationCount) { skip |= LogError(device, "VUID-vkCmdTraceRaysKHR-width-03629", "vkCmdTraceRaysKHR: width x height x depth must be less than or equal to " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::maxRayDispatchInvocationCount"); } if (width > device_limits.maxComputeWorkGroupCount[0] * device_limits.maxComputeWorkGroupSize[0]) { skip |= LogError(device, "VUID-vkCmdTraceRaysKHR-width-03626", "vkCmdTraceRaysKHR: width must be less than or equal to VkPhysicalDeviceLimits::maxComputeWorkGroupCount[0] " "x VkPhysicalDeviceLimits::maxComputeWorkGroupSize[0]"); } if (height > device_limits.maxComputeWorkGroupCount[1] * device_limits.maxComputeWorkGroupSize[1]) { skip |= LogError(device, "VUID-vkCmdTraceRaysKHR-height-03627", "vkCmdTraceRaysKHR: height must be less than or equal to VkPhysicalDeviceLimits::maxComputeWorkGroupCount[1] " "x VkPhysicalDeviceLimits::maxComputeWorkGroupSize[1]"); } if (depth > device_limits.maxComputeWorkGroupCount[2] * device_limits.maxComputeWorkGroupSize[2]) { skip |= LogError(device, "VUID-vkCmdTraceRaysKHR-depth-03628", "vkCmdTraceRaysKHR: depth must be less than or equal to VkPhysicalDeviceLimits::maxComputeWorkGroupCount[2] " "x VkPhysicalDeviceLimits::maxComputeWorkGroupSize[2]"); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdTraceRaysIndirectKHR( VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR *pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR *pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR *pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR *pCallableShaderBindingTable, VkDeviceAddress indirectDeviceAddress) const { bool skip = false; const auto *raytracing_features = LvlFindInChain<VkPhysicalDeviceRayTracingPipelineFeaturesKHR>(device_createinfo_pnext); if (!raytracing_features || raytracing_features->rayTracingPipelineTraceRaysIndirect == VK_FALSE) { skip |= LogError( device, "VUID-vkCmdTraceRaysIndirectKHR-rayTracingPipelineTraceRaysIndirect-03637", "vkCmdTraceRaysIndirectKHR: the VkPhysicalDeviceRayTracingPipelineFeaturesKHR::rayTracingPipelineTraceRaysIndirect " "feature must be enabled."); } // RayGen if (pRaygenShaderBindingTable->size != pRaygenShaderBindingTable->stride) { skip |= LogError(device, "VUID-vkCmdTraceRaysIndirectKHR-size-04023", "vkCmdTraceRaysIndirectKHR: The size member of pRayGenShaderBindingTable must be equal to its stride member"); } if (SafeModulo(pRaygenShaderBindingTable->deviceAddress, phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupBaseAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysIndirectKHR-pRayGenShaderBindingTable-03682", "vkCmdTraceRaysIndirectKHR: pRaygenShaderBindingTable->deviceAddress must be a multiple
of " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupBaseAlignment."); } // Callable if (SafeModulo(pCallableShaderBindingTable->stride, phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysIndirectKHR-stride-03694", "vkCmdTraceRaysIndirectKHR: The stride member of pCallableShaderBindingTable must be a multiple of " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupHandleAlignment."); } if (pCallableShaderBindingTable->stride > phys_dev_ext_props.ray_tracing_propsKHR.maxShaderGroupStride) { skip |= LogError(device, "VUID-vkCmdTraceRaysIndirectKHR-stride-04041", "vkCmdTraceRaysIndirectKHR: The stride member of pCallableShaderBindingTable must be less than or equal " "to VkPhysicalDeviceRayTracingPipelinePropertiesKHR::maxShaderGroupStride."); } if (SafeModulo(pCallableShaderBindingTable->deviceAddress, phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupBaseAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysIndirectKHR-pCallableShaderBindingTable-03693", "vkCmdTraceRaysIndirectKHR: pCallableShaderBindingTable->deviceAddress must be a multiple of " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupBaseAlignment."); } // hitShader if (SafeModulo(pHitShaderBindingTable->stride, phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysIndirectKHR-stride-03690", "vkCmdTraceRaysIndirectKHR: The stride member of pHitShaderBindingTable must be a multiple of " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupHandleAlignment."); } if (pHitShaderBindingTable->stride > phys_dev_ext_props.ray_tracing_propsKHR.maxShaderGroupStride) { skip |= LogError(device, "VUID-vkCmdTraceRaysIndirectKHR-stride-04035", "vkCmdTraceRaysIndirectKHR: The stride member of pHitShaderBindingTable must be less than or equal to " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::maxShaderGroupStride."); } if (SafeModulo(pHitShaderBindingTable->deviceAddress, phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupBaseAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysIndirectKHR-pHitShaderBindingTable-03689", "vkCmdTraceRaysIndirectKHR: pHitShaderBindingTable->deviceAddress must be a multiple of " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupBaseAlignment."); } // missShader if (SafeModulo(pMissShaderBindingTable->stride, phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysIndirectKHR-stride-03686", "vkCmdTraceRaysIndirectKHR: The stride member of pMissShaderBindingTable must be a multiple of " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupHandleAlignment."); } if (pMissShaderBindingTable->stride > phys_dev_ext_props.ray_tracing_propsKHR.maxShaderGroupStride) { skip |= LogError(device, "VUID-vkCmdTraceRaysIndirectKHR-stride-04029", "vkCmdTraceRaysIndirectKHR: The stride member of pMissShaderBindingTable must be less than or equal to " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::maxShaderGroupStride."); } if (SafeModulo(pMissShaderBindingTable->deviceAddress, phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupBaseAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysIndirectKHR-pMissShaderBindingTable-03685", "vkCmdTraceRaysIndirectKHR: pMissShaderBindingTable->deviceAddress must be a multiple of " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupBaseAlignment."); } if (SafeModulo(indirectDeviceAddress, 4) !=
0) { skip |= LogError(device, "VUID-vkCmdTraceRaysIndirectKHR-indirectDeviceAddress-03634", "vkCmdTraceRaysIndirectKHR: indirectDeviceAddress must be a multiple of 4."); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdTraceRaysNV( VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth) const { bool skip = false; if (SafeModulo(callableShaderBindingOffset, phys_dev_ext_props.ray_tracing_propsNV.shaderGroupBaseAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysNV-callableShaderBindingOffset-02462", "vkCmdTraceRaysNV: callableShaderBindingOffset must be a multiple of " "VkPhysicalDeviceRayTracingPropertiesNV::shaderGroupBaseAlignment."); } if (SafeModulo(callableShaderBindingStride, phys_dev_ext_props.ray_tracing_propsNV.shaderGroupHandleSize) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysNV-callableShaderBindingStride-02465", "vkCmdTraceRaysNV: callableShaderBindingStride must be a multiple of " "VkPhysicalDeviceRayTracingPropertiesNV::shaderGroupHandleSize."); } if (callableShaderBindingStride > phys_dev_ext_props.ray_tracing_propsNV.maxShaderGroupStride) { skip |= LogError(device, "VUID-vkCmdTraceRaysNV-callableShaderBindingStride-02468", "vkCmdTraceRaysNV: callableShaderBindingStride must be less than or equal to " "VkPhysicalDeviceRayTracingPropertiesNV::maxShaderGroupStride. 
"); } // hitShader if (SafeModulo(hitShaderBindingOffset, phys_dev_ext_props.ray_tracing_propsNV.shaderGroupBaseAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysNV-hitShaderBindingOffset-02460", "vkCmdTraceRaysNV: hitShaderBindingOffset must be a multiple of " "VkPhysicalDeviceRayTracingPropertiesNV::shaderGroupBaseAlignment."); } if (SafeModulo(hitShaderBindingStride, phys_dev_ext_props.ray_tracing_propsNV.shaderGroupHandleSize) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysNV-hitShaderBindingStride-02464", "vkCmdTraceRaysNV: hitShaderBindingStride must be a multiple of " "VkPhysicalDeviceRayTracingPropertiesNV::shaderGroupHandleSize."); } if (hitShaderBindingStride > phys_dev_ext_props.ray_tracing_propsNV.maxShaderGroupStride) { skip |= LogError(device, "VUID-vkCmdTraceRaysNV-hitShaderBindingStride-02467", "vkCmdTraceRaysNV: hitShaderBindingStride must be less than or equal to " "VkPhysicalDeviceRayTracingPropertiesNV::maxShaderGroupStride."); } // missShader if (SafeModulo(missShaderBindingOffset, phys_dev_ext_props.ray_tracing_propsNV.shaderGroupBaseAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysNV-missShaderBindingOffset-02458", "vkCmdTraceRaysNV: missShaderBindingOffset must be a multiple of " "VkPhysicalDeviceRayTracingPropertiesNV::shaderGroupBaseAlignment."); } if (SafeModulo(missShaderBindingStride, phys_dev_ext_props.ray_tracing_propsNV.shaderGroupHandleSize) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysNV-missShaderBindingStride-02463", "vkCmdTraceRaysNV: missShaderBindingStride must be a multiple of " "VkPhysicalDeviceRayTracingPropertiesNV::shaderGroupHandleSize."); } if (missShaderBindingStride > phys_dev_ext_props.ray_tracing_propsNV.maxShaderGroupStride) { skip |= LogError(device, "VUID-vkCmdTraceRaysNV-missShaderBindingStride-02466", "vkCmdTraceRaysNV: missShaderBindingStride must be less than or equal to " "VkPhysicalDeviceRayTracingPropertiesNV::maxShaderGroupStride."); } // raygenShader if (SafeModulo(raygenShaderBindingOffset, phys_dev_ext_props.ray_tracing_propsNV.shaderGroupBaseAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdTraceRaysNV-raygenShaderBindingOffset-02456", "vkCmdTraceRaysNV: raygenShaderBindingOffset must be a multiple of " "VkPhysicalDeviceRayTracingPropertiesNV::shaderGroupBaseAlignment."); } if (width > device_limits.maxComputeWorkGroupCount[0]) { skip |= LogError(device, "VUID-vkCmdTraceRaysNV-width-02469", "vkCmdTraceRaysNV: width must be less than or equal to VkPhysicalDeviceLimits::maxComputeWorkGroupCount[o]."); } if (height > device_limits.maxComputeWorkGroupCount[1]) { skip |= LogError(device, "VUID-vkCmdTraceRaysNV-height-02470", "vkCmdTraceRaysNV: height must be less than or equal to VkPhysicalDeviceLimits::maxComputeWorkGroupCount[1]."); } if (depth > device_limits.maxComputeWorkGroupCount[2]) { skip |= LogError(device, "VUID-vkCmdTraceRaysNV-depth-02471", "vkCmdTraceRaysNV: depth must be less than or equal to VkPhysicalDeviceLimits::maxComputeWorkGroupCount[2]."); } return skip; } bool StatelessValidation::manual_PreCallValidateGetDeviceAccelerationStructureCompatibilityKHR( VkDevice device, const VkAccelerationStructureVersionInfoKHR *pVersionInfo, VkAccelerationStructureCompatibilityKHR *pCompatibility) const { bool skip = false; const auto *ray_query_features = LvlFindInChain<VkPhysicalDeviceRayQueryFeaturesKHR>(device_createinfo_pnext); const auto *raytracing_features = LvlFindInChain<VkPhysicalDeviceRayTracingPipelineFeaturesKHR>(device_createinfo_pnext); if ((!raytracing_features 
&& !ray_query_features) || ((ray_query_features && !(ray_query_features->rayQuery)) || (raytracing_features && !raytracing_features->rayTracingPipeline))) { skip |= LogError(device, "VUID-vkGetDeviceAccelerationStructureCompatibilityKHR-rayTracingPipeline-03661", "vkGetDeviceAccelerationStructureCompatibilityKHR: The rayTracing or rayQuery feature must be enabled."); } return skip; } bool StatelessValidation::manual_PreCallValidateCmdSetViewportWithCountEXT(VkCommandBuffer commandBuffer, uint32_t viewportCount, const VkViewport *pViewports) const { bool skip = false; if (!physical_device_features.multiViewport) { if (viewportCount != 1) { skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportWithCountEXT-viewportCount-03395", "vkCmdSetViewportWithCountEXT: The multiViewport feature is disabled, but viewportCount (=%" PRIu32 ") is not 1.", viewportCount); } } else { // multiViewport enabled if (viewportCount < 1 || viewportCount > device_limits.maxViewports) { skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportWithCountEXT-viewportCount-03394", "vkCmdSetViewportWithCountEXT: viewportCount (=%" PRIu32 ") must " "not be greater than VkPhysicalDeviceLimits::maxViewports (=%" PRIu32 ").", viewportCount, device_limits.maxViewports); } } if (pViewports) { for (uint32_t viewport_i = 0; viewport_i < viewportCount; ++viewport_i) { const auto &viewport = pViewports[viewport_i]; // will crash on invalid ptr const char *fn_name = "vkCmdSetViewportWithCountEXT"; skip |= manual_PreCallValidateViewport( viewport, fn_name, ParameterName("pViewports[%i]", ParameterName::IndexVector{viewport_i}), commandBuffer); } } return skip; } bool StatelessValidation::manual_PreCallValidateCmdSetScissorWithCountEXT(VkCommandBuffer commandBuffer, uint32_t scissorCount, const VkRect2D *pScissors) const { bool skip = false; if (!physical_device_features.multiViewport) { if (scissorCount != 1) { skip |= LogError(commandBuffer, "VUID-vkCmdSetScissorWithCountEXT-scissorCount-03398", "vkCmdSetScissorWithCountEXT: scissorCount (=%" PRIu32 ") must " "be 1 when the multiViewport feature is disabled.", scissorCount); } } else { // multiViewport enabled if (scissorCount == 0) { skip |= LogError(commandBuffer, "VUID-vkCmdSetScissorWithCountEXT-scissorCount-03397", "vkCmdSetScissorWithCountEXT: scissorCount (=%" PRIu32 ") must " "be great than zero.", scissorCount); } else if (scissorCount > device_limits.maxViewports) { skip |= LogError(commandBuffer, "VUID-vkCmdSetScissorWithCountEXT-scissorCount-03397", "vkCmdSetScissorWithCountEXT: scissorCount (=%" PRIu32 ") must " "not be greater than VkPhysicalDeviceLimits::maxViewports (=%" PRIu32 ").", scissorCount, device_limits.maxViewports); } } if (pScissors) { for (uint32_t scissor_i = 0; scissor_i < scissorCount; ++scissor_i) { const auto &scissor = pScissors[scissor_i]; // will crash on invalid ptr if (scissor.offset.x < 0) { skip |= LogError(commandBuffer, "VUID-vkCmdSetScissorWithCountEXT-x-03399", "vkCmdSetScissor: pScissors[%" PRIu32 "].offset.x (=%" PRIi32 ") is negative.", scissor_i, scissor.offset.x); } if (scissor.offset.y < 0) { skip |= LogError(commandBuffer, "VUID-vkCmdSetScissorWithCountEXT-x-03399", "vkCmdSetScissor: pScissors[%" PRIu32 "].offset.y (=%" PRIi32 ") is negative.", scissor_i, scissor.offset.y); } const int64_t x_sum = static_cast<int64_t>(scissor.offset.x) + static_cast<int64_t>(scissor.extent.width); if (x_sum > INT32_MAX) { skip |= LogError(commandBuffer, "VUID-vkCmdSetScissorWithCountEXT-offset-03400", "vkCmdSetScissor: offset.x + extent.width (=%" 
PRIi32 " + %" PRIu32 " = %" PRIi64 ") of pScissors[%" PRIu32 "] will overflow int32_t.", scissor.offset.x, scissor.extent.width, x_sum, scissor_i); } const int64_t y_sum = static_cast<int64_t>(scissor.offset.y) + static_cast<int64_t>(scissor.extent.height); if (y_sum > INT32_MAX) { skip |= LogError(commandBuffer, "VUID-vkCmdSetScissorWithCountEXT-offset-03401", "vkCmdSetScissor: offset.y + extent.height (=%" PRIi32 " + %" PRIu32 " = %" PRIi64 ") of pScissors[%" PRIu32 "] will overflow int32_t.", scissor.offset.y, scissor.extent.height, y_sum, scissor_i); } } } return skip; } bool StatelessValidation::manual_PreCallValidateCmdBindVertexBuffers2EXT(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers, const VkDeviceSize *pOffsets, const VkDeviceSize *pSizes, const VkDeviceSize *pStrides) const { bool skip = false; if (firstBinding >= device_limits.maxVertexInputBindings) { skip |= LogError(commandBuffer, "VUID-vkCmdBindVertexBuffers2EXT-firstBinding-03355", "vkCmdBindVertexBuffers2EXT() firstBinding (%u) must be less than maxVertexInputBindings (%u)", firstBinding, device_limits.maxVertexInputBindings); } else if ((firstBinding + bindingCount) > device_limits.maxVertexInputBindings) { skip |= LogError(commandBuffer, "VUID-vkCmdBindVertexBuffers2EXT-firstBinding-03356", "vkCmdBindVertexBuffers2EXT() sum of firstBinding (%u) and bindingCount (%u) must be less than " "maxVertexInputBindings (%u)", firstBinding, bindingCount, device_limits.maxVertexInputBindings); } for (uint32_t i = 0; i < bindingCount; ++i) { if (pBuffers[i] == VK_NULL_HANDLE) { const auto *robustness2_features = LvlFindInChain<VkPhysicalDeviceRobustness2FeaturesEXT>(device_createinfo_pnext); if (!(robustness2_features && robustness2_features->nullDescriptor)) { skip |= LogError(commandBuffer, "VUID-vkCmdBindVertexBuffers2EXT-pBuffers-04111", "vkCmdBindVertexBuffers2EXT() required parameter pBuffers[%d] specified as VK_NULL_HANDLE", i); } else { if (pOffsets[i] != 0) { skip |= LogError(commandBuffer, "VUID-vkCmdBindVertexBuffers2EXT-pBuffers-04112", "vkCmdBindVertexBuffers2EXT() pBuffers[%d] is VK_NULL_HANDLE, but pOffsets[%d] is not 0", i, i); } } } if (pStrides) { if (pStrides[i] > device_limits.maxVertexInputBindingStride) { skip |= LogError(commandBuffer, "VUID-vkCmdBindVertexBuffers2EXT-pStrides-03362", "vkCmdBindVertexBuffers2EXT() pStrides[%d] (%u) must be less than maxVertexInputBindingStride (%u)", i, pStrides[i], device_limits.maxVertexInputBindingStride); } } } return skip; } bool StatelessValidation::ValidateAccelerationStructureBuildGeometryInfoKHR( const VkAccelerationStructureBuildGeometryInfoKHR *pInfos, uint32_t infoCount, const char *api_name) const { bool skip = false; for (uint32_t i = 0; i < infoCount; ++i) { if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR) { skip |= LogError(device, "VUID-VkAccelerationStructureBuildGeometryInfoKHR-type-03654", "(%s): type must not be VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.", api_name); } if (pInfos[i].flags & VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR && pInfos[i].flags & VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR) { skip |= LogError(device, "VUID-VkAccelerationStructureBuildGeometryInfoKHR-flags-03796", "(%s): If flags has the VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR bit set," "then it must not have the VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR bit set.", api_name); } if (pInfos[i].pGeometries && pInfos[i].ppGeometries) { skip |= 
LogError(device, "VUID-VkAccelerationStructureBuildGeometryInfoKHR-pGeometries-03788", "(%s): Only one of pGeometries or ppGeometries can be a valid pointer, the other must be NULL", api_name); } if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR && pInfos[i].geometryCount != 1) { skip |= LogError(device, "VUID-VkAccelerationStructureBuildGeometryInfoKHR-type-03790", "(%s): If type is VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, geometryCount must be 1", api_name); } if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR && pInfos[i].geometryCount > phys_dev_ext_props.acc_structure_props.maxGeometryCount) { skip |= LogError(device, "VUID-VkAccelerationStructureBuildGeometryInfoKHR-type-03793", "(%s): If type is VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR then geometryCount must be" " less than or equal to VkPhysicalDeviceAccelerationStructurePropertiesKHR::maxGeometryCount", api_name); } if (pInfos[i].pGeometries) { for (uint32_t j = 0; j < pInfos[i].geometryCount; ++j) { skip |= validate_ranged_enum( api_name, ParameterName("pInfos[%i].pGeometries[%i].geometryType", ParameterName::IndexVector{i, j}), "VkGeometryTypeKHR", AllVkGeometryTypeKHREnums, pInfos[i].pGeometries[j].geometryType, "VUID-VkAccelerationStructureGeometryKHR-geometryType-parameter"); if (pInfos[i].pGeometries[j].geometryType == VK_GEOMETRY_TYPE_TRIANGLES_KHR) { skip |= validate_struct_type( api_name, ParameterName("pInfos[%i].pGeometries[%i].geometry.triangles", ParameterName::IndexVector{i, j}), "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR", &(pInfos[i].pGeometries[j].geometry.triangles), VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR, false, kVUIDUndefined, "VUID-VkAccelerationStructureGeometryTrianglesDataKHR-sType-sType"); skip |= validate_struct_pnext( api_name, ParameterName("pInfos[%i].pGeometries[%i].geometry.triangles.pNext", ParameterName::IndexVector{i, j}), NULL, pInfos[i].pGeometries[j].geometry.triangles.pNext, 0, NULL, GeneratedVulkanHeaderVersion, "VUID-VkAccelerationStructureGeometryTrianglesDataKHR-pNext-pNext", kVUIDUndefined); skip |= validate_ranged_enum(api_name, ParameterName("pInfos[%i].pGeometries[%i].geometry.triangles.vertexFormat", ParameterName::IndexVector{i, j}), "VkFormat", AllVkFormatEnums, pInfos[i].pGeometries[j].geometry.triangles.vertexFormat, "VUID-VkAccelerationStructureGeometryTrianglesDataKHR-vertexFormat-parameter"); skip |= validate_struct_type(api_name, "pInfos[i].pGeometries[j].geometry.triangles", "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR", &pInfos[i].pGeometries[j].geometry.triangles, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR, true, "VUID-VkAccelerationStructureGeometryKHR-triangles-parameter", kVUIDUndefined); skip |= validate_ranged_enum( api_name, ParameterName("pInfos[%i].pGeometries[%i].geometry.triangles.indexType", ParameterName::IndexVector{i, j}), "VkIndexType", AllVkIndexTypeEnums, pInfos[i].pGeometries[j].geometry.triangles.indexType, "VUID-VkAccelerationStructureGeometryTrianglesDataKHR-indexType-parameter"); if (pInfos[i].pGeometries[j].geometry.triangles.vertexStride > UINT32_MAX) { skip |= LogError(device, "VUID-VkAccelerationStructureGeometryTrianglesDataKHR-vertexStride-03819", "(%s):vertexStride must be less than or equal to 2^32-1", api_name); } if (pInfos[i].pGeometries[j].geometry.triangles.indexType != VK_INDEX_TYPE_UINT16 && pInfos[i].pGeometries[j].geometry.triangles.indexType != VK_INDEX_TYPE_UINT32 && 
pInfos[i].pGeometries[j].geometry.triangles.indexType != VK_INDEX_TYPE_NONE_KHR) { skip |= LogError(device, "VUID-VkAccelerationStructureGeometryTrianglesDataKHR-indexType-03798", "(%s):indexType must be VK_INDEX_TYPE_UINT16, VK_INDEX_TYPE_UINT32, or VK_INDEX_TYPE_NONE_KHR", api_name); } } if (pInfos[i].pGeometries[j].geometryType == VK_GEOMETRY_TYPE_INSTANCES_KHR) { skip |= validate_struct_type(api_name, "pInfos[i].pGeometries[j].geometry.instances", "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR", &pInfos[i].pGeometries[j].geometry.instances, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR, true, "VUID-VkAccelerationStructureGeometryKHR-instances-parameter", kVUIDUndefined); skip |= validate_struct_type( api_name, ParameterName("pInfos[%i].pGeometries[%i].geometry.instances", ParameterName::IndexVector{i, j}), "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR", &(pInfos[i].pGeometries[j].geometry.instances), VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR, false, kVUIDUndefined, "VUID-VkAccelerationStructureGeometryInstancesDataKHR-sType-sType"); skip |= validate_struct_pnext( api_name, ParameterName("pInfos[%i].pGeometries[%i].geometry.instances.pNext", ParameterName::IndexVector{i, j}), NULL, pInfos[i].pGeometries[j].geometry.instances.pNext, 0, NULL, GeneratedVulkanHeaderVersion, "VUID-VkAccelerationStructureGeometryInstancesDataKHR-pNext-pNext", kVUIDUndefined); skip |= validate_bool32(api_name, ParameterName("pInfos[%i].pGeometries[%i].geometry.instances.arrayOfPointers", ParameterName::IndexVector{i, j}), pInfos[i].pGeometries[j].geometry.instances.arrayOfPointers); } if (pInfos[i].pGeometries[j].geometryType == VK_GEOMETRY_TYPE_AABBS_KHR) { skip |= validate_struct_type(api_name, "pInfos[i].pGeometries[j].geometry.aabbs", "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR", &pInfos[i].pGeometries[j].geometry.aabbs, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR, true, "VUID-VkAccelerationStructureGeometryKHR-aabbs-parameter", kVUIDUndefined); skip |= validate_struct_type( api_name, ParameterName("pInfos[%i].pGeometries[%i].geometry.aabbs", ParameterName::IndexVector{i, j}), "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR", &(pInfos[i].pGeometries[j].geometry.aabbs), VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR, false, kVUIDUndefined, "VUID-VkAccelerationStructureGeometryAabbsDataKHR-sType-sType"); skip |= validate_struct_pnext( api_name, ParameterName("pInfos[%i].pGeometries[%i].geometry.aabbs.pNext", ParameterName::IndexVector{i, j}), NULL, pInfos[i].pGeometries[j].geometry.aabbs.pNext, 0, NULL, GeneratedVulkanHeaderVersion, "VUID-VkAccelerationStructureGeometryAabbsDataKHR-pNext-pNext", kVUIDUndefined); if (pInfos[i].pGeometries[j].geometry.aabbs.stride > UINT32_MAX) { skip |= LogError(device, "VUID-VkAccelerationStructureGeometryAabbsDataKHR-stride-03820", "(%s):stride must be less than or equal to 2^32-1", api_name); } } if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR && pInfos[i].pGeometries[j].geometryType != VK_GEOMETRY_TYPE_INSTANCES_KHR) { skip |= LogError(device, "VUID-VkAccelerationStructureBuildGeometryInfoKHR-type-03789", "(%s): If type is VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, the geometryType member" " of elements of either pGeometries or ppGeometries must be VK_GEOMETRY_TYPE_INSTANCES_KHR", api_name); } if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR) { if 
(pInfos[i].pGeometries[j].geometryType == VK_GEOMETRY_TYPE_INSTANCES_KHR) { skip |= LogError(device, "VUID-VkAccelerationStructureBuildGeometryInfoKHR-type-03791", "(%s): If type is VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR the geometryType member " "of elements of" " either pGeometries or ppGeometries must not be VK_GEOMETRY_TYPE_INSTANCES_KHR", api_name); } if (pInfos[i].pGeometries[j].geometryType != pInfos[i].pGeometries[0].geometryType) { skip |= LogError(device, "VUID-VkAccelerationStructureBuildGeometryInfoKHR-type-03792", "(%s): If type is VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR then the geometryType" " member of each geometry in either pGeometries or ppGeometries must be the same.", api_name); } } } } if (pInfos[i].ppGeometries != NULL) { for (uint32_t j = 0; j < pInfos[i].geometryCount; ++j) { skip |= validate_ranged_enum( api_name, ParameterName("pInfos[%i].ppGeometries[%i]->geometryType", ParameterName::IndexVector{i, j}), "VkGeometryTypeKHR", AllVkGeometryTypeKHREnums, pInfos[i].ppGeometries[j]->geometryType, "VUID-VkAccelerationStructureGeometryKHR-geometryType-parameter"); if (pInfos[i].ppGeometries[j]->geometryType == VK_GEOMETRY_TYPE_TRIANGLES_KHR) { skip |= validate_struct_type(api_name, "pInfos[i].pGeometries[j].geometry.triangles", "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR", &pInfos[i].ppGeometries[j]->geometry.triangles, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR, true, "VUID-VkAccelerationStructureGeometryKHR-triangles-parameter", kVUIDUndefined); skip |= validate_struct_type( api_name, ParameterName("pInfos[%i].ppGeometries[%i]->geometry.triangles", ParameterName::IndexVector{i, j}), "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR", &(pInfos[i].ppGeometries[j]->geometry.triangles), VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR, false, kVUIDUndefined, "VUID-VkAccelerationStructureGeometryTrianglesDataKHR-sType-sType"); skip |= validate_struct_pnext( api_name, ParameterName("pInfos[%i].ppGeometries[%i]->geometry.triangles.pNext", ParameterName::IndexVector{i, j}), NULL, pInfos[i].ppGeometries[j]->geometry.triangles.pNext, 0, NULL, GeneratedVulkanHeaderVersion, "VUID-VkAccelerationStructureGeometryTrianglesDataKHR-pNext-pNext", kVUIDUndefined); skip |= validate_ranged_enum(api_name, ParameterName("pInfos[%i].ppGeometries[%i]->geometry.triangles.vertexFormat", ParameterName::IndexVector{i, j}), "VkFormat", AllVkFormatEnums, pInfos[i].ppGeometries[j]->geometry.triangles.vertexFormat, "VUID-VkAccelerationStructureGeometryTrianglesDataKHR-vertexFormat-parameter"); skip |= validate_ranged_enum(api_name, ParameterName("pInfos[%i].ppGeometries[%i]->geometry.triangles.indexType", ParameterName::IndexVector{i, j}), "VkIndexType", AllVkIndexTypeEnums, pInfos[i].ppGeometries[j]->geometry.triangles.indexType, "VUID-VkAccelerationStructureGeometryTrianglesDataKHR-indexType-parameter"); if (pInfos[i].ppGeometries[j]->geometry.triangles.vertexStride > UINT32_MAX) { skip |= LogError(device, "VUID-VkAccelerationStructureGeometryTrianglesDataKHR-vertexStride-03819", "(%s):vertexStride must be less than or equal to 2^32-1", api_name); } if (pInfos[i].ppGeometries[j]->geometry.triangles.indexType != VK_INDEX_TYPE_UINT16 && pInfos[i].ppGeometries[j]->geometry.triangles.indexType != VK_INDEX_TYPE_UINT32 && pInfos[i].ppGeometries[j]->geometry.triangles.indexType != VK_INDEX_TYPE_NONE_KHR) { skip |= LogError(device, 
"VUID-VkAccelerationStructureGeometryTrianglesDataKHR-indexType-03798", "(%s):indexType must be VK_INDEX_TYPE_UINT16, VK_INDEX_TYPE_UINT32, or VK_INDEX_TYPE_NONE_KHR", api_name); } } if (pInfos[i].ppGeometries[j]->geometryType == VK_GEOMETRY_TYPE_INSTANCES_KHR) { skip |= validate_struct_type(api_name, "pInfos[i].pGeometries[j].geometry.instances", "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR", &pInfos[i].ppGeometries[j]->geometry.instances, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR, true, "VUID-VkAccelerationStructureGeometryKHR-instances-parameter", kVUIDUndefined); skip |= validate_struct_type( api_name, ParameterName("pInfos[%i].ppGeometries[%i]->geometry.instances", ParameterName::IndexVector{i, j}), "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR", &(pInfos[i].ppGeometries[j]->geometry.instances), VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR, false, kVUIDUndefined, "VUID-VkAccelerationStructureGeometryInstancesDataKHR-sType-sType"); skip |= validate_struct_pnext( api_name, ParameterName("pInfos[%i].ppGeometries[%i]->geometry.instances.pNext", ParameterName::IndexVector{i, j}), NULL, pInfos[i].ppGeometries[j]->geometry.instances.pNext, 0, NULL, GeneratedVulkanHeaderVersion, "VUID-VkAccelerationStructureGeometryInstancesDataKHR-pNext-pNext", kVUIDUndefined); skip |= validate_bool32(api_name, ParameterName("pInfos[%i].ppGeometries[%i]->geometry.instances.arrayOfPointers", ParameterName::IndexVector{i, j}), pInfos[i].ppGeometries[j]->geometry.instances.arrayOfPointers); } if (pInfos[i].ppGeometries[j]->geometryType == VK_GEOMETRY_TYPE_AABBS_KHR) { skip |= validate_struct_type(api_name, "pInfos[i].pGeometries[j].geometry.aabbs", "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR", &pInfos[i].ppGeometries[j]->geometry.aabbs, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR, true, "VUID-VkAccelerationStructureGeometryKHR-aabbs-parameter", kVUIDUndefined); skip |= validate_struct_type( api_name, ParameterName("pInfos[%i].ppGeometries[%i]->geometry.aabbs", ParameterName::IndexVector{i, j}), "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR", &(pInfos[i].ppGeometries[j]->geometry.aabbs), VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR, false, kVUIDUndefined, "VUID-VkAccelerationStructureGeometryAabbsDataKHR-sType-sType"); skip |= validate_struct_pnext( api_name, ParameterName("pInfos[%i].ppGeometries[%i]->geometry.aabbs.pNext", ParameterName::IndexVector{i, j}), NULL, pInfos[i].ppGeometries[j]->geometry.aabbs.pNext, 0, NULL, GeneratedVulkanHeaderVersion, "VUID-VkAccelerationStructureGeometryAabbsDataKHR-pNext-pNext", kVUIDUndefined); if (pInfos[i].ppGeometries[j]->geometry.aabbs.stride > UINT32_MAX) { skip |= LogError(device, "VUID-VkAccelerationStructureGeometryAabbsDataKHR-stride-03820", "(%s):stride must be less than or equal to 2^32-1", api_name); } } if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR && pInfos[i].ppGeometries[j]->geometryType != VK_GEOMETRY_TYPE_INSTANCES_KHR) { skip |= LogError(device, "VUID-VkAccelerationStructureBuildGeometryInfoKHR-type-03789", "(%s): If type is VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, the geometryType member" " of elements of either pGeometries or ppGeometries must be VK_GEOMETRY_TYPE_INSTANCES_KHR", api_name); } if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR) { if (pInfos[i].ppGeometries[j]->geometryType == VK_GEOMETRY_TYPE_INSTANCES_KHR) { skip |= 
LogError(device, "VUID-VkAccelerationStructureBuildGeometryInfoKHR-type-03791", "(%s): If type is VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR the geometryType member " "of elements of" " either pGeometries or ppGeometries must not be VK_GEOMETRY_TYPE_INSTANCES_KHR", api_name); } if (pInfos[i].ppGeometries[j]->geometryType != pInfos[i].ppGeometries[0]->geometryType) { skip |= LogError(device, "VUID-VkAccelerationStructureBuildGeometryInfoKHR-type-03792", "(%s): If type is VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR then the geometryType" " member of each geometry in either pGeometries or ppGeometries must be the same.", api_name); } } } } } return skip; } bool StatelessValidation::manual_PreCallValidateCmdBuildAccelerationStructuresKHR( VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR *pInfos, const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos) const { bool skip = false; skip |= ValidateAccelerationStructureBuildGeometryInfoKHR(pInfos, infoCount, "vkCmdBuildAccelerationStructuresKHR"); for (uint32_t i = 0; i < infoCount; ++i) { if (SafeModulo(pInfos[i].scratchData.deviceAddress, phys_dev_ext_props.acc_structure_props.minAccelerationStructureScratchOffsetAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03710", "vkCmdBuildAccelerationStructuresKHR:For each element of pInfos, its " "scratchData.deviceAddress member must be a multiple of " "VkPhysicalDeviceAccelerationStructurePropertiesKHR::minAccelerationStructureScratchOffsetAlignment."); } for (uint32_t k = 0; k < infoCount; ++k) { if (i == k) continue; bool found = false; if (pInfos[i].dstAccelerationStructure == pInfos[k].dstAccelerationStructure) { skip |= LogError( device, "VUID-vkCmdBuildAccelerationStructuresKHR-dstAccelerationStructure-03698", "vkCmdBuildAccelerationStructuresKHR:The dstAccelerationStructure member of any element (%d) of pInfos must " "not be " "the same acceleration structure as the dstAccelerationStructure member of any other element (%d) of pInfos.", i, k); found = true; } if (pInfos[i].srcAccelerationStructure == pInfos[k].dstAccelerationStructure) { skip |= LogError( device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03403", "vkCmdBuildAccelerationStructuresKHR:The srcAccelerationStructure member of any element (%d) of pInfos must " "not be " "the same acceleration structure as the dstAccelerationStructure member of any other element (%d) of pInfos.", i, k); found = true; } if (found) break; } for (uint32_t j = 0; j < pInfos[i].geometryCount; ++j) { if (pInfos[i].pGeometries) { if (pInfos[i].pGeometries[j].geometryType == VK_GEOMETRY_TYPE_INSTANCES_KHR) { if (pInfos[i].pGeometries[j].geometry.instances.arrayOfPointers == VK_TRUE) { if (SafeModulo(pInfos[i].pGeometries[j].geometry.instances.data.deviceAddress, 8) != 0) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03716", "vkCmdBuildAccelerationStructuresKHR:For any element of pInfos[i].pGeometries with a " "geometryType of VK_GEOMETRY_TYPE_INSTANCES_KHR, if geometry.arrayOfPointers is " "VK_TRUE, geometry.data->deviceAddress must be aligned to 8 bytes."); } } else { if (SafeModulo(pInfos[i].pGeometries[j].geometry.instances.data.deviceAddress, 16) != 0) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03715", "vkCmdBuildAccelerationStructuresKHR:For any element of pInfos[i].pGeometries with a " "geometryType of VK_GEOMETRY_TYPE_INSTANCES_KHR, if geometry.arrayOfPointers is 
VK_FALSE, " "geometry.data->deviceAddress must be aligned to 16 bytes."); } } } else if (pInfos[i].pGeometries[j].geometryType == VK_GEOMETRY_TYPE_AABBS_KHR) { if (SafeModulo(pInfos[i].pGeometries[j].geometry.instances.data.deviceAddress, 8) != 0) { skip |= LogError( device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03714", "vkCmdBuildAccelerationStructuresKHR:For any element of pInfos[i].pGeometries with a " "geometryType of VK_GEOMETRY_TYPE_AABBS_KHR, geometry.data->deviceAddress must be aligned to 8 bytes."); } } else if (pInfos[i].pGeometries[j].geometryType == VK_GEOMETRY_TYPE_TRIANGLES_KHR) { if (SafeModulo(pInfos[i].pGeometries[j].geometry.triangles.transformData.deviceAddress, 16) != 0) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03810", "vkCmdBuildAccelerationStructuresKHR:For any element of pInfos[i].pGeometries " "with a geometryType of VK_GEOMETRY_TYPE_TRIANGLES_KHR, " "geometry.transformData->deviceAddress must be aligned to 16 bytes."); } } } else if (pInfos[i].ppGeometries) { if (pInfos[i].ppGeometries[j]->geometryType == VK_GEOMETRY_TYPE_INSTANCES_KHR) { if (pInfos[i].ppGeometries[j]->geometry.instances.arrayOfPointers == VK_TRUE) { if (SafeModulo(pInfos[i].ppGeometries[j]->geometry.instances.data.deviceAddress, 8) != 0) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03716", "vkCmdBuildAccelerationStructuresKHR:For any element of pInfos[i].pGeometries with a " "geometryType of VK_GEOMETRY_TYPE_INSTANCES_KHR, if geometry.arrayOfPointers is " "VK_TRUE, geometry.data->deviceAddress must be aligned to 8 bytes."); } } else { if (SafeModulo(pInfos[i].ppGeometries[j]->geometry.instances.data.deviceAddress, 16) != 0) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03715", "vkCmdBuildAccelerationStructuresKHR:For any element of pInfos[i].pGeometries with a " "geometryType of VK_GEOMETRY_TYPE_INSTANCES_KHR, if geometry.arrayOfPointers is VK_FALSE, " "geometry.data->deviceAddress must be aligned to 16 bytes."); } } } else if (pInfos[i].ppGeometries[j]->geometryType == VK_GEOMETRY_TYPE_AABBS_KHR) { if (SafeModulo(pInfos[i].ppGeometries[j]->geometry.instances.data.deviceAddress, 8) != 0) { skip |= LogError( device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03714", "vkCmdBuildAccelerationStructuresKHR:For any element of pInfos[i].pGeometries with a " "geometryType of VK_GEOMETRY_TYPE_AABBS_KHR, geometry.data->deviceAddress must be aligned to 8 bytes."); } } else if (pInfos[i].ppGeometries[j]->geometryType == VK_GEOMETRY_TYPE_TRIANGLES_KHR) { if (SafeModulo(pInfos[i].ppGeometries[j]->geometry.triangles.transformData.deviceAddress, 16) != 0) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03810", "vkCmdBuildAccelerationStructuresKHR:For any element of pInfos[i].pGeometries " "with a geometryType of VK_GEOMETRY_TYPE_TRIANGLES_KHR, " "geometry.transformData->deviceAddress must be aligned to 16 bytes."); } } } } } return skip; } bool StatelessValidation::manual_PreCallValidateCmdBuildAccelerationStructuresIndirectKHR( VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR *pInfos, const VkDeviceAddress *pIndirectDeviceAddresses, const uint32_t *pIndirectStrides, const uint32_t *const *ppMaxPrimitiveCounts) const { bool skip = false; skip |= ValidateAccelerationStructureBuildGeometryInfoKHR(pInfos, infoCount, "vkCmdBuildAccelerationStructuresIndirectKHR"); const auto *ray_tracing_acceleration_structure_features = 
LvlFindInChain<VkPhysicalDeviceAccelerationStructureFeaturesKHR>(device_createinfo_pnext); if (!ray_tracing_acceleration_structure_features || ray_tracing_acceleration_structure_features->accelerationStructureIndirectBuild == VK_FALSE) { skip |= LogError( device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-accelerationStructureIndirectBuild-03650", "vkCmdBuildAccelerationStructuresIndirectKHR: The " "VkPhysicalDeviceAccelerationStructureFeaturesKHR::accelerationStructureIndirectBuild feature must be enabled."); } for (uint32_t i = 0; i < infoCount; ++i) { if (pInfos[i].mode == VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR) { if (pInfos[i].srcAccelerationStructure == VK_NULL_HANDLE) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03666", "vkCmdBuildAccelerationStructuresIndirectKHR:For each element of pInfos, if its mode member is " "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must not be " "VK_NULL_HANDLE."); } } if (SafeModulo(pInfos[i].scratchData.deviceAddress, phys_dev_ext_props.acc_structure_props.minAccelerationStructureScratchOffsetAlignment) != 0) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03710", "vkCmdBuildAccelerationStructuresIndirectKHR:For each element of pInfos, its " "scratchData.deviceAddress member must be a multiple of " "VkPhysicalDeviceAccelerationStructurePropertiesKHR::minAccelerationStructureScratchOffsetAlignment."); } for (uint32_t k = 0; k < infoCount; ++k) { if (i == k) continue; if (pInfos[i].srcAccelerationStructure == pInfos[k].dstAccelerationStructure) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03403", "vkCmdBuildAccelerationStructuresIndirectKHR:The srcAccelerationStructure member of any element (%d) " "of pInfos must not be the same acceleration structure as the dstAccelerationStructure member of " "any other element (%d) of pInfos.", i, k); break; } } for (uint32_t j = 0; j < pInfos[i].geometryCount; ++j) { if (pInfos[i].pGeometries) { if (pInfos[i].pGeometries[j].geometryType == VK_GEOMETRY_TYPE_INSTANCES_KHR) { if (pInfos[i].pGeometries[j].geometry.instances.arrayOfPointers == VK_TRUE) { if (SafeModulo(pInfos[i].pGeometries[j].geometry.instances.data.deviceAddress, 8) != 0) { skip |= LogError( device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03716", "vkCmdBuildAccelerationStructuresIndirectKHR:For any element of pInfos[i].pGeometries with a " "geometryType of VK_GEOMETRY_TYPE_INSTANCES_KHR, if geometry.arrayOfPointers is " "VK_TRUE, geometry.data->deviceAddress must be aligned to 8 bytes."); } } else { if (SafeModulo(pInfos[i].pGeometries[j].geometry.instances.data.deviceAddress, 16) != 0) { skip |= LogError( device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03715", "vkCmdBuildAccelerationStructuresIndirectKHR:For any element of pInfos[i].pGeometries with a " "geometryType of VK_GEOMETRY_TYPE_INSTANCES_KHR, if geometry.arrayOfPointers is VK_FALSE, " "geometry.data->deviceAddress must be aligned to 16 bytes."); } } } if (pInfos[i].pGeometries[j].geometryType == VK_GEOMETRY_TYPE_AABBS_KHR) { if (SafeModulo(pInfos[i].pGeometries[j].geometry.instances.data.deviceAddress, 8) != 0) { skip |= LogError( device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03714", "vkCmdBuildAccelerationStructuresIndirectKHR:For any element of pInfos[i].pGeometries with a " "geometryType of VK_GEOMETRY_TYPE_AABBS_KHR, geometry.data->deviceAddress must be aligned to 8
bytes."); } } if (pInfos[i].pGeometries[j].geometryType == VK_GEOMETRY_TYPE_TRIANGLES_KHR) { if (SafeModulo(pInfos[i].pGeometries[j].geometry.triangles.transformData.deviceAddress, 16) != 0) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03810", "vkCmdBuildAccelerationStructuresIndirectKHR:For any element of pInfos[i].pGeometries " "with a geometryType of VK_GEOMETRY_TYPE_TRIANGLES_KHR, " "geometry.transformData->deviceAddress must be aligned to 16 bytes."); } } } else if (pInfos[i].ppGeometries) { if (pInfos[i].ppGeometries[j]->geometryType == VK_GEOMETRY_TYPE_INSTANCES_KHR) { if (pInfos[i].ppGeometries[j]->geometry.instances.arrayOfPointers == VK_TRUE) { if (SafeModulo(pInfos[i].ppGeometries[j]->geometry.instances.data.deviceAddress, 8) != 0) { skip |= LogError( device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03716", "vkCmdBuildAccelerationStructuresIndirectKHR:For any element of pInfos[i].pGeometries with a " "geometryType of VK_GEOMETRY_TYPE_INSTANCES_KHR, if geometry.arrayOfPointers is " "VK_TRUE, geometry.data->deviceAddress must be aligned to 8 bytes."); } } else { if (SafeModulo(pInfos[i].ppGeometries[j]->geometry.instances.data.deviceAddress, 16) != 0) { skip |= LogError( device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03715", "vkCmdBuildAccelerationStructuresIndirectKHR:For any element of pInfos[i].pGeometries with a " "geometryType of VK_GEOMETRY_TYPE_INSTANCES_KHR, if geometry.arrayOfPointers is VK_FALSE, " "geometry.data->deviceAddress must be aligned to 16 bytes."); } } } if (pInfos[i].ppGeometries[j]->geometryType == VK_GEOMETRY_TYPE_AABBS_KHR) { if (SafeModulo(pInfos[i].ppGeometries[j]->geometry.instances.data.deviceAddress, 8) != 0) { skip |= LogError( device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03714", "vkCmdBuildAccelerationStructuresIndirectKHR:For any element of pInfos[i].pGeometries with a " "geometryType of VK_GEOMETRY_TYPE_AABBS_KHR, geometry.data->deviceAddress must be aligned to 8 bytes."); } } if (pInfos[i].ppGeometries[j]->geometryType == VK_GEOMETRY_TYPE_TRIANGLES_KHR) { if (SafeModulo(pInfos[i].ppGeometries[j]->geometry.triangles.transformData.deviceAddress, 16) != 0) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03810", "vkCmdBuildAccelerationStructuresIndirectKHR:For any element of pInfos[i].pGeometries " "with a geometryType of VK_GEOMETRY_TYPE_TRIANGLES_KHR, " "geometry.transformData->deviceAddress must be aligned to 16 bytes."); } } } } } return skip; } bool StatelessValidation::manual_PreCallValidateBuildAccelerationStructuresKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR *pInfos, const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos) const { bool skip = false; skip |= ValidateAccelerationStructureBuildGeometryInfoKHR(pInfos, infoCount, "vkBuildAccelerationStructuresKHR"); const auto *ray_tracing_acceleration_structure_features = LvlFindInChain<VkPhysicalDeviceAccelerationStructureFeaturesKHR>(device_createinfo_pnext); if (!ray_tracing_acceleration_structure_features || ray_tracing_acceleration_structure_features->accelerationStructureHostCommands == VK_FALSE) { skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-accelerationStructureHostCommands-03581", "vkBuildAccelerationStructuresKHR: The " "VkPhysicalDeviceAccelerationStructureFeaturesKHR::accelerationStructureHostCommands feature must be enabled"); } for
(uint32_t i = 0; i < infoCount; ++i) { for (uint32_t j = 0; j < infoCount; ++j) { if (i == j) continue; bool found = false; if (pInfos[i].dstAccelerationStructure == pInfos[j].dstAccelerationStructure) { skip |= LogError( device, "VUID-vkBuildAccelerationStructuresKHR-dstAccelerationStructure-03698", "vkBuildAccelerationStructuresKHR(): The dstAccelerationStructure member of any element (%d) of pInfos must " "not be " "the same acceleration structure as the dstAccelerationStructure member of any other element (%d) of pInfos.", i, j); found = true; } if (pInfos[i].srcAccelerationStructure == pInfos[j].dstAccelerationStructure) { skip |= LogError( device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03403", "vkBuildAccelerationStructuresKHR(): The srcAccelerationStructure member of any element (%d) of pInfos must " "not be " "the same acceleration structure as the dstAccelerationStructure member of any other element (%d) of pInfos.", i, j); found = true; } if (found) break; } } return skip; } bool StatelessValidation::manual_PreCallValidateGetAccelerationStructureBuildSizesKHR( VkDevice device, VkAccelerationStructureBuildTypeKHR buildType, const VkAccelerationStructureBuildGeometryInfoKHR *pBuildInfo, const uint32_t *pMaxPrimitiveCounts, VkAccelerationStructureBuildSizesInfoKHR *pSizeInfo) const { bool skip = false; skip |= ValidateAccelerationStructureBuildGeometryInfoKHR(pBuildInfo, 1, "vkGetAccelerationStructureBuildSizesKHR"); const auto *ray_tracing_pipeline_features = LvlFindInChain<VkPhysicalDeviceRayTracingPipelineFeaturesKHR>(device_createinfo_pnext); const auto *ray_query_features = LvlFindInChain<VkPhysicalDeviceRayQueryFeaturesKHR>(device_createinfo_pnext); if (!(ray_tracing_pipeline_features || ray_query_features) || ((ray_tracing_pipeline_features && ray_tracing_pipeline_features->rayTracingPipeline == VK_FALSE) || (ray_query_features && ray_query_features->rayQuery == VK_FALSE))) { skip |= LogError(device, "VUID-vkGetAccelerationStructureBuildSizesKHR-rayTracingPipeline-03617", "vkGetAccelerationStructureBuildSizesKHR:The rayTracingPipeline or rayQuery feature must be enabled"); } return skip; } bool StatelessValidation::manual_PreCallValidateCreatePrivateDataSlotEXT(VkDevice device, const VkPrivateDataSlotCreateInfoEXT *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkPrivateDataSlotEXT *pPrivateDataSlot) const { bool skip = false; const auto *private_data_features = LvlFindInChain<VkPhysicalDevicePrivateDataFeaturesEXT>(device_createinfo_pnext); if (private_data_features && private_data_features->privateData == VK_FALSE) { skip |= LogError(device, "VUID-vkCreatePrivateDataSlotEXT-privateData-04564", "vkCreatePrivateDataSlotEXT(): The privateData feature must be enabled."); } return skip; }
1
15,310
:grimacing: yikes, thanks for catching this!
KhronosGroup-Vulkan-ValidationLayers
cpp
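The validation code in this record applies the same pattern dozens of times: look up a feature/property struct with LvlFindInChain, then reject any device address or stride whose SafeModulo against the required alignment is non-zero. Below is a minimal, self-contained sketch of just that pattern; `SafeModulo` here is a hypothetical stand-in for the layer's helper, and the alignment values are made-up examples rather than properties queried from a real device.

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the layer's SafeModulo helper: a modulo that
// tolerates a zero divisor so unset properties do not divide by zero.
static uint64_t SafeModulo(uint64_t dividend, uint64_t divisor) {
    return divisor == 0 ? 0 : dividend % divisor;
}

// Mirrors the fields of VkStridedDeviceAddressRegionKHR used by the checks.
struct StridedRegion {
    uint64_t deviceAddress;
    uint64_t stride;
    uint64_t size;
};

// Returns false (and prints why) if the region violates the shader-binding-
// table rules the vkCmdTraceRaysKHR checks above enforce: base-aligned
// address, handle-aligned stride, stride bounded by maxShaderGroupStride.
bool ValidateSbtRegion(const StridedRegion &r, uint64_t baseAlignment,
                       uint64_t handleAlignment, uint64_t maxStride) {
    bool ok = true;
    if (SafeModulo(r.deviceAddress, baseAlignment) != 0) {
        std::printf("deviceAddress 0x%llx is not a multiple of %llu\n",
                    (unsigned long long)r.deviceAddress,
                    (unsigned long long)baseAlignment);
        ok = false;
    }
    if (SafeModulo(r.stride, handleAlignment) != 0) {
        std::printf("stride %llu is not a multiple of %llu\n",
                    (unsigned long long)r.stride,
                    (unsigned long long)handleAlignment);
        ok = false;
    }
    if (r.stride > maxStride) {
        std::printf("stride %llu exceeds maxShaderGroupStride %llu\n",
                    (unsigned long long)r.stride,
                    (unsigned long long)maxStride);
        ok = false;
    }
    return ok;
}

int main() {
    StridedRegion hit{4096, 48, 96};  // example values only
    // 48 is not a multiple of the (made-up) handle alignment 32, so this
    // fails the same way the stride-03690 check above would.
    return ValidateSbtRegion(hit, 64, 32, 4096) ? 0 : 1;
}
```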
@@ -0,0 +1,10 @@ +using System; +using MvvmCross.Droid.Support.V7.RecyclerView.Model; + +namespace MvvmCross.Droid.Support.V7.RecyclerView +{ + public interface IMvxRecyclerAdapterBindableHolder + { + event Action<MvxViewHolderBindedEventArgs> MvxViewHolderBinded; + } +}
1
1
12,219
`Binded` is weird. I think it needs to be something with `Bound` in it (and drop the `Mvx` bit) like `ViewHolderBound` or something.
MvvmCross-MvvmCross
.cs
@@ -84,8 +84,8 @@ func importPoolBuilder(cStorPool *apis.CStorPool, cachefileFlag bool) []string { } // CreatePool creates a new cStor pool. -func CreatePool(cStorPool *apis.CStorPool) error { - createAttr := createPoolBuilder(cStorPool) +func CreatePool(cStorPool *apis.CStorPool, diskList []string) error { + createAttr := createPoolBuilder(cStorPool, diskList) glog.V(4).Info("createAttr : ", createAttr) stdoutStderr, err := RunnerVar.RunCombinedOutput(PoolOperator, createAttr...)
1
/*
Copyright 2018 The OpenEBS Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pool

import (
	"fmt"
	"strings"
	"time"

	"github.com/golang/glog"
	apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
	"github.com/openebs/maya/pkg/util"
	"github.com/pkg/errors"
)

var (
	poolTypeCommand  = map[string]string{"mirrored": "mirror", "raidz": "raidz", "raidz2": "raidz2"}
	defaultGroupSize = map[string]int{"striped": 1, "mirrored": 2, "raidz": 3, "raidz2": 6}
)

// PoolOperator is the name of the tool that makes pool-related operations.
const (
	PoolOperator           = "zpool"
	StatusNoPoolsAvailable = "no pools available"
	ZpoolStatusDegraded    = "DEGRADED"
	ZpoolStatusFaulted     = "FAULTED"
	ZpoolStatusOffline     = "OFFLINE"
	ZpoolStatusOnline      = "ONLINE"
	ZpoolStatusRemoved     = "REMOVED"
	ZpoolStatusUnavail     = "UNAVAIL"
)

// PoolAddEventHandled is a flag representing if the pool has been initially imported or created.
var PoolAddEventHandled = false

// PoolNamePrefix is a typed string to store pool name prefix.
type PoolNamePrefix string

// PoolPrefix is prefix for pool name.
const (
	PoolPrefix PoolNamePrefix = "cstor-"
)

// RunnerVar is the runner variable for executing binaries.
var RunnerVar util.Runner

// ImportPool imports cStor pool if already present.
func ImportPool(cStorPool *apis.CStorPool, cachefileFlag bool) error {
	importAttr := importPoolBuilder(cStorPool, cachefileFlag)
	stdoutStderr, err := RunnerVar.RunCombinedOutput(PoolOperator, importAttr...)
	if err != nil {
		glog.Errorf("Unable to import pool: %v, %v", err.Error(), string(stdoutStderr))
		return err
	}
	glog.Info("Importing Pool Successful")
	return nil
}

// importPoolBuilder is to build pool import command.
func importPoolBuilder(cStorPool *apis.CStorPool, cachefileFlag bool) []string {
	// populate pool import attributes.
	var importAttr []string
	importAttr = append(importAttr, "import")
	if cStorPool.Spec.PoolSpec.CacheFile != "" && cachefileFlag {
		importAttr = append(importAttr, "-c", cStorPool.Spec.PoolSpec.CacheFile,
			"-o", cStorPool.Spec.PoolSpec.CacheFile)
	}
	importAttr = append(importAttr, string(PoolPrefix)+string(cStorPool.ObjectMeta.UID))
	return importAttr
}

// CreatePool creates a new cStor pool.
func CreatePool(cStorPool *apis.CStorPool) error {
	createAttr := createPoolBuilder(cStorPool)
	glog.V(4).Info("createAttr : ", createAttr)
	stdoutStderr, err := RunnerVar.RunCombinedOutput(PoolOperator, createAttr...)
	if err != nil {
		glog.Errorf("Unable to create pool: %v", string(stdoutStderr))
		return err
	}
	return nil
}

// createPoolBuilder is to build create pool command.
func createPoolBuilder(cStorPool *apis.CStorPool) []string {
	// populate pool creation attributes.
	var createAttr []string
	// When disks of other file formats, say ext4, are used to create cstorpool,
	// it errors out with normal zpool create. To avoid that, we go for forceful create.
	createAttr = append(createAttr, "create", "-f")
	if cStorPool.Spec.PoolSpec.CacheFile != "" {
		cachefile := "cachefile=" + cStorPool.Spec.PoolSpec.CacheFile
		createAttr = append(createAttr, "-o", cachefile)
	}

	openebsPoolname := "io.openebs:poolname=" + cStorPool.Name
	createAttr = append(createAttr, "-O", openebsPoolname)

	poolNameUID := string(PoolPrefix) + string(cStorPool.ObjectMeta.UID)
	createAttr = append(createAttr, poolNameUID)

	poolType := cStorPool.Spec.PoolSpec.PoolType
	diskList := cStorPool.Spec.Disks.DiskList
	if poolType == "striped" {
		for _, disk := range diskList {
			createAttr = append(createAttr, disk)
		}
		return createAttr
	}
	// To generate pool of the following types:
	// mirrored (grouped by multiples of 2): mirror disk1 disk2 mirror disk3 disk4
	// raidz (grouped by multiples of 3): raidz disk1 disk2 disk3 raidz disk4 disk5 disk6
	// raidz2 (grouped by multiples of 6): raidz2 disk1 disk2 disk3 disk4 disk5 disk6
	for i, disk := range diskList {
		if i%defaultGroupSize[poolType] == 0 {
			createAttr = append(createAttr, poolTypeCommand[poolType])
		}
		createAttr = append(createAttr, disk)
	}
	return createAttr
}

// CheckValidPool checks for validity of CStorPool resource.
func CheckValidPool(cStorPool *apis.CStorPool) error {
	poolUID := cStorPool.ObjectMeta.UID
	if len(poolUID) == 0 {
		return fmt.Errorf("Poolname/UID cannot be empty")
	}
	diskCount := len(cStorPool.Spec.Disks.DiskList)
	poolType := cStorPool.Spec.PoolSpec.PoolType
	if diskCount < defaultGroupSize[poolType] {
		return errors.Errorf("Expected %v no of disks, got %v no of disks for pool type: %v",
			defaultGroupSize[poolType], diskCount, poolType)
	}
	if diskCount%defaultGroupSize[poolType] != 0 {
		return errors.Errorf("Expected multiples of %v number of disks, got %v no of disks for pool type: %v",
			defaultGroupSize[poolType], diskCount, poolType)
	}
	return nil
}

// GetPoolName returns the pools already created.
func GetPoolName() ([]string, error) {
	GetPoolStr := []string{"get", "-Hp", "name", "-o", "name"}
	poolNameByte, err := RunnerVar.RunStdoutPipe(PoolOperator, GetPoolStr...)
	if err != nil || len(string(poolNameByte)) == 0 {
		return []string{}, err
	}
	noisyPoolName := string(poolNameByte)
	sepNoisyPoolName := strings.Split(noisyPoolName, "\n")
	var poolNames []string
	for _, poolName := range sepNoisyPoolName {
		poolName = strings.TrimSpace(poolName)
		poolNames = append(poolNames, poolName)
	}
	return poolNames, nil
}

// DeletePool destroys the pool created.
func DeletePool(poolName string) error {
	deletePoolStr := []string{"destroy", poolName}
	stdoutStderr, err := RunnerVar.RunCombinedOutput(PoolOperator, deletePoolStr...)
	if err != nil {
		glog.Errorf("Unable to delete pool: %v", string(stdoutStderr))
		return err
	}
	return nil
}

// Capacity finds the capacity of the pool.
// The output of the command executed is as follows:
/*
root@cstor-sparse-pool-o8bw-6869f69cc8-jhs6c:/# zpool get size,free,allocated cstor-2ebe403a-f2e2-11e8-87fd-42010a800087
NAME                                        PROPERTY   VALUE  SOURCE
cstor-2ebe403a-f2e2-11e8-87fd-42010a800087  size       9.94G  -
cstor-2ebe403a-f2e2-11e8-87fd-42010a800087  free       9.94G  -
cstor-2ebe403a-f2e2-11e8-87fd-42010a800087  allocated  202K   -
*/
func Capacity(poolName string) (*apis.CStorPoolCapacityAttr, error) {
	capacityPoolStr := []string{"get", "size,free,allocated", poolName}
	stdoutStderr, err := RunnerVar.RunCombinedOutput(PoolOperator, capacityPoolStr...)
	if err != nil {
		glog.Errorf("Unable to get pool capacity: %v", string(stdoutStderr))
		return nil, err
	}
	poolCapacity := capacityOutputParser(string(stdoutStderr))
	if strings.TrimSpace(poolCapacity.Used) == "" || strings.TrimSpace(poolCapacity.Free) == "" {
		return nil, fmt.Errorf("Unable to get pool capacity from capacity parser")
	}
	return poolCapacity, nil
}

// Status finds the status of the pool.
// The output of the command (`zpool status <pool-name>`) executed is as follows:
/*
  pool: cstor-530c9c4f-e0df-11e8-94a8-42010a80013b
 state: ONLINE
  scan: none requested
config:

	NAME                                        STATE   READ  WRITE  CKSUM
	cstor-530c9c4f-e0df-11e8-94a8-42010a80013b  ONLINE  0     0      0
	  scsi-0Google_PersistentDisk_ashu-disk2    ONLINE  0     0      0

errors: No known data errors
*/
// The output is then parsed by the poolStatusOutputParser function to get the status of the pool.
func Status(poolName string) (string, error) {
	var poolStatus string
	statusPoolStr := []string{"status", poolName}
	stdoutStderr, err := RunnerVar.RunCombinedOutput(PoolOperator, statusPoolStr...)
	if err != nil {
		glog.Errorf("Unable to get pool status: %v", string(stdoutStderr))
		return "", err
	}
	poolStatus = poolStatusOutputParser(string(stdoutStderr))
	if poolStatus == ZpoolStatusDegraded {
		return string(apis.CStorPoolStatusDegraded), nil
	} else if poolStatus == ZpoolStatusFaulted {
		return string(apis.CStorPoolStatusOffline), nil
	} else if poolStatus == ZpoolStatusOffline {
		return string(apis.CStorPoolStatusOffline), nil
	} else if poolStatus == ZpoolStatusOnline {
		return string(apis.CStorPoolStatusOnline), nil
	} else if poolStatus == ZpoolStatusRemoved {
		return string(apis.CStorPoolStatusDegraded), nil
	} else if poolStatus == ZpoolStatusUnavail {
		return string(apis.CStorPoolStatusOffline), nil
	}
	return string(apis.CStorPoolStatusError), nil
}

// poolStatusOutputParser parses output of `zpool status` command to extract the status of the pool.
// ToDo: Need to find some better way e.g. contract for zpool command outputs.
func poolStatusOutputParser(output string) string {
	var outputStr []string
	var poolStatus string
	if strings.TrimSpace(output) != "" {
		outputStr = strings.Split(output, "\n")
		if !(len(outputStr) < 2) {
			poolStatusArr := strings.Split(outputStr[1], ":")
			if !(len(poolStatusArr) < 2) {
				poolStatus = strings.TrimSpace(poolStatusArr[1])
			}
		}
	}
	return poolStatus
}

// capacityOutputParser parses output of `zpool get` command to extract the capacity of the pool.
// ToDo: Need to find some better way e.g. contract for zpool command outputs.
func capacityOutputParser(output string) *apis.CStorPoolCapacityAttr {
	var outputStr []string
	// Initialize capacity object.
	capacity := &apis.CStorPoolCapacityAttr{
		"",
		"",
		"",
	}
	if strings.TrimSpace(output) != "" {
		outputStr = strings.Split(output, "\n")
		if !(len(outputStr) < 4) {
			poolCapacityArrTotal := strings.Fields(outputStr[1])
			poolCapacityArrFree := strings.Fields(outputStr[2])
			poolCapacityArrAlloc := strings.Fields(outputStr[3])
			if !(len(poolCapacityArrTotal) < 4 || len(poolCapacityArrFree) < 4 || len(poolCapacityArrAlloc) < 4) {
				capacity.Total = strings.TrimSpace(poolCapacityArrTotal[2])
				capacity.Free = strings.TrimSpace(poolCapacityArrFree[2])
				capacity.Used = strings.TrimSpace(poolCapacityArrAlloc[2])
			}
		}
	}
	return capacity
}

// SetCachefile is to set the cachefile for pool.
func SetCachefile(cStorPool *apis.CStorPool) error {
	poolNameUID := string(PoolPrefix) + string(cStorPool.ObjectMeta.UID)
	setCachefileStr := []string{"set", "cachefile=" + cStorPool.Spec.PoolSpec.CacheFile, poolNameUID}
	stdoutStderr, err := RunnerVar.RunCombinedOutput(PoolOperator, setCachefileStr...)
	if err != nil {
		glog.Errorf("Unable to set cachefile: %v", string(stdoutStderr))
		return err
	}
	return nil
}

// CheckForZreplInitial is a blocking call for checking status of zrepl in cstor-pool container.
func CheckForZreplInitial(ZreplRetryInterval time.Duration) {
	for {
		_, err := RunnerVar.RunCombinedOutput(PoolOperator, "status")
		if err != nil {
			time.Sleep(ZreplRetryInterval)
			glog.Errorf("zpool status returned error in zrepl startup : %v", err)
			glog.Infof("Waiting for zpool replication container to start...")
			continue
		}
		break
	}
}

// CheckForZreplContinuous is a continuous health checker for status of zrepl in cstor-pool container.
func CheckForZreplContinuous(ZreplRetryInterval time.Duration) {
	for {
		out, err := RunnerVar.RunCombinedOutput(PoolOperator, "status")
		if err == nil {
			// Even though we imported the pool, it disappeared (may be due to the zrepl
			// container crashing), so we need to reimport.
			if PoolAddEventHandled && strings.Contains(string(out), StatusNoPoolsAvailable) {
				break
			}
			time.Sleep(ZreplRetryInterval)
			continue
		}
		glog.Errorf("zpool status returned error in zrepl healthcheck : %v, out: %s", err, out)
		break
	}
}

// LabelClear is to clear zpool label on disks.
func LabelClear(disks []string) error {
	var failLabelClear = false
	for _, disk := range disks {
		labelClearStr := []string{"labelclear", "-f", disk}
		stdoutStderr, err := RunnerVar.RunCombinedOutput(PoolOperator, labelClearStr...)
		if err != nil {
			glog.Errorf("Unable to clear label: %v, err = %v", string(stdoutStderr), err)
			failLabelClear = true
		}
	}
	if failLabelClear {
		return fmt.Errorf("Unable to clear labels from the disks of the pool")
	}
	return nil
}
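Since capacityOutputParser indexes fixed columns of the `zpool get` output, a test pinned to the documented sample output is a cheap guard against format drift. A minimal sketch, assuming a package-internal test file next to the source above; the pool name is shortened for readability:

package pool

import "testing"

func TestCapacityOutputParser(t *testing.T) {
	// Sample shaped like the doc comment on Capacity: a header line
	// followed by the size, free, and allocated rows.
	out := "NAME PROPERTY VALUE SOURCE\n" +
		"cstor-2ebe403a size 9.94G -\n" +
		"cstor-2ebe403a free 9.94G -\n" +
		"cstor-2ebe403a allocated 202K -\n"
	got := capacityOutputParser(out)
	if got.Total != "9.94G" || got.Free != "9.94G" || got.Used != "202K" {
		t.Fatalf("unexpected capacity: %+v", got)
	}
}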
1
13,828
Can we have String() and GoString() implementations of the `apis.CStorPool` struct to pretty-print it? (See the sketch after this record.)
openebs-maya
go
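The review comment asks for fmt.Stringer / fmt.GoStringer implementations so the resource can be pretty-printed in logs. A minimal sketch of what that could look like — the struct below is a simplified stand-in exposing only the fields the pool package actually touches (name, UID, pool type, cache file, disk list); the real apis.CStorPool has more:

package apis

import (
	"fmt"
	"strings"
)

// CStorPool is a simplified stand-in for apis.CStorPool, for illustration only.
type CStorPool struct {
	Name      string
	UID       string
	PoolType  string
	CacheFile string
	DiskList  []string
}

// String implements fmt.Stringer for compact, human-readable log lines.
func (c *CStorPool) String() string {
	return fmt.Sprintf("CStorPool{name: %s, uid: %s, type: %s, disks: [%s]}",
		c.Name, c.UID, c.PoolType, strings.Join(c.DiskList, ", "))
}

// GoString implements fmt.GoStringer, used by the %#v verb.
func (c *CStorPool) GoString() string {
	return fmt.Sprintf("&CStorPool{Name: %q, UID: %q, PoolType: %q, CacheFile: %q, DiskList: %#v}",
		c.Name, c.UID, c.PoolType, c.CacheFile, c.DiskList)
}

With these methods in place, `glog.Infof("%v", cStorPool)` prints the compact form and `%#v` the Go-syntax form, since glog formats through the fmt package.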
@@ -914,6 +914,10 @@ CheckedError Parser::ParseTable(const StructDef &struct_def, std::string *value, size_t fieldn = 0; auto err = ParseTableDelimiters(fieldn, &struct_def, [&](const std::string &name) -> CheckedError { + if (name == "$schema") { + NEXT(); // Ignore this field. + return NoError(); + } auto field = struct_def.fields.Lookup(name); if (!field) { if (!opts.skip_unexpected_fields_in_json) {
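The hunk above makes ParseTable tolerate a top-level "$schema" member, which JSON-Schema-aware editors commonly inject, by consuming and discarding its value before field lookup. Assuming a hypothetical table with a single `name` field, input like the following would previously fail with "unknown field: $schema" (unless skip_unexpected_fields_in_json was set) and now parses cleanly:

{
  "$schema": "http://json-schema.org/draft-04/schema#",
  "name": "example"
}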
1
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <list> #ifdef _WIN32 #if !defined(_USE_MATH_DEFINES) #define _USE_MATH_DEFINES // For M_PI. #endif // !defined(_USE_MATH_DEFINES) #endif // _WIN32 #include <math.h> #include "flatbuffers/idl.h" #include "flatbuffers/util.h" namespace flatbuffers { const char *const kTypeNames[] = { #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \ IDLTYPE, FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) #undef FLATBUFFERS_TD nullptr }; const char kTypeSizes[] = { #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \ sizeof(CTYPE), FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) #undef FLATBUFFERS_TD }; // The enums in the reflection schema should match the ones we use internally. // Compare the last element to check if these go out of sync. static_assert(BASE_TYPE_UNION == static_cast<BaseType>(reflection::Union), "enums don't match"); // Any parsing calls have to be wrapped in this macro, which automates // handling of recursive error checking a bit. It will check the received // CheckedError object, and return straight away on error. #define ECHECK(call) { auto ce = (call); if (ce.Check()) return ce; } // These two functions are called hundreds of times below, so define a short // form: #define NEXT() ECHECK(Next()) #define EXPECT(tok) ECHECK(Expect(tok)) static bool ValidateUTF8(const std::string &str) { const char *s = &str[0]; const char * const sEnd = s + str.length(); while (s < sEnd) { if (FromUTF8(&s) < 0) { return false; } } return true; } CheckedError Parser::Error(const std::string &msg) { error_ = file_being_parsed_.length() ? AbsolutePath(file_being_parsed_) : ""; #ifdef _WIN32 error_ += "(" + NumToString(line_) + ")"; // MSVC alike #else if (file_being_parsed_.length()) error_ += ":"; error_ += NumToString(line_) + ":0"; // gcc alike #endif error_ += ": error: " + msg; return CheckedError(true); } inline CheckedError NoError() { return CheckedError(false); } inline std::string OutOfRangeErrorMsg(int64_t val, const std::string &op, int64_t limit) { const std::string cause = NumToString(val) + op + NumToString(limit); return "constant does not fit (" + cause + ")"; } // Ensure that integer values we parse fit inside the declared integer type. CheckedError Parser::CheckInRange(int64_t val, int64_t min, int64_t max) { if (val < min) return Error(OutOfRangeErrorMsg(val, " < ", min)); else if (val > max) return Error(OutOfRangeErrorMsg(val, " > ", max)); else return NoError(); } // atot: templated version of atoi/atof: convert a string to an instance of T. 
template<typename T> inline CheckedError atot(const char *s, Parser &parser, T *val) { int64_t i = StringToInt(s); const int64_t min = std::numeric_limits<T>::min(); const int64_t max = std::numeric_limits<T>::max(); ECHECK(parser.CheckInRange(i, min, max)); *val = (T)i; return NoError(); } template<> inline CheckedError atot<uint64_t>(const char *s, Parser &parser, uint64_t *val) { (void)parser; *val = StringToUInt(s); return NoError(); } template<> inline CheckedError atot<bool>(const char *s, Parser &parser, bool *val) { (void)parser; *val = 0 != atoi(s); return NoError(); } template<> inline CheckedError atot<float>(const char *s, Parser &parser, float *val) { (void)parser; *val = static_cast<float>(strtod(s, nullptr)); return NoError(); } template<> inline CheckedError atot<double>(const char *s, Parser &parser, double *val) { (void)parser; *val = strtod(s, nullptr); return NoError(); } template<> inline CheckedError atot<Offset<void>>(const char *s, Parser &parser, Offset<void> *val) { (void)parser; *val = Offset<void>(atoi(s)); return NoError(); } std::string Namespace::GetFullyQualifiedName(const std::string &name, size_t max_components) const { // Early exit if we don't have a defined namespace. if (components.size() == 0 || !max_components) { return name; } std::stringstream stream; for (size_t i = 0; i < std::min(components.size(), max_components); i++) { if (i) { stream << "."; } stream << components[i]; } if (name.length()) stream << "." << name; return stream.str(); } // Declare tokens we'll use. Single character tokens are represented by their // ascii character code (e.g. '{'), others above 256. #define FLATBUFFERS_GEN_TOKENS(TD) \ TD(Eof, 256, "end of file") \ TD(StringConstant, 257, "string constant") \ TD(IntegerConstant, 258, "integer constant") \ TD(FloatConstant, 259, "float constant") \ TD(Identifier, 260, "identifier") \ TD(Table, 261, "table") \ TD(Struct, 262, "struct") \ TD(Enum, 263, "enum") \ TD(Union, 264, "union") \ TD(NameSpace, 265, "namespace") \ TD(RootType, 266, "root_type") \ TD(FileIdentifier, 267, "file_identifier") \ TD(FileExtension, 268, "file_extension") \ TD(Include, 269, "include") \ TD(Attribute, 270, "attribute") \ TD(Null, 271, "null") \ TD(Service, 272, "rpc_service") \ TD(NativeInclude, 273, "native_include") #ifdef __GNUC__ __extension__ // Stop GCC complaining about trailing comma with -Wpendantic. #endif enum { #define FLATBUFFERS_TOKEN(NAME, VALUE, STRING) kToken ## NAME = VALUE, FLATBUFFERS_GEN_TOKENS(FLATBUFFERS_TOKEN) #undef FLATBUFFERS_TOKEN #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \ kToken ## ENUM, FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) #undef FLATBUFFERS_TD }; static std::string TokenToString(int t) { static const char *tokens[] = { #define FLATBUFFERS_TOKEN(NAME, VALUE, STRING) STRING, FLATBUFFERS_GEN_TOKENS(FLATBUFFERS_TOKEN) #undef FLATBUFFERS_TOKEN #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \ IDLTYPE, FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) #undef FLATBUFFERS_TD }; if (t < 256) { // A single ascii char token. std::string s; s.append(1, static_cast<char>(t)); return s; } else { // Other tokens. return tokens[t - 256]; } } std::string Parser::TokenToStringId(int t) { return TokenToString(t) + (t == kTokenIdentifier ? ": " + attribute_ : ""); } // Parses exactly nibbles worth of hex digits into a number, or error. 
CheckedError Parser::ParseHexNum(int nibbles, uint64_t *val) { for (int i = 0; i < nibbles; i++) if (!isxdigit(static_cast<const unsigned char>(cursor_[i]))) return Error("escape code must be followed by " + NumToString(nibbles) + " hex digits"); std::string target(cursor_, cursor_ + nibbles); *val = StringToUInt(target.c_str(), nullptr, 16); cursor_ += nibbles; return NoError(); } CheckedError Parser::SkipByteOrderMark() { if (static_cast<unsigned char>(*cursor_) != 0xef) return NoError(); cursor_++; if (static_cast<unsigned char>(*cursor_) != 0xbb) return Error("invalid utf-8 byte order mark"); cursor_++; if (static_cast<unsigned char>(*cursor_) != 0xbf) return Error("invalid utf-8 byte order mark"); cursor_++; return NoError(); } bool IsIdentifierStart(char c) { return isalpha(static_cast<unsigned char>(c)) || c == '_'; } CheckedError Parser::Next() { doc_comment_.clear(); bool seen_newline = false; attribute_.clear(); for (;;) { char c = *cursor_++; token_ = c; switch (c) { case '\0': cursor_--; token_ = kTokenEof; return NoError(); case ' ': case '\r': case '\t': break; case '\n': line_++; seen_newline = true; break; case '{': case '}': case '(': case ')': case '[': case ']': case ',': case ':': case ';': case '=': return NoError(); case '.': if(!isdigit(static_cast<const unsigned char>(*cursor_))) return NoError(); return Error("floating point constant can\'t start with \".\""); case '\"': case '\'': { int unicode_high_surrogate = -1; while (*cursor_ != c) { if (*cursor_ < ' ' && *cursor_ >= 0) return Error("illegal character in string constant"); if (*cursor_ == '\\') { cursor_++; if (unicode_high_surrogate != -1 && *cursor_ != 'u') { return Error( "illegal Unicode sequence (unpaired high surrogate)"); } switch (*cursor_) { case 'n': attribute_ += '\n'; cursor_++; break; case 't': attribute_ += '\t'; cursor_++; break; case 'r': attribute_ += '\r'; cursor_++; break; case 'b': attribute_ += '\b'; cursor_++; break; case 'f': attribute_ += '\f'; cursor_++; break; case '\"': attribute_ += '\"'; cursor_++; break; case '\'': attribute_ += '\''; cursor_++; break; case '\\': attribute_ += '\\'; cursor_++; break; case '/': attribute_ += '/'; cursor_++; break; case 'x': { // Not in the JSON standard cursor_++; uint64_t val; ECHECK(ParseHexNum(2, &val)); attribute_ += static_cast<char>(val); break; } case 'u': { cursor_++; uint64_t val; ECHECK(ParseHexNum(4, &val)); if (val >= 0xD800 && val <= 0xDBFF) { if (unicode_high_surrogate != -1) { return Error( "illegal Unicode sequence (multiple high surrogates)"); } else { unicode_high_surrogate = static_cast<int>(val); } } else if (val >= 0xDC00 && val <= 0xDFFF) { if (unicode_high_surrogate == -1) { return Error( "illegal Unicode sequence (unpaired low surrogate)"); } else { int code_point = 0x10000 + ((unicode_high_surrogate & 0x03FF) << 10) + (val & 0x03FF); ToUTF8(code_point, &attribute_); unicode_high_surrogate = -1; } } else { if (unicode_high_surrogate != -1) { return Error( "illegal Unicode sequence (unpaired high surrogate)"); } ToUTF8(static_cast<int>(val), &attribute_); } break; } default: return Error("unknown escape code in string constant"); } } else { // printable chars + UTF-8 bytes if (unicode_high_surrogate != -1) { return Error( "illegal Unicode sequence (unpaired high surrogate)"); } attribute_ += *cursor_++; } } if (unicode_high_surrogate != -1) { return Error( "illegal Unicode sequence (unpaired high surrogate)"); } cursor_++; if (!opts.allow_non_utf8 && !ValidateUTF8(attribute_)) { return Error("illegal UTF-8 sequence"); } 
token_ = kTokenStringConstant; return NoError(); } case '/': if (*cursor_ == '/') { const char *start = ++cursor_; while (*cursor_ && *cursor_ != '\n' && *cursor_ != '\r') cursor_++; if (*start == '/') { // documentation comment if (cursor_ != source_ && !seen_newline) return Error( "a documentation comment should be on a line on its own"); doc_comment_.push_back(std::string(start + 1, cursor_)); } break; } else if (*cursor_ == '*') { cursor_++; // TODO: make nested. while (*cursor_ != '*' || cursor_[1] != '/') { if (*cursor_ == '\n') line_++; if (!*cursor_) return Error("end of file in comment"); cursor_++; } cursor_ += 2; break; } // fall thru default: if (IsIdentifierStart(c)) { // Collect all chars of an identifier: const char *start = cursor_ - 1; while (isalnum(static_cast<unsigned char>(*cursor_)) || *cursor_ == '_') cursor_++; attribute_.append(start, cursor_); // First, see if it is a type keyword from the table of types: #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, \ PTYPE) \ if (attribute_ == IDLTYPE) { \ token_ = kToken ## ENUM; \ return NoError(); \ } FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) #undef FLATBUFFERS_TD // If it's a boolean constant keyword, turn those into integers, // which simplifies our logic downstream. if (attribute_ == "true" || attribute_ == "false") { attribute_ = NumToString(attribute_ == "true"); token_ = kTokenIntegerConstant; return NoError(); } // Check for declaration keywords: if (attribute_ == "table") { token_ = kTokenTable; return NoError(); } if (attribute_ == "struct") { token_ = kTokenStruct; return NoError(); } if (attribute_ == "enum") { token_ = kTokenEnum; return NoError(); } if (attribute_ == "union") { token_ = kTokenUnion; return NoError(); } if (attribute_ == "namespace") { token_ = kTokenNameSpace; return NoError(); } if (attribute_ == "root_type") { token_ = kTokenRootType; return NoError(); } if (attribute_ == "include") { token_ = kTokenInclude; return NoError(); } if (attribute_ == "attribute") { token_ = kTokenAttribute; return NoError(); } if (attribute_ == "file_identifier") { token_ = kTokenFileIdentifier; return NoError(); } if (attribute_ == "file_extension") { token_ = kTokenFileExtension; return NoError(); } if (attribute_ == "null") { token_ = kTokenNull; return NoError(); } if (attribute_ == "rpc_service") { token_ = kTokenService; return NoError(); } if (attribute_ == "native_include") { token_ = kTokenNativeInclude; return NoError(); } // If not, it is a user-defined identifier: token_ = kTokenIdentifier; return NoError(); } else if (isdigit(static_cast<unsigned char>(c)) || c == '-') { const char *start = cursor_ - 1; if (c == '-' && *cursor_ == '0' && (cursor_[1] == 'x' || cursor_[1] == 'X')) { ++start; ++cursor_; attribute_.append(&c, &c + 1); c = '0'; } if (c == '0' && (*cursor_ == 'x' || *cursor_ == 'X')) { cursor_++; while (isxdigit(static_cast<unsigned char>(*cursor_))) cursor_++; attribute_.append(start + 2, cursor_); attribute_ = NumToString(static_cast<int64_t>( StringToUInt(attribute_.c_str(), nullptr, 16))); token_ = kTokenIntegerConstant; return NoError(); } while (isdigit(static_cast<unsigned char>(*cursor_))) cursor_++; if (*cursor_ == '.' || *cursor_ == 'e' || *cursor_ == 'E') { if (*cursor_ == '.') { cursor_++; while (isdigit(static_cast<unsigned char>(*cursor_))) cursor_++; } // See if this float has a scientific notation suffix. 
Both JSON // and C++ (through strtod() we use) have the same format: if (*cursor_ == 'e' || *cursor_ == 'E') { cursor_++; if (*cursor_ == '+' || *cursor_ == '-') cursor_++; while (isdigit(static_cast<unsigned char>(*cursor_))) cursor_++; } token_ = kTokenFloatConstant; } else { token_ = kTokenIntegerConstant; } attribute_.append(start, cursor_); return NoError(); } std::string ch; ch = c; if (c < ' ' || c > '~') ch = "code: " + NumToString(c); return Error("illegal character: " + ch); } } } // Check if a given token is next. bool Parser::Is(int t) { return t == token_; } // Expect a given token to be next, consume it, or error if not present. CheckedError Parser::Expect(int t) { if (t != token_) { return Error("expecting: " + TokenToString(t) + " instead got: " + TokenToStringId(token_)); } NEXT(); return NoError(); } CheckedError Parser::ParseNamespacing(std::string *id, std::string *last) { while (Is('.')) { NEXT(); *id += "."; *id += attribute_; if (last) *last = attribute_; EXPECT(kTokenIdentifier); } return NoError(); } EnumDef *Parser::LookupEnum(const std::string &id) { // Search thru parent namespaces. for (int components = static_cast<int>(namespaces_.back()->components.size()); components >= 0; components--) { auto ed = enums_.Lookup( namespaces_.back()->GetFullyQualifiedName(id, components)); if (ed) return ed; } return nullptr; } CheckedError Parser::ParseTypeIdent(Type &type) { std::string id = attribute_; EXPECT(kTokenIdentifier); ECHECK(ParseNamespacing(&id, nullptr)); auto enum_def = LookupEnum(id); if (enum_def) { type = enum_def->underlying_type; if (enum_def->is_union) type.base_type = BASE_TYPE_UNION; } else { type.base_type = BASE_TYPE_STRUCT; type.struct_def = LookupCreateStruct(id); } return NoError(); } // Parse any IDL type. CheckedError Parser::ParseType(Type &type) { if (token_ >= kTokenBOOL && token_ <= kTokenSTRING) { type.base_type = static_cast<BaseType>(token_ - kTokenNONE); NEXT(); } else { if (token_ == kTokenIdentifier) { ECHECK(ParseTypeIdent(type)); } else if (token_ == '[') { NEXT(); Type subtype; ECHECK(ParseType(subtype)); if (subtype.base_type == BASE_TYPE_VECTOR) { // We could support this, but it will complicate things, and it's // easier to work around with a struct around the inner vector. 
return Error( "nested vector types not supported (wrap in table first)."); } type = Type(BASE_TYPE_VECTOR, subtype.struct_def, subtype.enum_def); type.element = subtype.base_type; EXPECT(']'); } else { return Error("illegal type syntax"); } } return NoError(); } CheckedError Parser::AddField(StructDef &struct_def, const std::string &name, const Type &type, FieldDef **dest) { auto &field = *new FieldDef(); field.value.offset = FieldIndexToOffset(static_cast<voffset_t>(struct_def.fields.vec.size())); field.name = name; field.file = struct_def.file; field.value.type = type; if (struct_def.fixed) { // statically compute the field offset auto size = InlineSize(type); auto alignment = InlineAlignment(type); // structs_ need to have a predictable format, so we need to align to // the largest scalar struct_def.minalign = std::max(struct_def.minalign, alignment); struct_def.PadLastField(alignment); field.value.offset = static_cast<voffset_t>(struct_def.bytesize); struct_def.bytesize += size; } if (struct_def.fields.Add(name, &field)) return Error("field already exists: " + name); *dest = &field; return NoError(); } CheckedError Parser::ParseField(StructDef &struct_def) { std::string name = attribute_; if (name == struct_def.name) return Error("field name can not be the same as table/struct name"); std::vector<std::string> dc = doc_comment_; EXPECT(kTokenIdentifier); EXPECT(':'); Type type; ECHECK(ParseType(type)); if (struct_def.fixed && !IsScalar(type.base_type) && !IsStruct(type)) return Error("structs_ may contain only scalar or struct fields"); FieldDef *typefield = nullptr; if (type.base_type == BASE_TYPE_UNION) { // For union fields, add a second auto-generated field to hold the type, // with a special suffix. ECHECK(AddField(struct_def, name + UnionTypeFieldSuffix(), type.enum_def->underlying_type, &typefield)); } else if (type.base_type == BASE_TYPE_VECTOR && type.element == BASE_TYPE_UNION) { // Only cpp supports the union vector feature so far. if (opts.lang_to_generate != IDLOptions::kCpp) { return Error("Vectors of unions are not yet supported in all " "the specified programming languages."); } // For vector of union fields, add a second auto-generated vector field to // hold the types, with a special suffix. 
Type union_vector(BASE_TYPE_VECTOR, nullptr, type.enum_def); union_vector.element = BASE_TYPE_UTYPE; ECHECK(AddField(struct_def, name + UnionTypeFieldSuffix(), union_vector, &typefield)); } FieldDef *field; ECHECK(AddField(struct_def, name, type, &field)); if (token_ == '=') { NEXT(); if (!IsScalar(type.base_type)) return Error("default values currently only supported for scalars"); ECHECK(ParseSingleValue(field->value)); } if (IsFloat(field->value.type.base_type)) { if (!strpbrk(field->value.constant.c_str(), ".eE")) field->value.constant += ".0"; } if (type.enum_def && IsScalar(type.base_type) && !struct_def.fixed && !type.enum_def->attributes.Lookup("bit_flags") && !type.enum_def->ReverseLookup(static_cast<int>( StringToInt(field->value.constant.c_str())))) return Error("enum " + type.enum_def->name + " does not have a declaration for this field\'s default of " + field->value.constant); field->doc_comment = dc; ECHECK(ParseMetaData(&field->attributes)); field->deprecated = field->attributes.Lookup("deprecated") != nullptr; auto hash_name = field->attributes.Lookup("hash"); if (hash_name) { switch (type.base_type) { case BASE_TYPE_INT: case BASE_TYPE_UINT: { if (FindHashFunction32(hash_name->constant.c_str()) == nullptr) return Error("Unknown hashing algorithm for 32 bit types: " + hash_name->constant); break; } case BASE_TYPE_LONG: case BASE_TYPE_ULONG: { if (FindHashFunction64(hash_name->constant.c_str()) == nullptr) return Error("Unknown hashing algorithm for 64 bit types: " + hash_name->constant); break; } default: return Error( "only int, uint, long and ulong data types support hashing."); } } auto cpp_type = field->attributes.Lookup("cpp_type"); if (cpp_type) { if (!hash_name) return Error("cpp_type can only be used with a hashed field"); } if (field->deprecated && struct_def.fixed) return Error("can't deprecate fields in a struct"); field->required = field->attributes.Lookup("required") != nullptr; if (field->required && (struct_def.fixed || IsScalar(field->value.type.base_type))) return Error("only non-scalar fields in tables may be 'required'"); field->key = field->attributes.Lookup("key") != nullptr; if (field->key) { if (struct_def.has_key) return Error("only one field may be set as 'key'"); struct_def.has_key = true; if (!IsScalar(field->value.type.base_type)) { field->required = true; if (field->value.type.base_type != BASE_TYPE_STRING) return Error("'key' field must be string or scalar type"); } } field->native_inline = field->attributes.Lookup("native_inline") != nullptr; if (field->native_inline && !IsStruct(field->value.type)) return Error("native_inline can only be defined on structs'"); auto nested = field->attributes.Lookup("nested_flatbuffer"); if (nested) { if (nested->type.base_type != BASE_TYPE_STRING) return Error( "nested_flatbuffer attribute must be a string (the root type)"); if (field->value.type.base_type != BASE_TYPE_VECTOR || field->value.type.element != BASE_TYPE_UCHAR) return Error( "nested_flatbuffer attribute may only apply to a vector of ubyte"); // This will cause an error if the root type of the nested flatbuffer // wasn't defined elsewhere. 
LookupCreateStruct(nested->constant); } if (field->attributes.Lookup("flexbuffer")) { field->flexbuffer = true; uses_flexbuffers_ = true; if (field->value.type.base_type != BASE_TYPE_VECTOR || field->value.type.element != BASE_TYPE_UCHAR) return Error( "flexbuffer attribute may only apply to a vector of ubyte"); } if (typefield) { // If this field is a union, and it has a manually assigned id, // the automatically added type field should have an id as well (of N - 1). auto attr = field->attributes.Lookup("id"); if (attr) { auto id = atoi(attr->constant.c_str()); auto val = new Value(); val->type = attr->type; val->constant = NumToString(id - 1); typefield->attributes.Add("id", val); } } EXPECT(';'); return NoError(); } CheckedError Parser::ParseString(Value &val) { auto s = attribute_; EXPECT(kTokenStringConstant); val.constant = NumToString(builder_.CreateString(s).o); return NoError(); } CheckedError Parser::ParseComma() { if (!opts.protobuf_ascii_alike) EXPECT(','); return NoError(); } CheckedError Parser::ParseAnyValue(Value &val, FieldDef *field, size_t parent_fieldn, const StructDef *parent_struct_def) { switch (val.type.base_type) { case BASE_TYPE_UNION: { assert(field); std::string constant; // Find corresponding type field we may have already parsed. for (auto elem = field_stack_.rbegin(); elem != field_stack_.rbegin() + parent_fieldn; ++elem) { auto &type = elem->second->value.type; if (type.base_type == BASE_TYPE_UTYPE && type.enum_def == val.type.enum_def) { constant = elem->first.constant; break; } } if (constant.empty()) { // We haven't seen the type field yet. Sadly a lot of JSON writers // output these in alphabetical order, meaning it comes after this // value. So we scan past the value to find it, then come back here. auto type_name = field->name + UnionTypeFieldSuffix(); assert(parent_struct_def); auto type_field = parent_struct_def->fields.Lookup(type_name); assert(type_field); // Guaranteed by ParseField(). // Remember where we are in the source file, so we can come back here. auto backup = *static_cast<ParserState *>(this); ECHECK(SkipAnyJsonValue()); // The table. ECHECK(ParseComma()); auto next_name = attribute_; if (Is(kTokenStringConstant)) { NEXT(); } else { EXPECT(kTokenIdentifier); } if (next_name != type_name) return Error("missing type field after this union value: " + type_name); EXPECT(':'); Value type_val = type_field->value; ECHECK(ParseAnyValue(type_val, type_field, 0, nullptr)); constant = type_val.constant; // Got the information we needed, now rewind: *static_cast<ParserState *>(this) = backup; } uint8_t enum_idx; ECHECK(atot(constant.c_str(), *this, &enum_idx)); auto enum_val = val.type.enum_def->ReverseLookup(enum_idx); if (!enum_val) return Error("illegal type id for: " + field->name); if (enum_val->union_type.base_type == BASE_TYPE_STRUCT) { ECHECK(ParseTable(*enum_val->union_type.struct_def, &val.constant, nullptr)); if (enum_val->union_type.struct_def->fixed) { // All BASE_TYPE_UNION values are offsets, so turn this into one. 
SerializeStruct(*enum_val->union_type.struct_def, val); builder_.ClearOffsets(); val.constant = NumToString(builder_.GetSize()); } } else if (enum_val->union_type.base_type == BASE_TYPE_STRING) { ECHECK(ParseString(val)); } else { assert(false); } break; } case BASE_TYPE_STRUCT: ECHECK(ParseTable(*val.type.struct_def, &val.constant, nullptr)); break; case BASE_TYPE_STRING: { ECHECK(ParseString(val)); break; } case BASE_TYPE_VECTOR: { uoffset_t off; ECHECK(ParseVector(val.type.VectorType(), &off)); val.constant = NumToString(off); break; } case BASE_TYPE_INT: case BASE_TYPE_UINT: case BASE_TYPE_LONG: case BASE_TYPE_ULONG: { if (field && field->attributes.Lookup("hash") && (token_ == kTokenIdentifier || token_ == kTokenStringConstant)) { ECHECK(ParseHash(val, field)); } else { ECHECK(ParseSingleValue(val)); } break; } default: ECHECK(ParseSingleValue(val)); break; } return NoError(); } void Parser::SerializeStruct(const StructDef &struct_def, const Value &val) { assert(val.constant.length() == struct_def.bytesize); builder_.Align(struct_def.minalign); builder_.PushBytes(reinterpret_cast<const uint8_t *>(val.constant.c_str()), struct_def.bytesize); builder_.AddStructOffset(val.offset, builder_.GetSize()); } CheckedError Parser::ParseTableDelimiters(size_t &fieldn, const StructDef *struct_def, const std::function<CheckedError(const std::string &name)> &body) { // We allow tables both as JSON object{ .. } with field names // or vector[..] with all fields in order char terminator = '}'; bool is_nested_vector = struct_def && Is('['); if (is_nested_vector) { NEXT(); terminator = ']'; } else { EXPECT('{'); } for (;;) { if ((!opts.strict_json || !fieldn) && Is(terminator)) break; std::string name; if (is_nested_vector) { if (fieldn > struct_def->fields.vec.size()) { return Error("too many unnamed fields in nested array"); } name = struct_def->fields.vec[fieldn]->name; } else { name = attribute_; if (Is(kTokenStringConstant)) { NEXT(); } else { EXPECT(opts.strict_json ? kTokenStringConstant : kTokenIdentifier); } if (!opts.protobuf_ascii_alike || !(Is('{') || Is('['))) EXPECT(':'); } ECHECK(body(name)); if (Is(terminator)) break; ECHECK(ParseComma()); } NEXT(); if (is_nested_vector && fieldn != struct_def->fields.vec.size()) { return Error("wrong number of unnamed fields in table vector"); } return NoError(); } CheckedError Parser::ParseTable(const StructDef &struct_def, std::string *value, uoffset_t *ovalue) { size_t fieldn = 0; auto err = ParseTableDelimiters(fieldn, &struct_def, [&](const std::string &name) -> CheckedError { auto field = struct_def.fields.Lookup(name); if (!field) { if (!opts.skip_unexpected_fields_in_json) { return Error("unknown field: " + name); } else { ECHECK(SkipAnyJsonValue()); } } else { if (Is(kTokenNull)) { NEXT(); // Ignore this field. } else { Value val = field->value; if (field->flexbuffer) { flexbuffers::Builder builder(1024, flexbuffers::BUILDER_FLAG_SHARE_ALL); ECHECK(ParseFlexBufferValue(&builder)); builder.Finish(); auto off = builder_.CreateVector(builder.GetBuffer()); val.constant = NumToString(off.o); } else { ECHECK(ParseAnyValue(val, field, fieldn, &struct_def)); } // Hardcoded insertion-sort with error-check. // If fields are specified in order, then this loop exits immediately. 
auto elem = field_stack_.rbegin(); for (; elem != field_stack_.rbegin() + fieldn; ++elem) { auto existing_field = elem->second; if (existing_field == field) return Error("field set more than once: " + field->name); if (existing_field->value.offset < field->value.offset) break; } // Note: elem points to before the insertion point, thus .base() points // to the correct spot. field_stack_.insert(elem.base(), std::make_pair(val, field)); fieldn++; } } return NoError(); }); ECHECK(err); // Check if all required fields are parsed. for (auto field_it = struct_def.fields.vec.begin(); field_it != struct_def.fields.vec.end(); ++field_it) { auto required_field = *field_it; if (!required_field->required) { continue; } bool found = false; for (auto pf_it = field_stack_.end() - fieldn; pf_it != field_stack_.end(); ++pf_it) { auto parsed_field = pf_it->second; if (parsed_field == required_field) { found = true; break; } } if (!found) { return Error("required field is missing: " + required_field->name + " in " + struct_def.name); } } if (struct_def.fixed && fieldn != struct_def.fields.vec.size()) return Error("struct: wrong number of initializers: " + struct_def.name); auto start = struct_def.fixed ? builder_.StartStruct(struct_def.minalign) : builder_.StartTable(); for (size_t size = struct_def.sortbysize ? sizeof(largest_scalar_t) : 1; size; size /= 2) { // Go through elements in reverse, since we're building the data backwards. for (auto it = field_stack_.rbegin(); it != field_stack_.rbegin() + fieldn; ++it) { auto &field_value = it->first; auto field = it->second; if (!struct_def.sortbysize || size == SizeOf(field_value.type.base_type)) { switch (field_value.type.base_type) { #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, \ PTYPE) \ case BASE_TYPE_ ## ENUM: \ builder_.Pad(field->padding); \ if (struct_def.fixed) { \ CTYPE val; \ ECHECK(atot(field_value.constant.c_str(), *this, &val)); \ builder_.PushElement(val); \ } else { \ CTYPE val, valdef; \ ECHECK(atot(field_value.constant.c_str(), *this, &val)); \ ECHECK(atot(field->value.constant.c_str(), *this, &valdef)); \ builder_.AddElement(field_value.offset, val, valdef); \ } \ break; FLATBUFFERS_GEN_TYPES_SCALAR(FLATBUFFERS_TD); #undef FLATBUFFERS_TD #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, \ PTYPE) \ case BASE_TYPE_ ## ENUM: \ builder_.Pad(field->padding); \ if (IsStruct(field->value.type)) { \ SerializeStruct(*field->value.type.struct_def, field_value); \ } else { \ CTYPE val; \ ECHECK(atot(field_value.constant.c_str(), *this, &val)); \ builder_.AddOffset(field_value.offset, val); \ } \ break; FLATBUFFERS_GEN_TYPES_POINTER(FLATBUFFERS_TD); #undef FLATBUFFERS_TD } } } } for (size_t i = 0; i < fieldn; i++) field_stack_.pop_back(); if (struct_def.fixed) { builder_.ClearOffsets(); builder_.EndStruct(); assert(value); // Temporarily store this struct in the value string, since it is to // be serialized in-place elsewhere. 
value->assign( reinterpret_cast<const char *>(builder_.GetCurrentBufferPointer()), struct_def.bytesize); builder_.PopBytes(struct_def.bytesize); assert(!ovalue); } else { auto val = builder_.EndTable(start, static_cast<voffset_t>(struct_def.fields.vec.size())); if (ovalue) *ovalue = val; if (value) *value = NumToString(val); } return NoError(); } CheckedError Parser::ParseVectorDelimiters(size_t &count, const std::function<CheckedError()> &body) { EXPECT('['); for (;;) { if ((!opts.strict_json || !count) && Is(']')) break; ECHECK(body()); count++; if (Is(']')) break; ECHECK(ParseComma()); } NEXT(); return NoError(); } CheckedError Parser::ParseVector(const Type &type, uoffset_t *ovalue) { size_t count = 0; auto err = ParseVectorDelimiters(count, [&]() -> CheckedError { Value val; val.type = type; ECHECK(ParseAnyValue(val, nullptr, 0, nullptr)); field_stack_.push_back(std::make_pair(val, nullptr)); return NoError(); }); ECHECK(err); builder_.StartVector(count * InlineSize(type) / InlineAlignment(type), InlineAlignment(type)); for (size_t i = 0; i < count; i++) { // start at the back, since we're building the data backwards. auto &val = field_stack_.back().first; switch (val.type.base_type) { #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \ case BASE_TYPE_ ## ENUM: \ if (IsStruct(val.type)) SerializeStruct(*val.type.struct_def, val); \ else { \ CTYPE elem; \ ECHECK(atot(val.constant.c_str(), *this, &elem)); \ builder_.PushElement(elem); \ } \ break; FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) #undef FLATBUFFERS_TD } field_stack_.pop_back(); } builder_.ClearOffsets(); *ovalue = builder_.EndVector(count); return NoError(); } CheckedError Parser::ParseMetaData(SymbolTable<Value> *attributes) { if (Is('(')) { NEXT(); for (;;) { auto name = attribute_; EXPECT(kTokenIdentifier); if (known_attributes_.find(name) == known_attributes_.end()) return Error("user define attributes must be declared before use: " + name); auto e = new Value(); attributes->Add(name, e); if (Is(':')) { NEXT(); ECHECK(ParseSingleValue(*e)); } if (Is(')')) { NEXT(); break; } EXPECT(','); } } return NoError(); } CheckedError Parser::TryTypedValue(int dtoken, bool check, Value &e, BaseType req, bool *destmatch) { bool match = dtoken == token_; if (match) { *destmatch = true; e.constant = attribute_; if (!check) { if (e.type.base_type == BASE_TYPE_NONE) { e.type.base_type = req; } else { return Error(std::string("type mismatch: expecting: ") + kTypeNames[e.type.base_type] + ", found: " + kTypeNames[req]); } } NEXT(); } return NoError(); } CheckedError Parser::ParseEnumFromString(Type &type, int64_t *result) { *result = 0; // Parse one or more enum identifiers, separated by spaces. const char *next = attribute_.c_str(); do { const char *divider = strchr(next, ' '); std::string word; if (divider) { word = std::string(next, divider); next = divider + strspn(divider, " "); } else { word = next; next += word.length(); } if (type.enum_def) { // The field has an enum type auto enum_val = type.enum_def->vals.Lookup(word); if (!enum_val) return Error("unknown enum value: " + word + ", for enum: " + type.enum_def->name); *result |= enum_val->value; } else { // No enum type, probably integral field. if (!IsInteger(type.base_type)) return Error("not a valid value for this field: " + word); // TODO: could check if its a valid number constant here. 
const char *dot = strrchr(word.c_str(), '.'); if (!dot) return Error("enum values need to be qualified by an enum type"); std::string enum_def_str(word.c_str(), dot); std::string enum_val_str(dot + 1, word.c_str() + word.length()); auto enum_def = LookupEnum(enum_def_str); if (!enum_def) return Error("unknown enum: " + enum_def_str); auto enum_val = enum_def->vals.Lookup(enum_val_str); if (!enum_val) return Error("unknown enum value: " + enum_val_str); *result |= enum_val->value; } } while(*next); return NoError(); } CheckedError Parser::ParseHash(Value &e, FieldDef* field) { assert(field); Value *hash_name = field->attributes.Lookup("hash"); switch (e.type.base_type) { case BASE_TYPE_INT: { auto hash = FindHashFunction32(hash_name->constant.c_str()); int32_t hashed_value = static_cast<int32_t>(hash(attribute_.c_str())); e.constant = NumToString(hashed_value); break; } case BASE_TYPE_UINT: { auto hash = FindHashFunction32(hash_name->constant.c_str()); uint32_t hashed_value = hash(attribute_.c_str()); e.constant = NumToString(hashed_value); break; } case BASE_TYPE_LONG: { auto hash = FindHashFunction64(hash_name->constant.c_str()); int64_t hashed_value = static_cast<int64_t>(hash(attribute_.c_str())); e.constant = NumToString(hashed_value); break; } case BASE_TYPE_ULONG: { auto hash = FindHashFunction64(hash_name->constant.c_str()); uint64_t hashed_value = hash(attribute_.c_str()); e.constant = NumToString(hashed_value); break; } default: assert(0); } NEXT(); return NoError(); } CheckedError Parser::TokenError() { return Error("cannot parse value starting with: " + TokenToStringId(token_)); } CheckedError Parser::ParseSingleValue(Value &e) { // First see if this could be a conversion function: if (token_ == kTokenIdentifier && *cursor_ == '(') { auto functionname = attribute_; NEXT(); EXPECT('('); ECHECK(ParseSingleValue(e)); EXPECT(')'); #define FLATBUFFERS_FN_DOUBLE(name, op) \ if (functionname == name) { \ auto x = strtod(e.constant.c_str(), nullptr); \ e.constant = NumToString(op); \ } FLATBUFFERS_FN_DOUBLE("deg", x / M_PI * 180); FLATBUFFERS_FN_DOUBLE("rad", x * M_PI / 180); FLATBUFFERS_FN_DOUBLE("sin", sin(x)); FLATBUFFERS_FN_DOUBLE("cos", cos(x)); FLATBUFFERS_FN_DOUBLE("tan", tan(x)); FLATBUFFERS_FN_DOUBLE("asin", asin(x)); FLATBUFFERS_FN_DOUBLE("acos", acos(x)); FLATBUFFERS_FN_DOUBLE("atan", atan(x)); // TODO(wvo): add more useful conversion functions here. #undef FLATBUFFERS_FN_DOUBLE // Then check if this could be a string/identifier enum value: } else if (e.type.base_type != BASE_TYPE_STRING && e.type.base_type != BASE_TYPE_NONE && (token_ == kTokenIdentifier || token_ == kTokenStringConstant)) { if (IsIdentifierStart(attribute_[0])) { // Enum value. int64_t val; ECHECK(ParseEnumFromString(e.type, &val)); e.constant = NumToString(val); NEXT(); } else { // Numeric constant in string. if (IsInteger(e.type.base_type)) { char *end; e.constant = NumToString(StringToInt(attribute_.c_str(), &end)); if (*end) return Error("invalid integer: " + attribute_); } else if (IsFloat(e.type.base_type)) { char *end; e.constant = NumToString(strtod(attribute_.c_str(), &end)); if (*end) return Error("invalid float: " + attribute_); } else { assert(0); // Shouldn't happen, we covered all types. 
e.constant = "0"; } NEXT(); } } else { bool match = false; ECHECK(TryTypedValue(kTokenIntegerConstant, IsScalar(e.type.base_type), e, BASE_TYPE_INT, &match)); ECHECK(TryTypedValue(kTokenFloatConstant, IsFloat(e.type.base_type), e, BASE_TYPE_FLOAT, &match)); ECHECK(TryTypedValue(kTokenStringConstant, e.type.base_type == BASE_TYPE_STRING, e, BASE_TYPE_STRING, &match)); if (!match) return TokenError(); } return NoError(); } StructDef *Parser::LookupCreateStruct(const std::string &name, bool create_if_new, bool definition) { std::string qualified_name = namespaces_.back()->GetFullyQualifiedName(name); // See if it exists pre-declared by an unqualified use. auto struct_def = structs_.Lookup(name); if (struct_def && struct_def->predecl) { if (definition) { // Make sure it has the current namespace, and is registered under its // qualified name. struct_def->defined_namespace = namespaces_.back(); structs_.Move(name, qualified_name); } return struct_def; } // See if it exists pre-declared by an qualified use. struct_def = structs_.Lookup(qualified_name); if (struct_def && struct_def->predecl) { if (definition) { // Make sure it has the current namespace. struct_def->defined_namespace = namespaces_.back(); } return struct_def; } if (!definition) { // Search thru parent namespaces. for (size_t components = namespaces_.back()->components.size(); components && !struct_def; components--) { struct_def = structs_.Lookup( namespaces_.back()->GetFullyQualifiedName(name, components - 1)); } } if (!struct_def && create_if_new) { struct_def = new StructDef(); if (definition) { structs_.Add(qualified_name, struct_def); struct_def->name = name; struct_def->defined_namespace = namespaces_.back(); } else { // Not a definition. // Rather than failing, we create a "pre declared" StructDef, due to // circular references, and check for errors at the end of parsing. // It is defined in the root namespace, since we don't know what the // final namespace will be. // TODO: maybe safer to use special namespace? structs_.Add(name, struct_def); struct_def->name = name; struct_def->defined_namespace = new Namespace(); namespaces_.insert(namespaces_.begin(), struct_def->defined_namespace); } } return struct_def; } CheckedError Parser::ParseEnum(bool is_union, EnumDef **dest) { std::vector<std::string> enum_comment = doc_comment_; NEXT(); std::string enum_name = attribute_; EXPECT(kTokenIdentifier); auto &enum_def = *new EnumDef(); enum_def.name = enum_name; enum_def.file = file_being_parsed_; enum_def.doc_comment = enum_comment; enum_def.is_union = is_union; enum_def.defined_namespace = namespaces_.back(); if (enums_.Add(namespaces_.back()->GetFullyQualifiedName(enum_name), &enum_def)) return Error("enum already exists: " + enum_name); if (is_union) { enum_def.underlying_type.base_type = BASE_TYPE_UTYPE; enum_def.underlying_type.enum_def = &enum_def; } else { if (opts.proto_mode) { enum_def.underlying_type.base_type = BASE_TYPE_INT; } else { // Give specialized error message, since this type spec used to // be optional in the first FlatBuffers release. if (!Is(':')) { return Error("must specify the underlying integer type for this" " enum (e.g. \': short\', which was the default)."); } else { NEXT(); } // Specify the integer type underlying this enum. ECHECK(ParseType(enum_def.underlying_type)); if (!IsInteger(enum_def.underlying_type.base_type)) return Error("underlying enum type must be integral"); } // Make this type refer back to the enum it was derived from. 
enum_def.underlying_type.enum_def = &enum_def; } ECHECK(ParseMetaData(&enum_def.attributes)); EXPECT('{'); if (is_union) enum_def.vals.Add("NONE", new EnumVal("NONE", 0)); for (;;) { if (opts.proto_mode && attribute_ == "option") { ECHECK(ParseProtoOption()); } else { auto value_name = attribute_; auto full_name = value_name; std::vector<std::string> value_comment = doc_comment_; EXPECT(kTokenIdentifier); if (is_union) { ECHECK(ParseNamespacing(&full_name, &value_name)); if (opts.union_value_namespacing) { // Since we can't namespace the actual enum identifiers, turn // namespace parts into part of the identifier. value_name = full_name; std::replace(value_name.begin(), value_name.end(), '.', '_'); } } auto prevsize = enum_def.vals.vec.size(); auto value = enum_def.vals.vec.size() ? enum_def.vals.vec.back()->value + 1 : 0; auto &ev = *new EnumVal(value_name, value); if (enum_def.vals.Add(value_name, &ev)) return Error("enum value already exists: " + value_name); ev.doc_comment = value_comment; if (is_union) { if (Is(':')) { NEXT(); ECHECK(ParseType(ev.union_type)); if (ev.union_type.base_type != BASE_TYPE_STRUCT && ev.union_type.base_type != BASE_TYPE_STRING) return Error("union value type may only be table/struct/string"); enum_def.uses_type_aliases = true; } else { ev.union_type = Type(BASE_TYPE_STRUCT, LookupCreateStruct(full_name)); } } if (Is('=')) { NEXT(); ev.value = StringToInt(attribute_.c_str()); EXPECT(kTokenIntegerConstant); if (!opts.proto_mode && prevsize && enum_def.vals.vec[prevsize - 1]->value >= ev.value) return Error("enum values must be specified in ascending order"); } if (is_union) { if (ev.value < 0 || ev.value >= 256) return Error("union enum value must fit in a ubyte"); } if (opts.proto_mode && Is('[')) { NEXT(); // ignore attributes on enums. while (token_ != ']') NEXT(); NEXT(); } } if (!Is(opts.proto_mode ? ';' : ',')) break; NEXT(); if (Is('}')) break; } EXPECT('}'); if (enum_def.attributes.Lookup("bit_flags")) { for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end(); ++it) { if (static_cast<size_t>((*it)->value) >= SizeOf(enum_def.underlying_type.base_type) * 8) return Error("bit flag out of range of underlying integral type"); (*it)->value = 1LL << (*it)->value; } } if (dest) *dest = &enum_def; types_.Add(namespaces_.back()->GetFullyQualifiedName(enum_def.name), new Type(BASE_TYPE_UNION, nullptr, &enum_def)); return NoError(); } CheckedError Parser::StartStruct(const std::string &name, StructDef **dest) { auto &struct_def = *LookupCreateStruct(name, true, true); if (!struct_def.predecl) return Error("datatype already exists: " + name); struct_def.predecl = false; struct_def.name = name; struct_def.file = file_being_parsed_; // Move this struct to the back of the vector just in case it was predeclared, // to preserve declaration order. 
*std::remove(structs_.vec.begin(), structs_.vec.end(), &struct_def) = &struct_def; *dest = &struct_def; return NoError(); } CheckedError Parser::CheckClash(std::vector<FieldDef*> &fields, StructDef *struct_def, const char *suffix, BaseType basetype) { auto len = strlen(suffix); for (auto it = fields.begin(); it != fields.end(); ++it) { auto &fname = (*it)->name; if (fname.length() > len && fname.compare(fname.length() - len, len, suffix) == 0 && (*it)->value.type.base_type != BASE_TYPE_UTYPE) { auto field = struct_def->fields.Lookup( fname.substr(0, fname.length() - len)); if (field && field->value.type.base_type == basetype) return Error("Field " + fname + " would clash with generated functions for field " + field->name); } } return NoError(); } static bool compareFieldDefs(const FieldDef *a, const FieldDef *b) { auto a_id = atoi(a->attributes.Lookup("id")->constant.c_str()); auto b_id = atoi(b->attributes.Lookup("id")->constant.c_str()); return a_id < b_id; } CheckedError Parser::ParseDecl() { std::vector<std::string> dc = doc_comment_; bool fixed = Is(kTokenStruct); if (fixed) NEXT() else EXPECT(kTokenTable); std::string name = attribute_; EXPECT(kTokenIdentifier); StructDef *struct_def; ECHECK(StartStruct(name, &struct_def)); struct_def->doc_comment = dc; struct_def->fixed = fixed; ECHECK(ParseMetaData(&struct_def->attributes)); struct_def->sortbysize = struct_def->attributes.Lookup("original_order") == nullptr && !fixed; EXPECT('{'); while (token_ != '}') ECHECK(ParseField(*struct_def)); auto force_align = struct_def->attributes.Lookup("force_align"); if (fixed && force_align) { auto align = static_cast<size_t>(atoi(force_align->constant.c_str())); if (force_align->type.base_type != BASE_TYPE_INT || align < struct_def->minalign || align > FLATBUFFERS_MAX_ALIGNMENT || align & (align - 1)) return Error("force_align must be a power of two integer ranging from the" "struct\'s natural alignment to " + NumToString(FLATBUFFERS_MAX_ALIGNMENT)); struct_def->minalign = align; } struct_def->PadLastField(struct_def->minalign); // Check if this is a table that has manual id assignments auto &fields = struct_def->fields.vec; if (!struct_def->fixed && fields.size()) { size_t num_id_fields = 0; for (auto it = fields.begin(); it != fields.end(); ++it) { if ((*it)->attributes.Lookup("id")) num_id_fields++; } // If any fields have ids.. if (num_id_fields) { // Then all fields must have them. if (num_id_fields != fields.size()) return Error( "either all fields or no fields must have an 'id' attribute"); // Simply sort by id, then the fields are the same as if no ids had // been specified. std::sort(fields.begin(), fields.end(), compareFieldDefs); // Verify we have a contiguous set, and reassign vtable offsets. 
for (int i = 0; i < static_cast<int>(fields.size()); i++) { if (i != atoi(fields[i]->attributes.Lookup("id")->constant.c_str())) return Error("field id\'s must be consecutive from 0, id " + NumToString(i) + " missing or set twice"); fields[i]->value.offset = FieldIndexToOffset(static_cast<voffset_t>(i)); } } } ECHECK(CheckClash(fields, struct_def, UnionTypeFieldSuffix(), BASE_TYPE_UNION)); ECHECK(CheckClash(fields, struct_def, "Type", BASE_TYPE_UNION)); ECHECK(CheckClash(fields, struct_def, "_length", BASE_TYPE_VECTOR)); ECHECK(CheckClash(fields, struct_def, "Length", BASE_TYPE_VECTOR)); ECHECK(CheckClash(fields, struct_def, "_byte_vector", BASE_TYPE_STRING)); ECHECK(CheckClash(fields, struct_def, "ByteVector", BASE_TYPE_STRING)); EXPECT('}'); types_.Add(namespaces_.back()->GetFullyQualifiedName(struct_def->name), new Type(BASE_TYPE_STRUCT, struct_def, nullptr)); return NoError(); } CheckedError Parser::ParseService() { std::vector<std::string> service_comment = doc_comment_; NEXT(); auto service_name = attribute_; EXPECT(kTokenIdentifier); auto &service_def = *new ServiceDef(); service_def.name = service_name; service_def.file = file_being_parsed_; service_def.doc_comment = service_comment; service_def.defined_namespace = namespaces_.back(); if (services_.Add(namespaces_.back()->GetFullyQualifiedName(service_name), &service_def)) return Error("service already exists: " + service_name); ECHECK(ParseMetaData(&service_def.attributes)); EXPECT('{'); do { auto rpc_name = attribute_; EXPECT(kTokenIdentifier); EXPECT('('); Type reqtype, resptype; ECHECK(ParseTypeIdent(reqtype)); EXPECT(')'); EXPECT(':'); ECHECK(ParseTypeIdent(resptype)); if (reqtype.base_type != BASE_TYPE_STRUCT || reqtype.struct_def->fixed || resptype.base_type != BASE_TYPE_STRUCT || resptype.struct_def->fixed) return Error("rpc request and response types must be tables"); auto &rpc = *new RPCCall(); rpc.name = rpc_name; rpc.request = reqtype.struct_def; rpc.response = resptype.struct_def; if (service_def.calls.Add(rpc_name, &rpc)) return Error("rpc already exists: " + rpc_name); ECHECK(ParseMetaData(&rpc.attributes)); EXPECT(';'); } while (token_ != '}'); NEXT(); return NoError(); } bool Parser::SetRootType(const char *name) { root_struct_def_ = structs_.Lookup(name); if (!root_struct_def_) root_struct_def_ = structs_.Lookup( namespaces_.back()->GetFullyQualifiedName(name)); return root_struct_def_ != nullptr; } void Parser::MarkGenerated() { // This function marks all existing definitions as having already // been generated, which signals no code for included files should be // generated. for (auto it = enums_.vec.begin(); it != enums_.vec.end(); ++it) { (*it)->generated = true; } for (auto it = structs_.vec.begin(); it != structs_.vec.end(); ++it) { if (!(*it)->predecl) { (*it)->generated = true; } } for (auto it = services_.vec.begin(); it != services_.vec.end(); ++it) { (*it)->generated = true; } } CheckedError Parser::ParseNamespace() { NEXT(); auto ns = new Namespace(); namespaces_.push_back(ns); if (token_ != ';') { for (;;) { ns->components.push_back(attribute_); EXPECT(kTokenIdentifier); if (Is('.')) NEXT() else break; } } EXPECT(';'); return NoError(); } static bool compareEnumVals(const EnumVal *a, const EnumVal* b) { return a->value < b->value; } // Best effort parsing of .proto declarations, with the aim to turn them // in the closest corresponding FlatBuffer equivalent. // We parse everything as identifiers instead of keywords, since we don't // want protobuf keywords to become invalid identifiers in FlatBuffers. 
CheckedError Parser::ParseProtoDecl() { bool isextend = attribute_ == "extend"; if (attribute_ == "package") { // These are identical in syntax to FlatBuffer's namespace decl. ECHECK(ParseNamespace()); } else if (attribute_ == "message" || isextend) { std::vector<std::string> struct_comment = doc_comment_; NEXT(); StructDef *struct_def = nullptr; if (isextend) { if (Is('.')) NEXT(); // qualified names may start with a . ? auto id = attribute_; EXPECT(kTokenIdentifier); ECHECK(ParseNamespacing(&id, nullptr)); struct_def = LookupCreateStruct(id, false); if (!struct_def) return Error("cannot extend unknown message type: " + id); } else { std::string name = attribute_; EXPECT(kTokenIdentifier); ECHECK(StartStruct(name, &struct_def)); // Since message definitions can be nested, we create a new namespace. auto ns = new Namespace(); // Copy of current namespace. *ns = *namespaces_.back(); // But with current message name. ns->components.push_back(name); namespaces_.push_back(ns); } struct_def->doc_comment = struct_comment; ECHECK(ParseProtoFields(struct_def, isextend, false)); if (!isextend) { // We have to remove the nested namespace, but we can't just throw it // away, so put it at the beginning of the vector. auto ns = namespaces_.back(); namespaces_.pop_back(); namespaces_.insert(namespaces_.begin(), ns); } if (Is(';')) NEXT(); } else if (attribute_ == "enum") { // These are almost the same, just with different terminator: EnumDef *enum_def; ECHECK(ParseEnum(false, &enum_def)); if (Is(';')) NEXT(); // Protobuf allows them to be specified in any order, so sort afterwards. auto &v = enum_def->vals.vec; std::sort(v.begin(), v.end(), compareEnumVals); // Temp: remove any duplicates, as .fbs files can't handle them. for (auto it = v.begin(); it != v.end(); ) { if (it != v.begin() && it[0]->value == it[-1]->value) it = v.erase(it); else ++it; } } else if (attribute_ == "syntax") { // Skip these. NEXT(); EXPECT('='); EXPECT(kTokenStringConstant); EXPECT(';'); } else if (attribute_ == "option") { // Skip these. ECHECK(ParseProtoOption()); EXPECT(';'); } else if (attribute_ == "service") { // Skip these. NEXT(); EXPECT(kTokenIdentifier); ECHECK(ParseProtoCurliesOrIdent()); } else { return Error("don\'t know how to parse .proto declaration starting with " + TokenToStringId(token_)); } return NoError(); } CheckedError Parser::ParseProtoFields(StructDef *struct_def, bool isextend, bool inside_oneof) { EXPECT('{'); while (token_ != '}') { if (attribute_ == "message" || attribute_ == "extend" || attribute_ == "enum") { // Nested declarations. ECHECK(ParseProtoDecl()); } else if (attribute_ == "extensions") { // Skip these. NEXT(); EXPECT(kTokenIntegerConstant); if (Is(kTokenIdentifier)) { NEXT(); // to NEXT(); // num } EXPECT(';'); } else if (attribute_ == "option") { // Skip these. ECHECK(ParseProtoOption()); EXPECT(';'); } else if (attribute_ == "reserved") { // Skip these. NEXT(); EXPECT(kTokenIntegerConstant); while (Is(',')) { NEXT(); EXPECT(kTokenIntegerConstant); } EXPECT(';'); } else { std::vector<std::string> field_comment = doc_comment_; // Parse the qualifier. bool required = false; bool repeated = false; bool oneof = false; if (!inside_oneof) { if (attribute_ == "optional") { // This is the default. 
EXPECT(kTokenIdentifier); } else if (attribute_ == "required") { required = true; EXPECT(kTokenIdentifier); } else if (attribute_ == "repeated") { repeated = true; EXPECT(kTokenIdentifier); } else if (attribute_ == "oneof") { oneof = true; EXPECT(kTokenIdentifier); } else { // can't error, proto3 allows decls without any of the above. } } StructDef *anonymous_struct = nullptr; Type type; if (attribute_ == "group" || oneof) { if (!oneof) EXPECT(kTokenIdentifier); auto name = "Anonymous" + NumToString(anonymous_counter++); ECHECK(StartStruct(name, &anonymous_struct)); type = Type(BASE_TYPE_STRUCT, anonymous_struct); } else { ECHECK(ParseTypeFromProtoType(&type)); } // Repeated elements get mapped to a vector. if (repeated) { type.element = type.base_type; type.base_type = BASE_TYPE_VECTOR; } std::string name = attribute_; // Protos may use our keywords "attribute" & "namespace" as an identifier. if (Is(kTokenAttribute) || Is(kTokenNameSpace)) { NEXT(); // TODO: simpler to just not make these keywords? name += "_"; // Have to make it not a keyword. } else { EXPECT(kTokenIdentifier); } if (!oneof) { // Parse the field id. Since we're just translating schemas, not // any kind of binary compatibility, we can safely ignore these, and // assign our own. EXPECT('='); EXPECT(kTokenIntegerConstant); } FieldDef *field = nullptr; if (isextend) { // We allow a field to be re-defined when extending. // TODO: are there situations where that is problematic? field = struct_def->fields.Lookup(name); } if (!field) ECHECK(AddField(*struct_def, name, type, &field)); field->doc_comment = field_comment; if (!IsScalar(type.base_type)) field->required = required; // See if there's a default specified. if (Is('[')) { NEXT(); for (;;) { auto key = attribute_; ECHECK(ParseProtoKey()); EXPECT('='); auto val = attribute_; ECHECK(ParseProtoCurliesOrIdent()); if (key == "default") { // Temp: skip non-numeric defaults (enums). auto numeric = strpbrk(val.c_str(), "0123456789-+."); if (IsScalar(type.base_type) && numeric == val.c_str()) field->value.constant = val; } else if (key == "deprecated") { field->deprecated = val == "true"; } if (!Is(',')) break; NEXT(); } EXPECT(']'); } if (anonymous_struct) { ECHECK(ParseProtoFields(anonymous_struct, false, oneof)); if (Is(';')) NEXT(); } else { EXPECT(';'); } } } NEXT(); return NoError(); } CheckedError Parser::ParseProtoKey() { if (token_ == '(') { NEXT(); // Skip "(a.b)" style custom attributes. while (token_ == '.' || token_ == kTokenIdentifier) NEXT(); EXPECT(')'); while (Is('.')) { NEXT(); EXPECT(kTokenIdentifier); } } else { EXPECT(kTokenIdentifier); } return NoError(); } CheckedError Parser::ParseProtoCurliesOrIdent() { if (Is('{')) { NEXT(); for (int nesting = 1; nesting; ) { if (token_ == '{') nesting++; else if (token_ == '}') nesting--; NEXT(); } } else { NEXT(); // Any single token. } return NoError(); } CheckedError Parser::ParseProtoOption() { NEXT(); ECHECK(ParseProtoKey()); EXPECT('='); ECHECK(ParseProtoCurliesOrIdent()); return NoError(); } // Parse a protobuf type, and map it to the corresponding FlatBuffer one. 
CheckedError Parser::ParseTypeFromProtoType(Type *type) { struct type_lookup { const char *proto_type; BaseType fb_type; }; static type_lookup lookup[] = { { "float", BASE_TYPE_FLOAT }, { "double", BASE_TYPE_DOUBLE }, { "int32", BASE_TYPE_INT }, { "int64", BASE_TYPE_LONG }, { "uint32", BASE_TYPE_UINT }, { "uint64", BASE_TYPE_ULONG }, { "sint32", BASE_TYPE_INT }, { "sint64", BASE_TYPE_LONG }, { "fixed32", BASE_TYPE_UINT }, { "fixed64", BASE_TYPE_ULONG }, { "sfixed32", BASE_TYPE_INT }, { "sfixed64", BASE_TYPE_LONG }, { "bool", BASE_TYPE_BOOL }, { "string", BASE_TYPE_STRING }, { "bytes", BASE_TYPE_STRING }, { nullptr, BASE_TYPE_NONE } }; for (auto tl = lookup; tl->proto_type; tl++) { if (attribute_ == tl->proto_type) { type->base_type = tl->fb_type; NEXT(); return NoError(); } } if (Is('.')) NEXT(); // qualified names may start with a . ? ECHECK(ParseTypeIdent(*type)); return NoError(); } CheckedError Parser::SkipAnyJsonValue() { switch (token_) { case '{': { size_t fieldn = 0; return ParseTableDelimiters(fieldn, nullptr, [&](const std::string &) -> CheckedError { ECHECK(SkipAnyJsonValue()); fieldn++; return NoError(); }); } case '[': { size_t count = 0; return ParseVectorDelimiters(count, [&]() { return SkipAnyJsonValue(); }); } case kTokenStringConstant: EXPECT(kTokenStringConstant); break; case kTokenIntegerConstant: EXPECT(kTokenIntegerConstant); break; case kTokenFloatConstant: EXPECT(kTokenFloatConstant); break; default: return TokenError(); } return NoError(); } CheckedError Parser::ParseFlexBufferValue(flexbuffers::Builder *builder) { switch (token_) { case '{': { auto start = builder->StartMap(); size_t fieldn = 0; auto err = ParseTableDelimiters(fieldn, nullptr, [&](const std::string &name) -> CheckedError { builder->Key(name); ECHECK(ParseFlexBufferValue(builder)); fieldn++; return NoError(); }); ECHECK(err); builder->EndMap(start); break; } case '[':{ auto start = builder->StartVector(); size_t count = 0; ECHECK(ParseVectorDelimiters(count, [&]() { return ParseFlexBufferValue(builder); })); builder->EndVector(start, false, false); break; } case kTokenStringConstant: builder->String(attribute_); EXPECT(kTokenStringConstant); break; case kTokenIntegerConstant: builder->Int(StringToInt(attribute_.c_str())); EXPECT(kTokenIntegerConstant); break; case kTokenFloatConstant: builder->Double(strtod(attribute_.c_str(), nullptr)); EXPECT(kTokenFloatConstant); break; default: return TokenError(); } return NoError(); } bool Parser::ParseFlexBuffer(const char *source, const char *source_filename, flexbuffers::Builder *builder) { auto ok = !StartParseFile(source, source_filename).Check() && !ParseFlexBufferValue(builder).Check(); if (ok) builder->Finish(); return ok; } bool Parser::Parse(const char *source, const char **include_paths, const char *source_filename) { return !ParseRoot(source, include_paths, source_filename).Check(); } CheckedError Parser::StartParseFile(const char *source, const char *source_filename) { file_being_parsed_ = source_filename ? source_filename : ""; source_ = cursor_ = source; line_ = 1; error_.clear(); ECHECK(SkipByteOrderMark()); NEXT(); if (Is(kTokenEof)) return Error("input file is empty"); return NoError(); } CheckedError Parser::ParseRoot(const char *source, const char **include_paths, const char *source_filename) { ECHECK(DoParse(source, include_paths, source_filename, nullptr)); // Check that all types were defined. 
for (auto it = structs_.vec.begin(); it != structs_.vec.end(); ++it) { if ((*it)->predecl) { return Error("type referenced but not defined: " + (*it)->name); } } // This check has to happen here and not earlier, because only now do we // know for sure what the type of these are. for (auto it = enums_.vec.begin(); it != enums_.vec.end(); ++it) { auto &enum_def = **it; if (enum_def.is_union) { for (auto val_it = enum_def.vals.vec.begin(); val_it != enum_def.vals.vec.end(); ++val_it) { auto &val = **val_it; if (opts.lang_to_generate != IDLOptions::kCpp && val.union_type.struct_def && val.union_type.struct_def->fixed) return Error( "only tables can be union elements in the generated language: " + val.name); } } } return NoError(); } CheckedError Parser::DoParse(const char *source, const char **include_paths, const char *source_filename, const char *include_filename) { if (source_filename && included_files_.find(source_filename) == included_files_.end()) { included_files_[source_filename] = include_filename ? include_filename : ""; files_included_per_file_[source_filename] = std::set<std::string>(); } if (!include_paths) { static const char *current_directory[] = { "", nullptr }; include_paths = current_directory; } field_stack_.clear(); builder_.Clear(); // Start with a blank namespace just in case this file doesn't have one. namespaces_.push_back(new Namespace()); ECHECK(StartParseFile(source, source_filename)); // Includes must come before type declarations: for (;;) { // Parse pre-include proto statements if any: if (opts.proto_mode && (attribute_ == "option" || attribute_ == "syntax" || attribute_ == "package")) { ECHECK(ParseProtoDecl()); } else if (Is(kTokenNativeInclude)) { NEXT(); native_included_files_.emplace_back(attribute_); EXPECT(kTokenStringConstant); } else if (Is(kTokenInclude) || (opts.proto_mode && attribute_ == "import" && Is(kTokenIdentifier))) { NEXT(); if (opts.proto_mode && attribute_ == "public") NEXT(); auto name = flatbuffers::PosixPath(attribute_.c_str()); EXPECT(kTokenStringConstant); // Look for the file in include_paths. std::string filepath; for (auto paths = include_paths; paths && *paths; paths++) { filepath = flatbuffers::ConCatPathFileName(*paths, name); if(FileExists(filepath.c_str())) break; } if (filepath.empty()) return Error("unable to locate include file: " + name); if (source_filename) files_included_per_file_[source_filename].insert(filepath); if (included_files_.find(filepath) == included_files_.end()) { // We found an include file that we have not parsed yet. // Load it and parse it. std::string contents; if (!LoadFile(filepath.c_str(), true, &contents)) return Error("unable to load include file: " + name); ECHECK(DoParse(contents.c_str(), include_paths, filepath.c_str(), name.c_str())); // We generally do not want to output code for any included files: if (!opts.generate_all) MarkGenerated(); // This is the easiest way to continue this file after an include: // instead of saving and restoring all the state, we simply start the // file anew. This will cause it to encounter the same include // statement again, but this time it will skip it, because it was // entered into included_files_. // This is recursive, but only go as deep as the number of include // statements. 
return DoParse(source, include_paths, source_filename, include_filename); } EXPECT(';'); } else { break; } } // Now parse all other kinds of declarations: while (token_ != kTokenEof) { if (opts.proto_mode) { ECHECK(ParseProtoDecl()); } else if (token_ == kTokenNameSpace) { ECHECK(ParseNamespace()); } else if (token_ == '{') { if (!root_struct_def_) return Error("no root type set to parse json with"); if (builder_.GetSize()) { return Error("cannot have more than one json object in a file"); } uoffset_t toff; ECHECK(ParseTable(*root_struct_def_, nullptr, &toff)); builder_.Finish(Offset<Table>(toff), file_identifier_.length() ? file_identifier_.c_str() : nullptr); } else if (token_ == kTokenEnum) { ECHECK(ParseEnum(false, nullptr)); } else if (token_ == kTokenUnion) { ECHECK(ParseEnum(true, nullptr)); } else if (token_ == kTokenRootType) { NEXT(); auto root_type = attribute_; EXPECT(kTokenIdentifier); ECHECK(ParseNamespacing(&root_type, nullptr)); if (!SetRootType(root_type.c_str())) return Error("unknown root type: " + root_type); if (root_struct_def_->fixed) return Error("root type must be a table"); EXPECT(';'); } else if (token_ == kTokenFileIdentifier) { NEXT(); file_identifier_ = attribute_; EXPECT(kTokenStringConstant); if (file_identifier_.length() != FlatBufferBuilder::kFileIdentifierLength) return Error("file_identifier must be exactly " + NumToString(FlatBufferBuilder::kFileIdentifierLength) + " characters"); EXPECT(';'); } else if (token_ == kTokenFileExtension) { NEXT(); file_extension_ = attribute_; EXPECT(kTokenStringConstant); EXPECT(';'); } else if(token_ == kTokenInclude) { return Error("includes must come before declarations"); } else if(token_ == kTokenAttribute) { NEXT(); auto name = attribute_; EXPECT(kTokenStringConstant); EXPECT(';'); known_attributes_[name] = false; } else if (token_ == kTokenService) { ECHECK(ParseService()); } else { ECHECK(ParseDecl()); } } return NoError(); } std::set<std::string> Parser::GetIncludedFilesRecursive( const std::string &file_name) const { std::set<std::string> included_files; std::list<std::string> to_process; if (file_name.empty()) return included_files; to_process.push_back(file_name); while (!to_process.empty()) { std::string current = to_process.front(); to_process.pop_front(); included_files.insert(current); auto new_files = files_included_per_file_.at(current); for (auto it = new_files.begin(); it != new_files.end(); ++it) { if (included_files.find(*it) == included_files.end()) to_process.push_back(*it); } } return included_files; } // Schema serialization functionality: template<typename T> bool compareName(const T* a, const T* b) { return a->defined_namespace->GetFullyQualifiedName(a->name) < b->defined_namespace->GetFullyQualifiedName(b->name); } template<typename T> void AssignIndices(const std::vector<T *> &defvec) { // Pre-sort these vectors, such that we can set the correct indices for them. 
auto vec = defvec; std::sort(vec.begin(), vec.end(), compareName<T>); for (int i = 0; i < static_cast<int>(vec.size()); i++) vec[i]->index = i; } void Parser::Serialize() { builder_.Clear(); AssignIndices(structs_.vec); AssignIndices(enums_.vec); std::vector<Offset<reflection::Object>> object_offsets; for (auto it = structs_.vec.begin(); it != structs_.vec.end(); ++it) { auto offset = (*it)->Serialize(&builder_, *this); object_offsets.push_back(offset); (*it)->serialized_location = offset.o; } std::vector<Offset<reflection::Enum>> enum_offsets; for (auto it = enums_.vec.begin(); it != enums_.vec.end(); ++it) { auto offset = (*it)->Serialize(&builder_, *this); enum_offsets.push_back(offset); (*it)->serialized_location = offset.o; } auto schema_offset = reflection::CreateSchema( builder_, builder_.CreateVectorOfSortedTables(&object_offsets), builder_.CreateVectorOfSortedTables(&enum_offsets), builder_.CreateString(file_identifier_), builder_.CreateString(file_extension_), root_struct_def_ ? root_struct_def_->serialized_location : 0); builder_.Finish(schema_offset, reflection::SchemaIdentifier()); } Offset<reflection::Object> StructDef::Serialize(FlatBufferBuilder *builder, const Parser &parser) const { std::vector<Offset<reflection::Field>> field_offsets; for (auto it = fields.vec.begin(); it != fields.vec.end(); ++it) { field_offsets.push_back( (*it)->Serialize(builder, static_cast<uint16_t>(it - fields.vec.begin()), parser)); } auto qualified_name = defined_namespace->GetFullyQualifiedName(name); return reflection::CreateObject(*builder, builder->CreateString(qualified_name), builder->CreateVectorOfSortedTables( &field_offsets), fixed, static_cast<int>(minalign), static_cast<int>(bytesize), SerializeAttributes(builder, parser), parser.opts.binary_schema_comments ? builder->CreateVectorOfStrings( doc_comment) : 0); } Offset<reflection::Field> FieldDef::Serialize(FlatBufferBuilder *builder, uint16_t id, const Parser &parser) const { return reflection::CreateField(*builder, builder->CreateString(name), value.type.Serialize(builder), id, value.offset, IsInteger(value.type.base_type) ? StringToInt(value.constant.c_str()) : 0, IsFloat(value.type.base_type) ? strtod(value.constant.c_str(), nullptr) : 0.0, deprecated, required, key, SerializeAttributes(builder, parser), parser.opts.binary_schema_comments ? builder->CreateVectorOfStrings(doc_comment) : 0); // TODO: value.constant is almost always "0", we could save quite a bit of // space by sharing it. Same for common values of value.type. } Offset<reflection::Enum> EnumDef::Serialize(FlatBufferBuilder *builder, const Parser &parser) const { std::vector<Offset<reflection::EnumVal>> enumval_offsets; for (auto it = vals.vec.begin(); it != vals.vec.end(); ++it) { enumval_offsets.push_back((*it)->Serialize(builder)); } auto qualified_name = defined_namespace->GetFullyQualifiedName(name); return reflection::CreateEnum(*builder, builder->CreateString(qualified_name), builder->CreateVector(enumval_offsets), is_union, underlying_type.Serialize(builder), SerializeAttributes(builder, parser), parser.opts.binary_schema_comments ? builder->CreateVectorOfStrings(doc_comment) : 0); } Offset<reflection::EnumVal> EnumVal::Serialize(FlatBufferBuilder *builder) const { return reflection::CreateEnumVal(*builder, builder->CreateString(name), value, union_type.struct_def ? 
union_type.struct_def-> serialized_location : 0, union_type.Serialize(builder)); } Offset<reflection::Type> Type::Serialize(FlatBufferBuilder *builder) const { return reflection::CreateType(*builder, static_cast<reflection::BaseType>(base_type), static_cast<reflection::BaseType>(element), struct_def ? struct_def->index : (enum_def ? enum_def->index : -1)); } flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset< reflection::KeyValue>>> Definition::SerializeAttributes(FlatBufferBuilder *builder, const Parser &parser) const { std::vector<flatbuffers::Offset<reflection::KeyValue>> attrs; for (auto kv = attributes.dict.begin(); kv != attributes.dict.end(); ++kv) { auto it = parser.known_attributes_.find(kv->first); assert(it != parser.known_attributes_.end()); if (!it->second) { // Custom attribute. attrs.push_back( reflection::CreateKeyValue(*builder, builder->CreateString(kv->first), builder->CreateString( kv->second->constant))); } } if (attrs.size()) { return builder->CreateVectorOfSortedTables(&attrs); } else { return 0; } } std::string Parser::ConformTo(const Parser &base) { for (auto sit = structs_.vec.begin(); sit != structs_.vec.end(); ++sit) { auto &struct_def = **sit; auto qualified_name = struct_def.defined_namespace->GetFullyQualifiedName(struct_def.name); auto struct_def_base = base.structs_.Lookup(qualified_name); if (!struct_def_base) continue; for (auto fit = struct_def.fields.vec.begin(); fit != struct_def.fields.vec.end(); ++fit) { auto &field = **fit; auto field_base = struct_def_base->fields.Lookup(field.name); if (field_base) { if (field.value.offset != field_base->value.offset) return "offsets differ for field: " + field.name; if (field.value.constant != field_base->value.constant) return "defaults differ for field: " + field.name; if (!EqualByName(field.value.type, field_base->value.type)) return "types differ for field: " + field.name; } else { // Doesn't have to exist, deleting fields is fine. // But we should check if there is a field that has the same offset // but is incompatible (in the case of field renaming). for (auto fbit = struct_def_base->fields.vec.begin(); fbit != struct_def_base->fields.vec.end(); ++fbit) { field_base = *fbit; if (field.value.offset == field_base->value.offset) { if (!EqualByName(field.value.type, field_base->value.type)) return "field renamed to different type: " + field.name; break; } } } } } for (auto eit = enums_.vec.begin(); eit != enums_.vec.end(); ++eit) { auto &enum_def = **eit; auto qualified_name = enum_def.defined_namespace->GetFullyQualifiedName(enum_def.name); auto enum_def_base = base.enums_.Lookup(qualified_name); if (!enum_def_base) continue; for (auto evit = enum_def.vals.vec.begin(); evit != enum_def.vals.vec.end(); ++evit) { auto &enum_val = **evit; auto enum_val_base = enum_def_base->vals.Lookup(enum_val.name); if (enum_val_base) { if (enum_val.value != enum_val_base->value) return "values differ for enum: " + enum_val.name; } } } return ""; } } // namespace flatbuffers
1
12176
Like I said, please replace by `EXPECT(kTokenString)`
google-flatbuffers
cpp
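The hunk this comment reviews is not reproduced in this record, so what follows is only an illustrative sketch. The parser source above uses the EXPECT(token) macro to check the current token and advance the lexer in one step (for example EXPECT(kTokenIdentifier), EXPECT(';')), and the reviewer is asking for that one-liner in place of a hand-rolled check. Both the manual check and the kTokenString identifier below are assumptions taken from the reviewer's wording, not from the visible code:

    // hand-rolled check the reviewer objects to (hypothetical)
    if (token_ != kTokenString) return Error("expected string constant");
    NEXT();

    // suggested replacement: verify and consume the token in one call
    EXPECT(kTokenString);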
@@ -32,6 +32,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.springframework.security.authentication.AuthenticationCredentialsNotFoundException; +import org.springframework.security.authentication.AuthenticationDetailsSource; import org.springframework.security.authentication.AuthenticationManager; import org.springframework.security.authentication.AuthenticationTrustResolver; import org.springframework.security.authentication.AuthenticationTrustResolverImpl;
1
/* * Copyright 2002-2016 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.security.web.servletapi; import java.io.IOException; import java.util.List; import javax.servlet.AsyncContext; import javax.servlet.AsyncListener; import javax.servlet.ServletContext; import javax.servlet.ServletException; import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.springframework.security.authentication.AuthenticationCredentialsNotFoundException; import org.springframework.security.authentication.AuthenticationManager; import org.springframework.security.authentication.AuthenticationTrustResolver; import org.springframework.security.authentication.AuthenticationTrustResolverImpl; import org.springframework.security.authentication.UsernamePasswordAuthenticationToken; import org.springframework.security.concurrent.DelegatingSecurityContextRunnable; import org.springframework.security.core.Authentication; import org.springframework.security.core.AuthenticationException; import org.springframework.security.core.context.SecurityContext; import org.springframework.security.core.context.SecurityContextHolder; import org.springframework.security.web.AuthenticationEntryPoint; import org.springframework.security.web.authentication.logout.LogoutHandler; import org.springframework.util.Assert; import org.springframework.util.CollectionUtils; /** * Provides integration with the Servlet 3 APIs. The additional methods that are * integrated with can be found below: * * <ul> * <li>{@link HttpServletRequest#authenticate(HttpServletResponse)} - Allows the user to * determine if they are authenticated and if not send the user to the login page. See * {@link #setAuthenticationEntryPoint(AuthenticationEntryPoint)}.</li> * <li>{@link HttpServletRequest#login(String, String)} - Allows the user to authenticate * using the {@link AuthenticationManager}. See * {@link #setAuthenticationManager(AuthenticationManager)}.</li> * <li>{@link HttpServletRequest#logout()} - Allows the user to logout using the * {@link LogoutHandler}s configured in Spring Security. 
See * {@link #setLogoutHandlers(List)}.</li> * <li>{@link AsyncContext#start(Runnable)} - Automatically copy the * {@link SecurityContext} from the {@link SecurityContextHolder} found on the Thread that * invoked {@link AsyncContext#start(Runnable)} to the Thread that processes the * {@link Runnable}.</li> * </ul> * * @author Rob Winch * @see SecurityContextHolderAwareRequestFilter * @see Servlet3SecurityContextHolderAwareRequestWrapper * @see SecurityContextAsyncContext */ final class HttpServlet3RequestFactory implements HttpServletRequestFactory { private Log logger = LogFactory.getLog(getClass()); private final String rolePrefix; private AuthenticationTrustResolver trustResolver = new AuthenticationTrustResolverImpl(); private AuthenticationEntryPoint authenticationEntryPoint; private AuthenticationManager authenticationManager; private List<LogoutHandler> logoutHandlers; HttpServlet3RequestFactory(String rolePrefix) { this.rolePrefix = rolePrefix; } /** * <p> * Sets the {@link AuthenticationEntryPoint} used when integrating * {@link HttpServletRequest} with Servlet 3 APIs. Specifically, it will be used when * {@link HttpServletRequest#authenticate(HttpServletResponse)} is called and the user * is not authenticated. * </p> * <p> * If the value is null (default), then the default container behavior will be be * retained when invoking {@link HttpServletRequest#authenticate(HttpServletResponse)} * . * </p> * @param authenticationEntryPoint the {@link AuthenticationEntryPoint} to use when * invoking {@link HttpServletRequest#authenticate(HttpServletResponse)} if the user * is not authenticated. */ void setAuthenticationEntryPoint(AuthenticationEntryPoint authenticationEntryPoint) { this.authenticationEntryPoint = authenticationEntryPoint; } /** * <p> * Sets the {@link AuthenticationManager} used when integrating * {@link HttpServletRequest} with Servlet 3 APIs. Specifically, it will be used when * {@link HttpServletRequest#login(String, String)} is invoked to determine if the * user is authenticated. * </p> * <p> * If the value is null (default), then the default container behavior will be * retained when invoking {@link HttpServletRequest#login(String, String)}. * </p> * @param authenticationManager the {@link AuthenticationManager} to use when invoking * {@link HttpServletRequest#login(String, String)} */ void setAuthenticationManager(AuthenticationManager authenticationManager) { this.authenticationManager = authenticationManager; } /** * <p> * Sets the {@link LogoutHandler}s used when integrating with * {@link HttpServletRequest} with Servlet 3 APIs. Specifically it will be used when * {@link HttpServletRequest#logout()} is invoked in order to log the user out. So * long as the {@link LogoutHandler}s do not commit the {@link HttpServletResponse} * (expected), then the user is in charge of handling the response. * </p> * <p> * If the value is null (default), the default container behavior will be retained * when invoking {@link HttpServletRequest#logout()}. * </p> * @param logoutHandlers the {@code List<LogoutHandler>}s when invoking * {@link HttpServletRequest#logout()}. */ void setLogoutHandlers(List<LogoutHandler> logoutHandlers) { this.logoutHandlers = logoutHandlers; } /** * Sets the {@link AuthenticationTrustResolver} to be used. The default is * {@link AuthenticationTrustResolverImpl}. * @param trustResolver the {@link AuthenticationTrustResolver} to use. Cannot be * null. 
*/ void setTrustResolver(AuthenticationTrustResolver trustResolver) { Assert.notNull(trustResolver, "trustResolver cannot be null"); this.trustResolver = trustResolver; } @Override public HttpServletRequest create(HttpServletRequest request, HttpServletResponse response) { return new Servlet3SecurityContextHolderAwareRequestWrapper(request, this.rolePrefix, response); } private class Servlet3SecurityContextHolderAwareRequestWrapper extends SecurityContextHolderAwareRequestWrapper { private final HttpServletResponse response; Servlet3SecurityContextHolderAwareRequestWrapper(HttpServletRequest request, String rolePrefix, HttpServletResponse response) { super(request, HttpServlet3RequestFactory.this.trustResolver, rolePrefix); this.response = response; } @Override public AsyncContext getAsyncContext() { AsyncContext asyncContext = super.getAsyncContext(); if (asyncContext == null) { return null; } return new SecurityContextAsyncContext(asyncContext); } @Override public AsyncContext startAsync() { AsyncContext startAsync = super.startAsync(); return new SecurityContextAsyncContext(startAsync); } @Override public AsyncContext startAsync(ServletRequest servletRequest, ServletResponse servletResponse) throws IllegalStateException { AsyncContext startAsync = super.startAsync(servletRequest, servletResponse); return new SecurityContextAsyncContext(startAsync); } @Override public boolean authenticate(HttpServletResponse response) throws IOException, ServletException { AuthenticationEntryPoint entryPoint = HttpServlet3RequestFactory.this.authenticationEntryPoint; if (entryPoint == null) { HttpServlet3RequestFactory.this.logger.debug( "authenticationEntryPoint is null, so allowing original HttpServletRequest to handle authenticate"); return super.authenticate(response); } if (isAuthenticated()) { return true; } entryPoint.commence(this, response, new AuthenticationCredentialsNotFoundException("User is not Authenticated")); return false; } @Override public void login(String username, String password) throws ServletException { if (isAuthenticated()) { throw new ServletException("Cannot perform login for '" + username + "' already authenticated as '" + getRemoteUser() + "'"); } AuthenticationManager authManager = HttpServlet3RequestFactory.this.authenticationManager; if (authManager == null) { HttpServlet3RequestFactory.this.logger.debug( "authenticationManager is null, so allowing original HttpServletRequest to handle login"); super.login(username, password); return; } Authentication authentication = getAuthentication(authManager, username, password); SecurityContextHolder.getContext().setAuthentication(authentication); } private Authentication getAuthentication(AuthenticationManager authManager, String username, String password) throws ServletException { try { return authManager.authenticate(new UsernamePasswordAuthenticationToken(username, password)); } catch (AuthenticationException ex) { SecurityContextHolder.clearContext(); throw new ServletException(ex.getMessage(), ex); } } @Override public void logout() throws ServletException { List<LogoutHandler> handlers = HttpServlet3RequestFactory.this.logoutHandlers; if (CollectionUtils.isEmpty(handlers)) { HttpServlet3RequestFactory.this.logger .debug("logoutHandlers is null, so allowing original HttpServletRequest to handle logout"); super.logout(); return; } Authentication authentication = SecurityContextHolder.getContext().getAuthentication(); for (LogoutHandler handler : handlers) { handler.logout(this, this.response, authentication); } } private 
boolean isAuthenticated() { return getUserPrincipal() != null; } } private static class SecurityContextAsyncContext implements AsyncContext { private final AsyncContext asyncContext; SecurityContextAsyncContext(AsyncContext asyncContext) { this.asyncContext = asyncContext; } @Override public ServletRequest getRequest() { return this.asyncContext.getRequest(); } @Override public ServletResponse getResponse() { return this.asyncContext.getResponse(); } @Override public boolean hasOriginalRequestAndResponse() { return this.asyncContext.hasOriginalRequestAndResponse(); } @Override public void dispatch() { this.asyncContext.dispatch(); } @Override public void dispatch(String path) { this.asyncContext.dispatch(path); } @Override public void dispatch(ServletContext context, String path) { this.asyncContext.dispatch(context, path); } @Override public void complete() { this.asyncContext.complete(); } @Override public void start(Runnable run) { this.asyncContext.start(new DelegatingSecurityContextRunnable(run)); } @Override public void addListener(AsyncListener listener) { this.asyncContext.addListener(listener); } @Override public void addListener(AsyncListener listener, ServletRequest request, ServletResponse response) { this.asyncContext.addListener(listener, request, response); } @Override public <T extends AsyncListener> T createListener(Class<T> clazz) throws ServletException { return this.asyncContext.createListener(clazz); } @Override public long getTimeout() { return this.asyncContext.getTimeout(); } @Override public void setTimeout(long timeout) { this.asyncContext.setTimeout(timeout); } } }
1
17202
Will you please update the copyright message to `2021` for classes that you modify?
spring-projects-spring-security
java
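A sketch of the header edit the reviewer is requesting for HttpServlet3RequestFactory and any other class touched by the patch; it assumes Spring's convention of keeping the original start year and bumping only the end year of the license comment:

    // before
    * Copyright 2002-2016 the original author or authors.
    // after
    * Copyright 2002-2021 the original author or authors.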
@@ -301,6 +301,8 @@ getSelectedRows: function () { return service.getSelectedRows(grid).map(function (gridRow) { return gridRow.entity; + }).filter(function (entity) { + return entity.hasOwnProperty('$$hashKey'); }); }, /**
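Applied to the source listing below, the hunk yields the getSelectedRows implementation sketched here; the rationale (dropping entities that AngularJS has not stamped with its $$hashKey tracking property, i.e. objects not registered through the bound data array) is inferred from the diff, not stated in the record:

    getSelectedRows: function () {
      return service.getSelectedRows(grid).map(function (gridRow) {
        return gridRow.entity;
      }).filter(function (entity) {
        return entity.hasOwnProperty('$$hashKey');
      });
    },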
1
(function () { 'use strict'; /** * @ngdoc overview * @name ui.grid.selection * @description * * # ui.grid.selection * This module provides row selection * * <div class="alert alert-success" role="alert"><strong>Stable</strong> This feature is stable. There should no longer be breaking api changes without a deprecation warning.</div> * * <div doc-module-components="ui.grid.selection"></div> */ var module = angular.module('ui.grid.selection', ['ui.grid']); /** * @ngdoc object * @name ui.grid.selection.constant:uiGridSelectionConstants * * @description constants available in selection module */ module.constant('uiGridSelectionConstants', { featureName: "selection", selectionRowHeaderColName: 'selectionRowHeaderCol' }); //add methods to GridRow angular.module('ui.grid').config(['$provide', function ($provide) { $provide.decorator('GridRow', ['$delegate', function ($delegate) { /** * @ngdoc object * @name ui.grid.selection.api:GridRow * * @description GridRow prototype functions added for selection */ /** * @ngdoc object * @name enableSelection * @propertyOf ui.grid.selection.api:GridRow * @description Enable row selection for this row, only settable by internal code. * * The grouping feature, for example, might set group header rows to not be selectable. * <br/>Defaults to true */ /** * @ngdoc object * @name isSelected * @propertyOf ui.grid.selection.api:GridRow * @description Selected state of row. Should be readonly. Make any changes to selected state using setSelected(). * <br/>Defaults to false */ /** * @ngdoc function * @name setSelected * @methodOf ui.grid.selection.api:GridRow * @description Sets the isSelected property and updates the selectedCount * Changes to isSelected state should only be made via this function * @param {bool} selected value to set */ $delegate.prototype.setSelected = function (selected) { if (selected !== this.isSelected) { this.isSelected = selected; this.grid.selection.selectedCount += selected ? 
1 : -1; } }; return $delegate; }]); }]); /** * @ngdoc service * @name ui.grid.selection.service:uiGridSelectionService * * @description Services for selection features */ module.service('uiGridSelectionService', ['$q', '$templateCache', 'uiGridSelectionConstants', 'gridUtil', function ($q, $templateCache, uiGridSelectionConstants, gridUtil) { var service = { initializeGrid: function (grid) { //add feature namespace and any properties to grid for needed /** * @ngdoc object * @name ui.grid.selection.grid:selection * * @description Grid properties and functions added for selection */ grid.selection = {}; grid.selection.lastSelectedRow = null; grid.selection.selectAll = false; /** * @ngdoc object * @name selectedCount * @propertyOf ui.grid.selection.grid:selection * @description Current count of selected rows * @example * var count = grid.selection.selectedCount */ grid.selection.selectedCount = 0; service.defaultGridOptions(grid.options); /** * @ngdoc object * @name ui.grid.selection.api:PublicApi * * @description Public Api for selection feature */ var publicApi = { events: { selection: { /** * @ngdoc event * @name rowSelectionChanged * @eventOf ui.grid.selection.api:PublicApi * @description is raised after the row.isSelected state is changed * @param {GridRow} row the row that was selected/deselected * @param {Event} event object if raised from an event */ rowSelectionChanged: function (scope, row, evt) { }, /** * @ngdoc event * @name rowSelectionChangedBatch * @eventOf ui.grid.selection.api:PublicApi * @description is raised after the row.isSelected state is changed * in bulk, if the `enableSelectionBatchEvent` option is set to true * (which it is by default). This allows more efficient processing * of bulk events. * @param {array} rows the rows that were selected/deselected * @param {Event} event object if raised from an event */ rowSelectionChangedBatch: function (scope, rows, evt) { } } }, methods: { selection: { /** * @ngdoc function * @name toggleRowSelection * @methodOf ui.grid.selection.api:PublicApi * @description Toggles data row as selected or unselected * @param {object} rowEntity gridOptions.data[] array instance * @param {Event} event object if raised from an event */ toggleRowSelection: function (rowEntity, evt) { var row = grid.getRow(rowEntity); if (row !== null) { service.toggleRowSelection(grid, row, evt, grid.options.multiSelect, grid.options.noUnselect); } }, /** * @ngdoc function * @name selectRow * @methodOf ui.grid.selection.api:PublicApi * @description Select the data row * @param {object} rowEntity gridOptions.data[] array instance * @param {Event} event object if raised from an event */ selectRow: function (rowEntity, evt) { var row = grid.getRow(rowEntity); if (row !== null && !row.isSelected) { service.toggleRowSelection(grid, row, evt, grid.options.multiSelect, grid.options.noUnselect); } }, /** * @ngdoc function * @name selectRowByVisibleIndex * @methodOf ui.grid.selection.api:PublicApi * @description Select the specified row by visible index (i.e. if you * specify row 0 you'll get the first visible row selected). In this context * visible means of those rows that are theoretically visible (i.e. not filtered), * rather than rows currently rendered on the screen. 
* @param {number} index index within the rowsVisible array * @param {Event} event object if raised from an event */ selectRowByVisibleIndex: function (rowNum, evt) { var row = grid.renderContainers.body.visibleRowCache[rowNum]; if (row !== null && typeof (row) !== 'undefined' && !row.isSelected) { service.toggleRowSelection(grid, row, evt, grid.options.multiSelect, grid.options.noUnselect); } }, /** * @ngdoc function * @name unSelectRow * @methodOf ui.grid.selection.api:PublicApi * @description UnSelect the data row * @param {object} rowEntity gridOptions.data[] array instance * @param {Event} event object if raised from an event */ unSelectRow: function (rowEntity, evt) { var row = grid.getRow(rowEntity); if (row !== null && row.isSelected) { service.toggleRowSelection(grid, row, evt, grid.options.multiSelect, grid.options.noUnselect); } }, /** * @ngdoc function * @name unSelectRowByVisibleIndex * @methodOf ui.grid.selection.api:PublicApi * @description Unselect the specified row by visible index (i.e. if you * specify row 0 you'll get the first visible row unselected). In this context * visible means of those rows that are theoretically visible (i.e. not filtered), * rather than rows currently rendered on the screen. * @param {number} index index within the rowsVisible array * @param {Event} event object if raised from an event */ unSelectRowByVisibleIndex: function (rowNum, evt) { var row = grid.renderContainers.body.visibleRowCache[rowNum]; if (row !== null && typeof (row) !== 'undefined' && row.isSelected) { service.toggleRowSelection(grid, row, evt, grid.options.multiSelect, grid.options.noUnselect); } }, /** * @ngdoc function * @name selectAllRows * @methodOf ui.grid.selection.api:PublicApi * @description Selects all rows. Does nothing if multiSelect = false * @param {Event} event object if raised from an event */ selectAllRows: function (evt) { if (grid.options.multiSelect === false) { return; } var changedRows = []; grid.rows.forEach(function (row) { if (!row.isSelected && row.enableSelection !== false) { row.setSelected(true); service.decideRaiseSelectionEvent(grid, row, changedRows, evt); } }); service.decideRaiseSelectionBatchEvent(grid, changedRows, evt); grid.selection.selectAll = true; }, /** * @ngdoc function * @name selectAllVisibleRows * @methodOf ui.grid.selection.api:PublicApi * @description Selects all visible rows. 
Does nothing if multiSelect = false * @param {Event} event object if raised from an event */ selectAllVisibleRows: function (event) { if (grid.options.multiSelect !== false) { var changedRows = []; var rowCache = []; if (grid.treeBase && grid.treeBase.tree) { rowCache = getAllTreeRows(grid.treeBase.tree); } else { rowCache = grid.rows; } for (var i = 0; i<rowCache.length; i++) { var row = rowCache[i]; if (row.visible) { if (!row.isSelected && row.enableSelection !== false) { row.setSelected(true); service.decideRaiseSelectionEvent(grid, row, changedRows, event); } } else { if (row.isSelected) { row.setSelected(false); service.decideRaiseSelectionEvent(grid, row, changedRows, event); } } } service.decideRaiseSelectionBatchEvent(grid, changedRows, event); grid.selection.selectAll = true; } }, /** * @ngdoc function * @name clearSelectedRows * @methodOf ui.grid.selection.api:PublicApi * @description Unselects all rows * @param {Event} event object if raised from an event */ clearSelectedRows: function (evt) { service.clearSelectedRows(grid, evt); }, /** * @ngdoc function * @name getSelectedRows * @methodOf ui.grid.selection.api:PublicApi * @description returns all selectedRow's entity references */ getSelectedRows: function () { return service.getSelectedRows(grid).map(function (gridRow) { return gridRow.entity; }); }, /** * @ngdoc function * @name getSelectedGridRows * @methodOf ui.grid.selection.api:PublicApi * @description returns all selectedRow's as gridRows */ getSelectedGridRows: function () { return service.getSelectedRows(grid); }, /** * @ngdoc function * @name getSelectedCount * @methodOf ui.grid.selection.api:PublicApi * @description returns the number of rows selected */ getSelectedCount: function () { return grid.selection.selectedCount; }, /** * @ngdoc function * @name setMultiSelect * @methodOf ui.grid.selection.api:PublicApi * @description Sets the current gridOption.multiSelect to true or false * @param {bool} multiSelect true to allow multiple rows */ setMultiSelect: function (multiSelect) { grid.options.multiSelect = multiSelect; }, /** * @ngdoc function * @name setModifierKeysToMultiSelect * @methodOf ui.grid.selection.api:PublicApi * @description Sets the current gridOption.modifierKeysToMultiSelect to true or false * @param {bool} modifierKeysToMultiSelect true to only allow multiple rows when using ctrlKey or shiftKey is used */ setModifierKeysToMultiSelect: function (modifierKeysToMultiSelect) { grid.options.modifierKeysToMultiSelect = modifierKeysToMultiSelect; }, /** * @ngdoc function * @name getSelectAllState * @methodOf ui.grid.selection.api:PublicApi * @description Returns whether or not the selectAll checkbox is currently ticked. 
The * grid doesn't automatically select rows when you add extra data - so when you add data * you need to explicitly check whether the selectAll is set, and then call setVisible rows * if it is */ getSelectAllState: function () { return grid.selection.selectAll; } } } }; grid.api.registerEventsFromObject(publicApi.events); grid.api.registerMethodsFromObject(publicApi.methods); }, defaultGridOptions: function (gridOptions) { //default option to true unless it was explicitly set to false /** * @ngdoc object * @name ui.grid.selection.api:GridOptions * * @description GridOptions for selection feature, these are available to be * set using the ui-grid {@link ui.grid.class:GridOptions gridOptions} */ /** * @ngdoc object * @name enableRowSelection * @propertyOf ui.grid.selection.api:GridOptions * @description Enable row selection for entire grid. * <br/>Defaults to true */ gridOptions.enableRowSelection = gridOptions.enableRowSelection !== false; /** * @ngdoc object * @name multiSelect * @propertyOf ui.grid.selection.api:GridOptions * @description Enable multiple row selection for entire grid * <br/>Defaults to true */ gridOptions.multiSelect = gridOptions.multiSelect !== false; /** * @ngdoc object * @name noUnselect * @propertyOf ui.grid.selection.api:GridOptions * @description Prevent a row from being unselected. Works in conjunction * with `multiselect = false` and `gridApi.selection.selectRow()` to allow * you to create a single selection only grid - a row is always selected, you * can only select different rows, you can't unselect the row. * <br/>Defaults to false */ gridOptions.noUnselect = gridOptions.noUnselect === true; /** * @ngdoc object * @name modifierKeysToMultiSelect * @propertyOf ui.grid.selection.api:GridOptions * @description Enable multiple row selection only when using the ctrlKey or shiftKey. Requires multiSelect to be true. * <br/>Defaults to false */ gridOptions.modifierKeysToMultiSelect = gridOptions.modifierKeysToMultiSelect === true; /** * @ngdoc object * @name enableRowHeaderSelection * @propertyOf ui.grid.selection.api:GridOptions * @description Enable a row header to be used for selection * <br/>Defaults to true */ gridOptions.enableRowHeaderSelection = gridOptions.enableRowHeaderSelection !== false; /** * @ngdoc object * @name enableFullRowSelection * @propertyOf ui.grid.selection.api:GridOptions * @description Enable selection by clicking anywhere on the row. Defaults to * false if `enableRowHeaderSelection` is true, otherwise defaults to false. */ if (typeof (gridOptions.enableFullRowSelection) === 'undefined') { gridOptions.enableFullRowSelection = !gridOptions.enableRowHeaderSelection; } /** * @ngdoc object * @name enableSelectAll * @propertyOf ui.grid.selection.api:GridOptions * @description Enable the select all checkbox at the top of the selectionRowHeader * <br/>Defaults to true */ gridOptions.enableSelectAll = gridOptions.enableSelectAll !== false; /** * @ngdoc object * @name enableSelectionBatchEvent * @propertyOf ui.grid.selection.api:GridOptions * @description If selected rows are changed in bulk, either via the API or * via the selectAll checkbox, then a separate event is fired. 
Setting this * option to false will cause the rowSelectionChanged event to be called multiple times * instead * <br/>Defaults to true */ gridOptions.enableSelectionBatchEvent = gridOptions.enableSelectionBatchEvent !== false; /** * @ngdoc object * @name selectionRowHeaderWidth * @propertyOf ui.grid.selection.api:GridOptions * @description can be used to set a custom width for the row header selection column * <br/>Defaults to 30px */ gridOptions.selectionRowHeaderWidth = angular.isDefined(gridOptions.selectionRowHeaderWidth) ? gridOptions.selectionRowHeaderWidth : 30; /** * @ngdoc object * @name enableFooterTotalSelected * @propertyOf ui.grid.selection.api:GridOptions * @description Shows the total number of selected items in footer if true. * <br/>Defaults to true. * <br/>GridOptions.showGridFooter must also be set to true. */ gridOptions.enableFooterTotalSelected = gridOptions.enableFooterTotalSelected !== false; /** * @ngdoc object * @name isRowSelectable * @propertyOf ui.grid.selection.api:GridOptions * @description Makes it possible to specify a method that evaluates for each row and sets its "enableSelection" property. */ gridOptions.isRowSelectable = angular.isDefined(gridOptions.isRowSelectable) ? gridOptions.isRowSelectable : angular.noop; }, /** * @ngdoc function * @name toggleRowSelection * @methodOf ui.grid.selection.service:uiGridSelectionService * @description Toggles row as selected or unselected * @param {Grid} grid grid object * @param {GridRow} row row to select or deselect * @param {Event} event object if resulting from event * @param {bool} multiSelect if false, only one row at time can be selected * @param {bool} noUnselect if true then rows cannot be unselected */ toggleRowSelection: function (grid, row, evt, multiSelect, noUnselect) { var selected = row.isSelected; if (row.enableSelection === false && !selected) { return; } var selectedRows; if (!multiSelect && !selected) { service.clearSelectedRows(grid, evt); } else if (!multiSelect && selected) { selectedRows = service.getSelectedRows(grid); if (selectedRows.length > 1) { selected = false; // Enable reselect of the row service.clearSelectedRows(grid, evt); } } if (selected && noUnselect) { // don't deselect the row } else { row.setSelected(!selected); if (row.isSelected === true) { grid.selection.lastSelectedRow = row; } selectedRows = service.getSelectedRows(grid); grid.selection.selectAll = grid.rows.length === selectedRows.length; grid.api.selection.raise.rowSelectionChanged(row, evt); toggleParentHeaders(grid, row, evt, multiSelect, noUnselect); } }, /** * @ngdoc function * @name shiftSelect * @methodOf ui.grid.selection.service:uiGridSelectionService * @description selects a group of rows from the last selected row using the shift key * @param {Grid} grid grid object * @param {GridRow} clicked row * @param {Event} event object if raised from an event * @param {bool} multiSelect if false, does nothing this is for multiSelect only */ shiftSelect: function (grid, row, evt, multiSelect) { if (!multiSelect) { return; } var selectedRows = service.getSelectedRows(grid); var fromRow = selectedRows.length > 0 ? 
grid.renderContainers.body.visibleRowCache.indexOf(grid.selection.lastSelectedRow) : 0; var toRow = grid.renderContainers.body.visibleRowCache.indexOf(row); //reverse select direction if (fromRow > toRow) { var tmp = fromRow; fromRow = toRow; toRow = tmp; } var changedRows = []; for (var i = fromRow; i <= toRow; i++) { var rowToSelect = grid.renderContainers.body.visibleRowCache[i]; if (rowToSelect) { if (!rowToSelect.isSelected && rowToSelect.enableSelection !== false) { rowToSelect.setSelected(true); grid.selection.lastSelectedRow = rowToSelect; service.decideRaiseSelectionEvent(grid, rowToSelect, changedRows, evt); } } } service.decideRaiseSelectionBatchEvent(grid, changedRows, evt); }, /** * @ngdoc function * @name getSelectedRows * @methodOf ui.grid.selection.service:uiGridSelectionService * @description Returns all the selected rows * @param {Grid} grid grid object */ getSelectedRows: function (grid) { var rows; if (grid.treeBase && grid.treeBase.tree) { rows = getAllTreeRows(grid.treeBase.tree); } else { rows = grid.rows; } var selectedRows = []; for (var i = 0; i<rows.length; i++) { if (rows[i].isSelected) { selectedRows.push(rows[i]); } } return selectedRows; }, /** * @ngdoc function * @name clearSelectedRows * @methodOf ui.grid.selection.service:uiGridSelectionService * @description Clears all selected rows * @param {Grid} grid grid object * @param {Event} event object if raised from an event */ clearSelectedRows: function (grid, evt) { var changedRows = []; service.getSelectedRows(grid).forEach(function (row) { if (row.isSelected) { row.setSelected(false); service.decideRaiseSelectionEvent(grid, row, changedRows, evt); } }); service.decideRaiseSelectionBatchEvent(grid, changedRows, evt); grid.selection.selectAll = false; grid.selection.selectedCount = 0; }, /** * @ngdoc function * @name decideRaiseSelectionEvent * @methodOf ui.grid.selection.service:uiGridSelectionService * @description Decides whether to raise a single event or a batch event * @param {Grid} grid grid object * @param {GridRow} row row that has changed * @param {array} changedRows an array to which we can append the changed * @param {Event} event object if raised from an event * row if we're doing batch events */ decideRaiseSelectionEvent: function (grid, row, changedRows, evt) { if (!grid.options.enableSelectionBatchEvent) { grid.api.selection.raise.rowSelectionChanged(row, evt); } else { changedRows.push(row); } }, /** * @ngdoc function * @name raiseSelectionEvent * @methodOf ui.grid.selection.service:uiGridSelectionService * @description Decides whether we need to raise a batch event, and * raises it if we do. 
* @param {Grid} grid grid object * @param {array} changedRows an array of changed rows, only populated * @param {Event} event object if raised from an event * if we're doing batch events */ decideRaiseSelectionBatchEvent: function (grid, changedRows, evt) { if (changedRows.length > 0) { grid.api.selection.raise.rowSelectionChangedBatch(changedRows, evt); } } }; return service; function toggleParentHeaders(grid, row, event, multiSelect, noUnselect){ if (row.treeNode &&row.treeNode.parentRow) { var parentRow = row.treeNode.parentRow; var siblingSelectedStatus = []; for (var i = 0; i < parentRow.treeNode.children.length; i++) { siblingSelectedStatus.push(parentRow.treeNode.children[i].row.isSelected); } var allSiblingsSelected = siblingSelectedStatus.indexOf(false) === -1; if (parentRow.isSelected !== allSiblingsSelected) { service.toggleRowSelection(grid, parentRow, event, multiSelect, noUnselect); } } } function getAllTreeRows(rowTree){ var selectedRows = []; for (var i = 0; i<rowTree.length; i++) { var node = rowTree[i]; selectedRows.push(node.row); selectedRows = selectedRows.concat(getAllTreeRows(node.children)); } return selectedRows; } }]); /** * @ngdoc directive * @name ui.grid.selection.directive:uiGridSelection * @element div * @restrict A * * @description Adds selection features to grid * * @example <example module="app"> <file name="app.js"> var app = angular.module('app', ['ui.grid', 'ui.grid.selection']); app.controller('MainCtrl', ['$scope', function ($scope) { $scope.data = [ { name: 'Bob', title: 'CEO' }, { name: 'Frank', title: 'Lowly Developer' } ]; $scope.columnDefs = [ {name: 'name', enableCellEdit: true}, {name: 'title', enableCellEdit: true} ]; }]); </file> <file name="index.html"> <div ng-controller="MainCtrl"> <div ui-grid="{ data: data, columnDefs: columnDefs }" ui-grid-selection></div> </div> </file> </example> */ module.directive('uiGridSelection', ['uiGridSelectionConstants', 'uiGridSelectionService', '$templateCache', 'uiGridConstants', function (uiGridSelectionConstants, uiGridSelectionService, $templateCache, uiGridConstants) { return { replace: true, priority: 0, require: '^uiGrid', scope: false, compile: function () { return { pre: function ($scope, $elm, $attrs, uiGridCtrl) { uiGridSelectionService.initializeGrid(uiGridCtrl.grid); if (uiGridCtrl.grid.options.enableRowHeaderSelection) { var selectionRowHeaderDef = { name: uiGridSelectionConstants.selectionRowHeaderColName, displayName: '', width: uiGridCtrl.grid.options.selectionRowHeaderWidth, minWidth: 10, cellTemplate: 'ui-grid/selectionRowHeader', headerCellTemplate: 'ui-grid/selectionHeaderCell', enableColumnResizing: false, enableColumnMenu: false, exporterSuppressExport: true, allowCellFocus: true }; uiGridCtrl.grid.addRowHeaderColumn(selectionRowHeaderDef, 0); } var processorSet = false; var processSelectableRows = function (rows) { rows.forEach(function (row) { row.enableSelection = uiGridCtrl.grid.options.isRowSelectable(row); }); return rows; }; var updateOptions = function () { if (uiGridCtrl.grid.options.isRowSelectable !== angular.noop && processorSet !== true) { uiGridCtrl.grid.registerRowsProcessor(processSelectableRows, 500); processorSet = true; } }; updateOptions(); var dataChangeDereg = uiGridCtrl.grid.registerDataChangeCallback(updateOptions, [uiGridConstants.dataChange.OPTIONS]); $scope.$on('$destroy', dataChangeDereg); }, post: function ($scope, $elm, $attrs, uiGridCtrl) { } }; } }; }]); module.directive('uiGridSelectionRowHeaderButtons', ['$templateCache', 'uiGridSelectionService', 
'gridUtil', function ($templateCache, uiGridSelectionService, gridUtil) { return { replace: true, restrict: 'E', template: $templateCache.get('ui-grid/selectionRowHeaderButtons'), scope: true, require: '^uiGrid', link: function ($scope, $elm, $attrs, uiGridCtrl) { var self = uiGridCtrl.grid; $scope.selectButtonClick = selectButtonClick; $scope.selectButtonKeyDown = selectButtonKeyDown; // On IE, prevent mousedowns on the select button from starting a selection. // If this is not done and you shift+click on another row, the browser will select a big chunk of text if (gridUtil.detectBrowser() === 'ie') { $elm.on('mousedown', selectButtonMouseDown); } function selectButtonKeyDown(row, evt) { if (evt.keyCode === 32) { evt.preventDefault(); selectButtonClick(row, evt); } } function selectButtonClick(row, evt) { evt.stopPropagation(); if (row.groupHeader) { selectByKeyState(row, evt); var selectionState = row.isSelected; for (var i = 0; i < row.treeNode.children.length; i++) { if (row.treeNode.children[i].row.isSelected !== selectionState) { selectButtonClick(row.treeNode.children[i].row, evt); } } }else { selectByKeyState(row, evt); } } function selectByKeyState(row, evt){ if (evt.shiftKey) { uiGridSelectionService.shiftSelect(self, row, evt, self.options.multiSelect); } else if (evt.ctrlKey || evt.metaKey) { uiGridSelectionService.toggleRowSelection(self, row, evt, self.options.multiSelect, self.options.noUnselect); } else { uiGridSelectionService.toggleRowSelection(self, row, evt, (self.options.multiSelect && !self.options.modifierKeysToMultiSelect), self.options.noUnselect); } } function selectButtonMouseDown(evt) { if (evt.ctrlKey || evt.shiftKey) { evt.target.onselectstart = function () { return false; }; window.setTimeout(function () { evt.target.onselectstart = null; }, 0); } } $scope.$on('$destroy', function unbindEvents() { $elm.off(); }); } }; }]); module.directive('uiGridSelectionSelectAllButtons', ['$templateCache', 'uiGridSelectionService', function ($templateCache, uiGridSelectionService) { return { replace: true, restrict: 'E', template: $templateCache.get('ui-grid/selectionSelectAllButtons'), scope: false, link: function ($scope, $elm, $attrs, uiGridCtrl) { var self = $scope.col.grid; $scope.headerButtonKeyDown = function (evt) { if (evt.keyCode === 32 || evt.keyCode === 13) { evt.preventDefault(); $scope.headerButtonClick(evt); } }; $scope.headerButtonClick = function (evt) { if (self.selection.selectAll) { uiGridSelectionService.clearSelectedRows(self, evt); if (self.options.noUnselect) { self.api.selection.selectRowByVisibleIndex(0, evt); } self.selection.selectAll = false; } else if (self.options.multiSelect) { self.api.selection.selectAllVisibleRows(evt); self.selection.selectAll = true; } }; } }; }]); /** * @ngdoc directive * @name ui.grid.selection.directive:uiGridViewport * @element div * * @description Stacks on top of ui.grid.uiGridViewport to alter the attributes used * for the grid row */ module.directive('uiGridViewport', ['$compile', 'uiGridConstants', 'uiGridSelectionConstants', 'gridUtil', '$parse', 'uiGridSelectionService', function ($compile, uiGridConstants, uiGridSelectionConstants, gridUtil, $parse, uiGridSelectionService) { return { priority: -200, // run after default directive scope: false, compile: function ($elm, $attrs) { var rowRepeatDiv = angular.element($elm[0].querySelector('.ui-grid-canvas:not(.ui-grid-empty-base-layer-container)').children[0]); var existingNgClass = rowRepeatDiv.attr("ng-class"); var newNgClass = ''; if (existingNgClass) { 
newNgClass = existingNgClass.slice(0, -1) + ",'ui-grid-row-selected': row.isSelected}"; } else { newNgClass = "{'ui-grid-row-selected': row.isSelected}"; } rowRepeatDiv.attr("ng-class", newNgClass); return { pre: function ($scope, $elm, $attrs, controllers) { }, post: function ($scope, $elm, $attrs, controllers) { } }; } }; }]); /** * @ngdoc directive * @name ui.grid.selection.directive:uiGridCell * @element div * @restrict A * * @description Stacks on top of ui.grid.uiGridCell to provide selection feature */ module.directive('uiGridCell', ['$compile', 'uiGridConstants', 'uiGridSelectionConstants', 'gridUtil', '$parse', 'uiGridSelectionService', '$timeout', function ($compile, uiGridConstants, uiGridSelectionConstants, gridUtil, $parse, uiGridSelectionService, $timeout) { return { priority: -200, // run after default uiGridCell directive restrict: 'A', require: '?^uiGrid', scope: false, link: function ($scope, $elm, $attrs, uiGridCtrl) { var touchStartTime = 0; var touchTimeout = 300; // Bind to keydown events in the render container if (uiGridCtrl.grid.api.cellNav) { uiGridCtrl.grid.api.cellNav.on.viewPortKeyDown($scope, function (evt, rowCol) { if (rowCol === null || rowCol.row !== $scope.row || rowCol.col !== $scope.col) { return; } if (evt.keyCode === 32 && $scope.col.colDef.name === "selectionRowHeaderCol") { evt.preventDefault(); uiGridSelectionService.toggleRowSelection($scope.grid, $scope.row, evt, ($scope.grid.options.multiSelect && !$scope.grid.options.modifierKeysToMultiSelect), $scope.grid.options.noUnselect); $scope.$apply(); } // uiGridCellNavService.scrollToIfNecessary(uiGridCtrl.grid, rowCol.row, rowCol.col); }); } //$elm.bind('keydown', function (evt) { // if (evt.keyCode === 32 && $scope.col.colDef.name === "selectionRowHeaderCol") { // uiGridSelectionService.toggleRowSelection($scope.grid, $scope.row, evt, ($scope.grid.options.multiSelect && !$scope.grid.options.modifierKeysToMultiSelect), $scope.grid.options.noUnselect); // $scope.$apply(); // } //}); var selectCells = function (evt) { // if you click on expandable icon doesn't trigger selection if (evt.target.className === "ui-grid-icon-minus-squared" || evt.target.className === "ui-grid-icon-plus-squared") { return; } // if we get a click, then stop listening for touchend $elm.off('touchend', touchEnd); if (evt.shiftKey) { uiGridSelectionService.shiftSelect($scope.grid, $scope.row, evt, $scope.grid.options.multiSelect); } else if (evt.ctrlKey || evt.metaKey) { uiGridSelectionService.toggleRowSelection($scope.grid, $scope.row, evt, $scope.grid.options.multiSelect, $scope.grid.options.noUnselect); } else { uiGridSelectionService.toggleRowSelection($scope.grid, $scope.row, evt, ($scope.grid.options.multiSelect && !$scope.grid.options.modifierKeysToMultiSelect), $scope.grid.options.noUnselect); } $scope.$apply(); // don't re-enable the touchend handler for a little while - some devices generate both, and it will // take a little while to move your hand from the mouse to the screen if you have both modes of input $timeout(function () { $elm.on('touchend', touchEnd); }, touchTimeout); }; var touchStart = function (evt) { touchStartTime = (new Date()).getTime(); // if we get a touch event, then stop listening for click $elm.off('click', selectCells); }; var touchEnd = function (evt) { var touchEndTime = (new Date()).getTime(); var touchTime = touchEndTime - touchStartTime; if (touchTime < touchTimeout) { // short touch selectCells(evt); } // don't re-enable the click handler for a little while - some devices generate both, 
and it will // take a little while to move your hand from the screen to the mouse if you have both modes of input $timeout(function () { $elm.on('click', selectCells); }, touchTimeout); }; function registerRowSelectionEvents() { if ($scope.grid.options.enableRowSelection && $scope.grid.options.enableFullRowSelection) { $elm.addClass('ui-grid-disable-selection'); $elm.on('touchstart', touchStart); $elm.on('touchend', touchEnd); $elm.on('click', selectCells); $scope.registered = true; } } function deregisterRowSelectionEvents() { if ($scope.registered) { $elm.removeClass('ui-grid-disable-selection'); $elm.off('touchstart', touchStart); $elm.off('touchend', touchEnd); $elm.off('click', selectCells); $scope.registered = false; } } registerRowSelectionEvents(); // register a dataChange callback so that we can change the selection configuration dynamically // if the user changes the options var dataChangeDereg = $scope.grid.registerDataChangeCallback(function () { if ($scope.grid.options.enableRowSelection && $scope.grid.options.enableFullRowSelection && !$scope.registered) { registerRowSelectionEvents(); } else if ((!$scope.grid.options.enableRowSelection || !$scope.grid.options.enableFullRowSelection) && $scope.registered) { deregisterRowSelectionEvents(); } }, [uiGridConstants.dataChange.OPTIONS]); $elm.on('$destroy', dataChangeDereg); } }; }]); module.directive('uiGridGridFooter', ['$compile', 'uiGridConstants', 'gridUtil', function ($compile, uiGridConstants, gridUtil) { return { restrict: 'EA', replace: true, priority: -1000, require: '^uiGrid', scope: true, compile: function ($elm, $attrs) { return { pre: function ($scope, $elm, $attrs, uiGridCtrl) { if (!uiGridCtrl.grid.options.showGridFooter) { return; } gridUtil.getTemplate('ui-grid/gridFooterSelectedItems') .then(function (contents) { var template = angular.element(contents); var newElm = $compile(template)($scope); angular.element($elm[0].getElementsByClassName('ui-grid-grid-footer')[0]).append(newElm); }); }, post: function ($scope, $elm, $attrs, controllers) { } }; } }; }]); })();
1
11,969
Is there a better way to filter these other than relying on the hashKey? I feel like this might not be trustworthy in the long run. If angular ever removes this property, this feature would be broken. (One possible alternative is sketched after this record.)
angular-ui-ui-grid
js
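The review comment above worries about keying rows off Angular's private $$hashKey. Below is a minimal sketch of one alternative, assuming ui-grid's rowIdentity/getRowIdentity options behave as documented and that each entity carries a stable id field — both are assumptions for illustration, not part of the change under review:

// Minimal sketch: derive row identity from a domain id rather than
// Angular's private $$hashKey, which is an implementation detail that
// a future Angular release could remove.
// `rowIdentity`, `getRowIdentity`, and the `id` field are assumptions
// for illustration.
angular.module('app', ['ui.grid'])
  .controller('MainCtrl', ['$scope', function ($scope) {
    $scope.data = [
      { id: 1, name: 'Bob', title: 'CEO' },
      { id: 2, name: 'Frank', title: 'Lowly Developer' }
    ];
    $scope.gridOptions = {
      data: $scope.data,
      // Map each entity to a key the application controls, so nothing
      // breaks if $$hashKey ever disappears.
      rowIdentity: function (entity) {
        return entity.id;
      },
      getRowIdentity: function (row) {
        return row.entity.id;
      }
    };
  }]);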
@@ -2012,7 +2012,11 @@ os_tls_thread_exit(local_state_t *local_state)
 
     /* We already set TLS to &uninit_tls in os_thread_exit() */
-    if (dynamo_exited && !last_thread_tls_exited) {
+    /* Do not set last_thread_tls_exited if a client_thread is exiting.
+     * If set, get_thread_private_dcontext() returns NULL, which may cause
+     * other threads to fault when using the dcontext.
+     */
+    if (dynamo_exited && dynamo_exited_synched && !last_thread_tls_exited) {
         last_thread_tls_exited = true;
         first_thread_tls_initialized = false; /* for possible re-attach */
     }
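To make the added dynamo_exited_synched guard concrete, here is a small compilable sketch of the hazard the new comment describes. The globals are stand-ins mirroring the names in the diff, and the function is a simplification for illustration, not the real os_tls_thread_exit:

#include <stdbool.h>

/* Stand-ins mirroring the DynamoRIO globals named in the diff above. */
static bool dynamo_exited;           /* process exit has begun */
static bool dynamo_exited_synched;   /* all app threads have been synched */
static bool last_thread_tls_exited;
static bool first_thread_tls_initialized = true;

/* Simplified shape of the patched check: a client thread exiting after
 * dynamo_exited is set, but before synch-all completes, must not flip
 * last_thread_tls_exited; otherwise get_thread_private_dcontext() would
 * start returning NULL while other threads still use their dcontext. */
static void tls_thread_exit_check(void)
{
    if (dynamo_exited && dynamo_exited_synched && !last_thread_tls_exited) {
        last_thread_tls_exited = true;
        first_thread_tls_initialized = false; /* for possible re-attach */
    }
}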
1
/* ******************************************************************************* * Copyright (c) 2010-2017 Google, Inc. All rights reserved. * Copyright (c) 2011 Massachusetts Institute of Technology All rights reserved. * Copyright (c) 2000-2010 VMware, Inc. All rights reserved. * *******************************************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* Copyright (c) 2003-2007 Determina Corp. */ /* Copyright (c) 2001-2003 Massachusetts Institute of Technology */ /* Copyright (c) 2000-2001 Hewlett-Packard Company */ /* * os.c - Linux specific routines */ /* Easiest to match kernel stat struct by using 64-bit. * This limits us to 2.4+ kernel but that's ok. * I don't really want to get into requiring kernel headers to build * general release packages, though that would be fine for targeted builds. * There are 3 different stat syscalls (SYS_oldstat, SYS_stat, and SYS_stat64) * and using _LARGEFILE64_SOURCE with SYS_stat64 is the best match. 
*/ #define _LARGEFILE64_SOURCE /* for mmap-related #defines */ #include <sys/types.h> #include <sys/mman.h> /* in case MAP_32BIT is missing */ #ifndef MAP_32BIT # define MAP_32BIT 0x40 #endif #ifndef MAP_ANONYMOUS # define MAP_ANONYMOUS MAP_ANON /* MAP_ANON on Mac */ #endif /* for open */ #include <sys/stat.h> #include <fcntl.h> #include "../globals.h" #include "../hashtable.h" #include <string.h> #include <unistd.h> /* for write and usleep and _exit */ #include <limits.h> #ifdef MACOS # include <sys/sysctl.h> /* for sysctl */ # ifndef SYS___sysctl /* The name was changed on Yosemite */ # define SYS___sysctl SYS_sysctl # endif # include <mach/mach_traps.h> /* for swtch_pri */ # include "include/syscall_mach.h" #endif #ifdef LINUX # include <sys/vfs.h> /* for statfs */ #elif defined(MACOS) # include <sys/mount.h> /* for statfs */ # include <mach/mach.h> # include <mach/task.h> # include <mach/semaphore.h> # include <mach/sync_policy.h> #endif #include <dirent.h> /* for getrlimit */ #include <sys/time.h> #include <sys/resource.h> #ifndef X64 struct compat_rlimit { uint rlim_cur; uint rlim_max; }; #endif #ifdef LINUX /* For clone and its flags, the manpage says to include sched.h with _GNU_SOURCE * defined. _GNU_SOURCE brings in unwanted extensions and causes name * conflicts. Instead, we include unix/sched.h which comes from the Linux * kernel headers. */ # include <linux/sched.h> #endif #include "module.h" /* elf */ #include "tls.h" #ifdef LINUX # include "module_private.h" /* for ELF_AUXV_TYPE and AT_PAGESZ */ #endif #if defined(X86) && defined(DEBUG) # include "os_asm_defines.asm" /* for TLS_SELF_OFFSET_ASM */ #endif #ifndef F_DUPFD_CLOEXEC /* in linux 2.6.24+ */ # define F_DUPFD_CLOEXEC 1030 #endif /* This is not always sufficient to identify a syscall return value. * For example, MacOS has some 32-bit syscalls that return 64-bit * values in xdx:xax. */ #define MCXT_SYSCALL_RES(mc) ((mc)->IF_X86_ELSE(xax, r0)) #if defined(AARCH64) # define ASM_R2 "x2" # define ASM_R3 "x3" # define READ_TP_TO_R3_DISP_IN_R2 \ "mrs "ASM_R3", tpidr_el0\n\t" \ "ldr "ASM_R3", ["ASM_R3", "ASM_R2"] \n\t" #elif defined(ARM) # define ASM_R2 "r2" # define ASM_R3 "r3" # define READ_TP_TO_R3_DISP_IN_R2 \ "mrc p15, 0, "ASM_R3", c13, c0, "STRINGIFY(USR_TLS_REG_OPCODE)" \n\t" \ "ldr "ASM_R3", ["ASM_R3", "ASM_R2"] \n\t" #endif /* ARM */ /* Prototype for all functions in .init_array. */ typedef int (*init_fn_t)(int argc, char **argv, char **envp); /* For STATIC_LIBRARY we do not cache environ so the app can change it. */ #ifndef STATIC_LIBRARY /* i#46: Private __environ pointer. Points at the environment variable array * on the stack, which is different from what libc __environ may point at. We * use the environment for following children and setting options, so its OK * that we don't see what libc says. */ char **our_environ; #endif #include <errno.h> /* avoid problems with use of errno as var name in rest of file */ #if !defined(STANDALONE_UNIT_TEST) && !defined(MACOS) # undef errno #endif /* we define __set_errno below */ /* must be prior to <link.h> => <elf.h> => INT*_{MIN,MAX} */ # include "instr.h" /* for get_app_segment_base() */ #include "decode_fast.h" /* decode_cti: maybe os_handle_mov_seg should be ifdef X86? 
*/ #include <dlfcn.h> #include <stdlib.h> #include <stdio.h> #include <signal.h> #include <syslog.h> /* vsyslog */ #include "../vmareas.h" #ifdef RCT_IND_BRANCH # include "../rct.h" #endif #ifdef LINUX # include "include/syscall.h" /* our own local copy */ #else # include <sys/syscall.h> #endif #include "../module_shared.h" #include "os_private.h" #include "../synch.h" #include "memquery.h" #include "ksynch.h" #ifndef HAVE_MEMINFO_QUERY # include "memcache.h" #endif #ifdef CLIENT_INTERFACE # include "instrument.h" #endif /* Cross arch syscall nums for use with struct stat64. */ #ifdef X64 # ifdef SYS_stat # define SYSNUM_STAT SYS_stat # endif # define SYSNUM_FSTAT SYS_fstat #else # define SYSNUM_STAT SYS_stat64 # define SYSNUM_FSTAT SYS_fstat64 #endif #ifdef MACOS # define SYSNUM_EXIT_PROCESS SYS_exit # define SYSNUM_EXIT_THREAD SYS_bsdthread_terminate #else # define SYSNUM_EXIT_PROCESS SYS_exit_group # define SYSNUM_EXIT_THREAD SYS_exit #endif #ifdef ANDROID /* Custom prctl flags specific to Android (xref i#1861) */ # define PR_SET_VMA 0x53564d41 # define PR_SET_VMA_ANON_NAME 0 #endif #ifdef NOT_DYNAMORIO_CORE_PROPER # undef ASSERT # undef ASSERT_NOT_IMPLEMENTED # undef ASSERT_NOT_TESTED # undef ASSERT_CURIOSITY # define ASSERT(x) /* nothing */ # define ASSERT_NOT_IMPLEMENTED(x) /* nothing */ # define ASSERT_NOT_TESTED(x) /* nothing */ # define ASSERT_CURIOSITY(x) /* nothing */ # undef LOG # undef DOSTATS # define LOG(...) /* nothing */ # define DOSTATS(...) /* nothing */ #else /* !NOT_DYNAMORIO_CORE_PROPER: around most of file, to exclude preload */ /* Guards data written by os_set_app_thread_area(). */ DECLARE_CXTSWPROT_VAR(static mutex_t set_thread_area_lock, INIT_LOCK_FREE(set_thread_area_lock)); static bool first_thread_tls_initialized; static bool last_thread_tls_exited; tls_type_t tls_global_type; #ifndef HAVE_TLS /* We use a table lookup to find a thread's dcontext */ /* Our only current no-TLS target, VMKernel (VMX86_SERVER), doesn't have apps with * tons of threads anyway */ #define MAX_THREADS 512 typedef struct _tls_slot_t { thread_id_t tid; dcontext_t *dcontext; } tls_slot_t; /* Stored in heap for self-prot */ static tls_slot_t *tls_table; /* not static so deadlock_avoidance_unlock() can look for it */ DECLARE_CXTSWPROT_VAR(mutex_t tls_lock, INIT_LOCK_FREE(tls_lock)); #endif #ifdef CLIENT_INTERFACE /* Should we place this in a client header? Currently mentioned in * dr_raw_tls_calloc() docs. 
*/ static bool client_tls_allocated[MAX_NUM_CLIENT_TLS]; DECLARE_CXTSWPROT_VAR(static mutex_t client_tls_lock, INIT_LOCK_FREE(client_tls_lock)); #endif #include <stddef.h> /* for offsetof */ #include <sys/utsname.h> /* for struct utsname */ /* forward decl */ static void handle_execve_post(dcontext_t *dcontext); static bool os_switch_lib_tls(dcontext_t *dcontext, bool to_app); static bool os_switch_seg_to_context(dcontext_t *dcontext, reg_id_t seg, bool to_app); #ifdef X86 static bool os_set_dr_tls_base(dcontext_t *dcontext, os_local_state_t *tls, byte *base); #endif #ifdef LINUX static bool handle_app_mremap(dcontext_t *dcontext, byte *base, size_t size, byte *old_base, size_t old_size, uint old_prot, uint old_type); static void handle_app_brk(dcontext_t *dcontext, byte *lowest_brk/*if known*/, byte *old_brk, byte *new_brk); #endif /* full path to our own library, used for execve */ static char dynamorio_library_path[MAXIMUM_PATH]; /* just dir */ static char dynamorio_library_filepath[MAXIMUM_PATH]; /* Issue 20: path to other architecture */ static char dynamorio_alt_arch_path[MAXIMUM_PATH]; static char dynamorio_alt_arch_filepath[MAXIMUM_PATH]; /* just dir */ /* Makefile passes us LIBDIR_X{86,64} defines */ #define DR_LIBDIR_X86 STRINGIFY(LIBDIR_X86) #define DR_LIBDIR_X64 STRINGIFY(LIBDIR_X64) /* pc values delimiting dynamo dll image */ static app_pc dynamo_dll_start = NULL; static app_pc dynamo_dll_end = NULL; /* open-ended */ static app_pc executable_start = NULL; static app_pc executable_end = NULL; /* Used by get_application_name(). */ static char executable_path[MAXIMUM_PATH]; static char *executable_basename; /* does the kernel provide tids that must be used to distinguish threads in a group? */ static bool kernel_thread_groups; static bool kernel_64bit; pid_t pid_cached; static bool fault_handling_initialized; #ifdef PROFILE_RDTSC uint kilo_hertz; /* cpu clock speed */ #endif /* Xref PR 258731, dup of STDOUT/STDERR in case app wants to close them. */ DR_API file_t our_stdout = STDOUT_FILENO; DR_API file_t our_stderr = STDERR_FILENO; DR_API file_t our_stdin = STDIN_FILENO; /* we steal fds from the app */ static struct rlimit app_rlimit_nofile; /* cur rlimit set by app */ static int min_dr_fd; /* we store all DR files so we can prevent the app from changing them, * and so we can close them in a child of fork. * the table key is the fd and the payload is the set of DR_FILE_* flags. */ static generic_table_t *fd_table; #define INIT_HTABLE_SIZE_FD 6 /* should remain small */ #ifdef DEBUG static int num_fd_add_pre_heap; #endif #ifdef LINUX /* i#1004: brk emulation */ static byte *app_brk_map; static byte *app_brk_cur; static byte *app_brk_end; #endif #ifdef MACOS /* xref i#1404: we should expose these via the dr_get_os_version() API */ static int macos_version; # define MACOS_VERSION_SIERRA 16 # define MACOS_VERSION_EL_CAPITAN 15 # define MACOS_VERSION_YOSEMITE 14 # define MACOS_VERSION_MAVERICKS 13 # define MACOS_VERSION_MOUNTAIN_LION 12 # define MACOS_VERSION_LION 11 #endif static bool is_readable_without_exception_internal(const byte *pc, size_t size, bool query_os); static void process_mmap(dcontext_t *dcontext, app_pc base, size_t size, uint prot, uint flags _IF_DEBUG(const char *map_type)); #ifdef LINUX static char * read_proc_self_exe(bool ignore_cache); #endif /* Libc independent directory iterator, similar to readdir. If we ever need * this on Windows we should generalize it and export it to clients. 
*/ typedef struct _dir_iterator_t { file_t fd; int off; int end; const char *name; /* Name of the current entry. */ char buf[4 * MAXIMUM_PATH]; /* Expect stack alloc, so not too big. */ } dir_iterator_t; static void os_dir_iterator_start(dir_iterator_t *iter, file_t fd); static bool os_dir_iterator_next(dir_iterator_t *iter); /* XXX: If we generalize to Windows, will we need os_dir_iterator_stop()? */ /* vsyscall page. hardcoded at 0xffffe000 in earlier kernels, but * randomly placed since fedora2. * marked rx then: FIXME: should disallow this guy when that's the case! * random vsyscall page is identified in maps files as "[vdso]" * (kernel-provided fake shared library or Virt Dyn Shared Object). */ /* i#1583: vdso is now 2 pages, yet we assume vsyscall is on 1st page. */ app_pc vsyscall_page_start = NULL; /* pc of the end of the syscall instr itself */ app_pc vsyscall_syscall_end_pc = NULL; /* pc where kernel returns control after sysenter vsyscall */ app_pc vsyscall_sysenter_return_pc = NULL; /* pc where our hook-displaced code was copied */ app_pc vsyscall_sysenter_displaced_pc = NULL; #define VSYSCALL_PAGE_START_HARDCODED ((app_pc)(ptr_uint_t) 0xffffe000) #ifdef X64 /* i#430, in Red Hat Enterprise Server 5.6, vsyscall region is marked * not executable * ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0 [vsyscall] */ # define VSYSCALL_REGION_MAPS_NAME "[vsyscall]" #endif /* i#1908: vdso and vsyscall are now split */ app_pc vdso_page_start = NULL; #if !defined(STANDALONE_UNIT_TEST) && !defined(STATIC_LIBRARY) /* The pthreads library keeps errno in its pthread_descr data structure, * which it looks up by dispatching on the stack pointer. This doesn't work * when within dynamo. Thus, we define our own __errno_location() for use both * by us and the app, to prevent pthreads looking at the stack pointer when * out of the code cache. */ /* FIXME: maybe we should create 1st dcontext earlier so we don't need init_errno? * any problems with init_errno being set and then dcontext->errno being read? * FIXME: if a thread issues a dr_app_stop, then we don't want to use * this errno slot? But it may later do a start...probably ok to keep using * the slot. But, when threads die, they'll all use the same init_errno! */ static int init_errno; /* errno until 1st dcontext created */ int * __errno_location(void) { /* Each dynamo thread should have a separate errno */ dcontext_t *dcontext = get_thread_private_dcontext(); if (dcontext == NULL) return &init_errno; else { /* WARNING: init_errno is in data segment so can be RO! */ return &(dcontext->upcontext_ptr->dr_errno); } } #endif /* !STANDALONE_UNIT_TEST && !STATIC_LIBRARY */ #if defined(HAVE_TLS) && defined(CLIENT_INTERFACE) /* i#598 * (gdb) x/20i (*(errno_loc_t)0xf721e413) * 0xf721e413 <__errno_location>: push %ebp * 0xf721e414 <__errno_location+1>: mov %esp,%ebp * 0xf721e416 <__errno_location+3>: call <__x86.get_pc_thunk.cx> * 0xf721e41b <__errno_location+8>: add $0x166bd9,%ecx * 0xf721e421 <__errno_location+14>: mov -0x1c(%ecx),%eax * 0xf721e427 <__errno_location+20>: add %gs:0x0,%eax * 0xf721e42e <__errno_location+27>: pop %ebp * 0xf721e42f <__errno_location+28>: ret * * __errno_location calcuates the errno location by adding * TLS's base with errno's offset in TLS. * However, because the TLS has been switched in os_tls_init, * the calculated address is wrong. * We first get the errno offset in TLS at init time and * calculate correct address by adding the app's tls base. 
*/ /* __errno_location on ARM: * 0xb6f0b290 <__errno_location>: ldr r3, [pc, #12] * 0xb6f0b292 <__errno_location+2>: mrc 15, 0, r0, cr13, cr0, {3} * 0xb6f0b296 <__errno_location+6>: add r3, pc * 0xb6f0b298 <__errno_location+8>: ldr r3, [r3, #0] * 0xb6f0b29a <__errno_location+10>: adds r0, r0, r3 * 0xb6f0b29c <__errno_location+12>: bx lr * It uses the predefined offset to get errno location in TLS, * and we should be able to reuse the code here. */ static int libc_errno_tls_offs; static int * our_libc_errno_loc(void) { void *app_tls = os_get_app_tls_base(NULL, TLS_REG_LIB); if (app_tls == NULL) return NULL; return (int *)(app_tls + libc_errno_tls_offs); } #endif /* i#238/PR 499179: libc errno preservation * * Errno location is per-thread so we store the * function globally and call it each time. Note that pthreads seems * to be the one who provides per-thread errno: using raw syscalls to * create threads, we end up with a global errno: * * > for i in linux.thread.*0/log.*; do grep 'libc errno' $i | head -1; done * libc errno loc: 0x00007f153de26698 * libc errno loc: 0x00007f153de26698 * > for i in pthreads.pthreads.*0/log.*; do grep 'libc errno' $i | head -1; done * libc errno loc: 0x00007fc24d1ce698 * libc errno loc: 0x00007fc24d1cd8b8 * libc errno loc: 0x00007fc24c7cc8b8 */ typedef int *(*errno_loc_t)(void); static errno_loc_t get_libc_errno_location(bool do_init) { static errno_loc_t libc_errno_loc; if (do_init) { module_iterator_t *mi = module_iterator_start(); while (module_iterator_hasnext(mi)) { module_area_t *area = module_iterator_next(mi); const char *modname = GET_MODULE_NAME(&area->names); /* We ensure matches start to avoid matching "libgolibc.so". * GET_MODULE_NAME never includes the path: i#138 will add path. */ if (modname != NULL && strstr(modname, "libc.so") == modname) { bool found = true; /* called during init when .data is writable */ libc_errno_loc = (errno_loc_t) get_proc_address(area->start, "__errno_location"); ASSERT(libc_errno_loc != NULL); LOG(GLOBAL, LOG_THREADS, 2, "libc errno loc func: "PFX"\n", libc_errno_loc); #ifdef CLIENT_INTERFACE /* Currently, the DR is loaded by system loader and hooked up * to app's libc. So right now, we still need this routine. * we can remove this after libc independency and/or * early injection */ if (INTERNAL_OPTION(private_loader)) { acquire_recursive_lock(&privload_lock); if (privload_lookup_by_base(area->start) != NULL) found = false; release_recursive_lock(&privload_lock); } #endif if (found) break; } } module_iterator_stop(mi); #if defined(HAVE_TLS) && defined(CLIENT_INTERFACE) /* i#598: init the libc errno's offset. If we didn't find libc above, * then we don't need to do this. */ if (INTERNAL_OPTION(private_loader) && libc_errno_loc != NULL) { void *priv_lib_tls_base = os_get_priv_tls_base(NULL, TLS_REG_LIB); ASSERT(priv_lib_tls_base != NULL); libc_errno_tls_offs = (void *)libc_errno_loc() - priv_lib_tls_base; libc_errno_loc = &our_libc_errno_loc; } #endif } return libc_errno_loc; } /* i#238/PR 499179: our __errno_location isn't affecting libc so until * we have libc independence or our own private isolated libc we need * to preserve the app's libc's errno */ int get_libc_errno(void) { #if defined(STANDALONE_UNIT_TEST) && (defined(MACOS) || defined(ANDROID)) return errno; #else # ifdef STANDALONE_UNIT_TEST errno_loc_t func = __errno_location; # else errno_loc_t func = get_libc_errno_location(false); # endif if (func == NULL) { /* libc hasn't been loaded yet or we're doing early injection. 
*/ return 0; } else { int *loc = (*func)(); ASSERT(loc != NULL); LOG(THREAD_GET, LOG_THREADS, 5, "libc errno loc: "PFX"\n", loc); if (loc != NULL) return *loc; } return 0; #endif } /* N.B.: pthreads has two other locations it keeps on a per-thread basis: * h_errno and res_state. See glibc-2.2.4/linuxthreads/errno.c. * If dynamo ever modifies those we'll need to do to them what we now do to * errno. */ /* The environment vars exhibit totally messed up behavior when someone * does an execve of /bin/sh -- not sure what's going on, but using our * own implementation of unsetenv fixes all our problems. If we use * libc's, unsetenv either does nothing or ends up having getenv return * NULL for other vars that are obviously set (by iterating through environ). * FIXME: find out the real story here. */ int our_unsetenv(const char *name) { /* FIXME: really we should have some kind of synchronization */ size_t name_len; char **env = our_environ; if (name == NULL || *name == '\0' || strchr(name, '=') != NULL) { return -1; } ASSERT(our_environ != NULL); if (our_environ == NULL) return -1; name_len = strlen(name); while (*env != NULL) { if (strncmp(*env, name, name_len) == 0 && (*env)[name_len] == '=') { /* We have a match. Shift the subsequent entries. Keep going to * handle later matches. */ char **e; for (e = env; *e != NULL; e++) *e = *(e + 1); } else { env++; } } return 0; } /* Clobbers the name rather than shifting, to preserve auxv (xref i#909). */ bool disable_env(const char *name) { size_t name_len; char **env = our_environ; if (name == NULL || *name == '\0' || strchr(name, '=') != NULL) { return false; } ASSERT(our_environ != NULL); if (our_environ == NULL) return false; name_len = strlen(name); while (*env != NULL) { if (strncmp(*env, name, name_len) == 0 && (*env)[name_len] == '=') { /* We have a match. If we shift subsequent entries we'll mess * up access to auxv, which is after the env block, so we instead * disable the env var by changing its name. * We keep going to handle later matches. */ snprintf(*env, name_len, "__disabled__"); } env++; } return true; } /* i#46: Private getenv. */ char * our_getenv(const char *name) { char **env = our_environ; size_t i; size_t name_len; if (name == NULL || name[0] == '\0' || strchr(name, '=') != NULL) { return NULL; } ASSERT_MESSAGE(CHKLVL_ASSERTS, "our_environ is missing. _init() or " "dynamorio_set_envp() were not called", our_environ != NULL); if (our_environ == NULL) return NULL; name_len = strlen(name); for (i = 0; env[i] != NULL; i++) { if (strncmp(env[i], name, name_len) == 0 && env[i][name_len] == '=') { return env[i] + name_len + 1; } } return NULL; } /* Work around drpreload's _init going first. We can get envp in our own _init * routine down below, but drpreload.so comes first and calls * dynamorio_app_init before our own _init routine gets called. Apps using the * app API are unaffected because our _init routine will have run by then. For * STATIC_LIBRARY, we used to set our_environ in our_init(), but to support * the app setting DYNAMORIO_OPTIONS after our_init() runs, we now just use environ. */ DYNAMORIO_EXPORT void dynamorio_set_envp(char **envp) { our_environ = envp; } /* shared library init */ int our_init(int argc, char **argv, char **envp) { /* If we do not want to use drpreload.so, we can take over here: but when using * drpreload, this is called *after* we have already taken over. 
*/ extern void dynamorio_app_take_over(void); bool takeover = false; #ifdef INIT_TAKE_OVER takeover = true; #endif #ifdef VMX86_SERVER /* PR 391765: take over here instead of using preload */ takeover = os_in_vmkernel_classic(); #endif #ifndef STATIC_LIBRARY if (our_environ != NULL) { /* Set by dynamorio_set_envp above. These should agree. */ ASSERT(our_environ == envp); } else { our_environ = envp; } #endif /* if using preload, no -early_inject */ #ifdef STATIC_LIBRARY if (!takeover) { const char *takeover_env = getenv("DYNAMORIO_TAKEOVER_IN_INIT"); if (takeover_env != NULL && strcmp(takeover_env, "1") == 0) { takeover = true; } } #endif if (takeover) { if (dynamorio_app_init() == 0 /* success */) { dynamorio_app_take_over(); } } return 0; } #if defined(STATIC_LIBRARY) || defined(STANDALONE_UNIT_TEST) /* If we're getting linked into a binary that already has an _init definition * like the app's exe or unit_tests, we add a pointer to our_init() to the * .init_array section. We can't use the constructor attribute because not all * toolchains pass the args and environment to the constructor. */ static init_fn_t # ifdef MACOS __attribute__ ((section ("__DATA,__mod_init_func"), aligned (sizeof (void *)), used)) # else __attribute__ ((section (".init_array"), aligned (sizeof (void *)), used)) # endif init_array[] = { our_init }; #else /* If we're a normal shared object, then we override _init. */ int _init(int argc, char **argv, char **envp) { # ifdef ANDROID /* i#1862: the Android loader passes *nothing* to lib init routines. We * rely on DR being listed before libc so we can read the TLS slot the * kernel set up. */ if (!get_kernel_args(&argc, &argv, &envp)) { /* XXX: scan the stack and look for known auxv patterns or sthg. */ argc = 0; argv = NULL; envp = NULL; } ASSERT_MESSAGE(CHKLVL_ASSERTS, "failed to find envp", envp != NULL); # endif return our_init(argc, argv, envp); } #endif bool kernel_is_64bit(void) { return kernel_64bit; } #ifdef MACOS /* XXX: if we get enough of these, move to os_macos.c or sthg */ static bool sysctl_query(int level0, int level1, void *buf, size_t bufsz) { int res; int name[2]; size_t len = bufsz; name[0] = level0; name[1] = level1; res = dynamorio_syscall(SYS___sysctl, 6, &name, 2, buf, &len, NULL, 0); return (res >= 0); } #endif static void get_uname(void) { /* assumption: only called at init, so we don't need any synch * or .data unprot */ static struct utsname uinfo; /* can be large, avoid stack overflow */ #ifdef MACOS if (!sysctl_query(CTL_KERN, KERN_OSTYPE, &uinfo.sysname, sizeof(uinfo.sysname)) || !sysctl_query(CTL_KERN, KERN_HOSTNAME, &uinfo.nodename, sizeof(uinfo.nodename)) || !sysctl_query(CTL_KERN, KERN_OSRELEASE, &uinfo.release, sizeof(uinfo.release)) || !sysctl_query(CTL_KERN, KERN_VERSION, &uinfo.version, sizeof(uinfo.version)) || !sysctl_query(CTL_HW, HW_MACHINE, &uinfo.machine, sizeof(uinfo.machine))) { ASSERT(false && "sysctl queries failed"); return; } #else DEBUG_DECLARE(int res =) dynamorio_syscall(SYS_uname, 1, (ptr_uint_t)&uinfo); ASSERT(res >= 0); #endif LOG(GLOBAL, LOG_TOP, 1, "uname:\n\tsysname: %s\n", uinfo.sysname); LOG(GLOBAL, LOG_TOP, 1, "\tnodename: %s\n", uinfo.nodename); LOG(GLOBAL, LOG_TOP, 1, "\trelease: %s\n", uinfo.release); LOG(GLOBAL, LOG_TOP, 1, "\tversion: %s\n", uinfo.version); LOG(GLOBAL, LOG_TOP, 1, "\tmachine: %s\n", uinfo.machine); if (strncmp(uinfo.machine, "x86_64", sizeof("x86_64")) == 0) kernel_64bit = true; #ifdef MACOS /* XXX: I would skip these checks for standalone so we don't have to set env * vars for 
frontends to see the options but I'm still afraid of some syscall * crash with no output: I'd rather have two messages than silent crashing. */ if (DYNAMO_OPTION(max_supported_os_version) != 0) { /* 0 disables */ /* We only support OSX 10.7.5 - 10.9.1. That means kernels 11.x-13.x. */ # define MIN_DARWIN_VERSION_SUPPORTED 11 int kernel_major; if (sscanf(uinfo.release, "%d", &kernel_major) != 1 || kernel_major > DYNAMO_OPTION(max_supported_os_version) || kernel_major < MIN_DARWIN_VERSION_SUPPORTED) { /* We make this non-fatal as it's likely DR will work */ SYSLOG(SYSLOG_WARNING, UNSUPPORTED_OS_VERSION, 3, get_application_name(), get_application_pid(), uinfo.release); } macos_version = kernel_major; } #endif } /* os-specific initializations */ void os_init(void) { ksynch_init(); get_uname(); /* Populate global data caches. */ get_application_name(); get_application_base(); /* determine whether gettid is provided and needed for threads, * or whether getpid suffices. even 2.4 kernels have gettid * (maps to getpid), don't have an old enough target to test this. */ #ifdef MACOS kernel_thread_groups = (dynamorio_syscall(SYS_thread_selfid, 0) >= 0); #else kernel_thread_groups = (dynamorio_syscall(SYS_gettid, 0) >= 0); #endif LOG(GLOBAL, LOG_TOP|LOG_STATS, 1, "thread id is from %s\n", kernel_thread_groups ? "gettid" : "getpid"); #ifdef MACOS /* SYS_thread_selfid was added in 10.6. We have no simple way to get the * thread id on 10.5, so we don't support it. */ if (!kernel_thread_groups) { SYSLOG(SYSLOG_WARNING, UNSUPPORTED_OS_VERSION, 3, get_application_name(), get_application_pid(), "Mac OSX 10.5 or earlier"); } #else ASSERT_CURIOSITY(kernel_thread_groups); #endif pid_cached = get_process_id(); #ifdef VMX86_SERVER vmk_init(); #endif signal_init(); /* We now set up an early fault handler for safe_read() (i#350) */ fault_handling_initialized = true; memquery_init(); #ifdef PROFILE_RDTSC if (dynamo_options.profile_times) { ASSERT_NOT_TESTED(); kilo_hertz = get_timer_frequency(); LOG(GLOBAL, LOG_TOP|LOG_STATS, 1, "CPU MHz is %d\n", kilo_hertz/1000); } #endif /* PROFILE_RDTSC */ /* Needs to be after heap_init */ IF_NO_MEMQUERY(memcache_init()); /* we didn't have heap in os_file_init() so create and add global logfile now */ fd_table = generic_hash_create(GLOBAL_DCONTEXT, INIT_HTABLE_SIZE_FD, 80 /* load factor: not perf-critical */, HASHTABLE_SHARED | HASHTABLE_PERSISTENT, NULL _IF_DEBUG("fd table")); #ifdef DEBUG if (GLOBAL != INVALID_FILE) fd_table_add(GLOBAL, OS_OPEN_CLOSE_ON_FORK); #endif /* Ensure initialization */ get_dynamorio_dll_start(); #ifdef LINUX if (DYNAMO_OPTION(emulate_brk)) init_emulated_brk(NULL); #endif #ifdef ANDROID /* This must be set up earlier than privload_tls_init, and must be set up * for non-client-interface as well, as this initializes DR_TLS_BASE_OFFSET * (i#1931). */ init_android_version(); #endif } /* called before any logfiles are opened */ void os_file_init(void) { /* We steal fds from the app for better transparency. We lower the max file * descriptor limit as viewed by the app, and block SYS_dup{2,3} and * SYS_fcntl(F_DUPFD*) from creating a file explicitly in our space. We do * not try to stop incremental file opening from extending into our space: * if the app really is running out of fds, we'll give it some of ours: * after all we probably don't need all -steal_fds, and if we really need fds * we typically open them at startup. We also don't bother watching all * syscalls that take in fds from affecting our fds. 
*/ if (DYNAMO_OPTION(steal_fds) > 0) { struct rlimit rlimit_nofile; /* SYS_getrlimit uses an old 32-bit-field struct so we want SYS_ugetrlimit */ if (dynamorio_syscall(IF_MACOS_ELSE(SYS_getrlimit, IF_X64_ELSE(SYS_getrlimit, SYS_ugetrlimit)), 2, RLIMIT_NOFILE, &rlimit_nofile) != 0) { /* linux default is 1024 */ SYSLOG_INTERNAL_WARNING("getrlimit RLIMIT_NOFILE failed"); /* can't LOG yet */ rlimit_nofile.rlim_cur = 1024; rlimit_nofile.rlim_max = 1024; } /* pretend the limit is lower and reserve the top spots for us. * for simplicity and to give as much room as possible to app, * raise soft limit to equal hard limit. * if an app really depends on a low soft limit, they can run * with -steal_fds 0. */ if (rlimit_nofile.rlim_max > DYNAMO_OPTION(steal_fds)) { int res; min_dr_fd = rlimit_nofile.rlim_max - DYNAMO_OPTION(steal_fds); app_rlimit_nofile.rlim_max = min_dr_fd; app_rlimit_nofile.rlim_cur = app_rlimit_nofile.rlim_max; rlimit_nofile.rlim_cur = rlimit_nofile.rlim_max; res = dynamorio_syscall(SYS_setrlimit, 2, RLIMIT_NOFILE, &rlimit_nofile); if (res != 0) { SYSLOG_INTERNAL_WARNING("unable to raise RLIMIT_NOFILE soft limit: %d", res); } } else /* not fatal: we'll just end up using fds in app space */ SYSLOG_INTERNAL_WARNING("unable to reserve fds"); } /* we don't have heap set up yet so we init fd_table in os_init */ } /* we need to re-cache after a fork */ static char * get_application_pid_helper(bool ignore_cache) { static char pidstr[16]; if (!pidstr[0] || ignore_cache) { int pid = get_process_id(); snprintf(pidstr, sizeof(pidstr)-1, "%d", pid); } return pidstr; } /* get application pid, (cached), used for event logging */ char* get_application_pid() { return get_application_pid_helper(false); } /* i#907: Called during early injection before data section protection to avoid * issues with /proc/self/exe. */ void set_executable_path(const char *exe_path) { strncpy(executable_path, exe_path, BUFFER_SIZE_ELEMENTS(executable_path)); NULL_TERMINATE_BUFFER(executable_path); } /* The OSX kernel used to place the bare executable path above envp. * On recent XNU versions, the kernel now prefixes the executable path * with the string executable_path= so it can be parsed getenv style. */ #ifdef MACOS # define EXECUTABLE_KEY "executable_path=" #endif /* i#189: we need to re-cache after a fork */ static char * get_application_name_helper(bool ignore_cache, bool full_path) { if (!executable_path[0] || ignore_cache) { #ifdef VMX86_SERVER if (os_in_vmkernel_userworld()) { vmk_getnamefrompid(pid, executable_path, sizeof(executable_path)); } else #endif if (DYNAMO_OPTION(early_inject)) { ASSERT(executable_path[0] != '\0' && "i#907: Can't read /proc/self/exe for early injection"); } else { #ifdef LINUX /* Populate cache from /proc/self/exe link. */ strncpy(executable_path, read_proc_self_exe(ignore_cache), BUFFER_SIZE_ELEMENTS(executable_path)); #else /* OSX kernel puts full app exec path above envp */ char *c, **env = our_environ; do { env++; } while (*env != NULL); env++; /* Skip the NULL separating the envp array from exec_path */ c = *env; if (strncmp(EXECUTABLE_KEY, c, strlen(EXECUTABLE_KEY)) == 0) { c += strlen(EXECUTABLE_KEY); } /* If our frontends always absolute-ize paths prior to exec, * this should usually be absolute -- but we go ahead and * handle relative just in case (and to handle child processes). * We add the cur dir, but note that the resulting path can * still contain . or .. so it's not normalized (but it is a * correct absolute path). Xref i#1402, i#1406, i#1407. 
*/ if (*c != '/') { int len; if (!os_get_current_dir(executable_path, BUFFER_SIZE_ELEMENTS(executable_path))) len = 0; else len = strlen(executable_path); snprintf(executable_path + len, BUFFER_SIZE_ELEMENTS(executable_path) - len, "%s%s", len > 0 ? "/" : "", c); } else strncpy(executable_path, c, BUFFER_SIZE_ELEMENTS(executable_path)); #endif NULL_TERMINATE_BUFFER(executable_path); /* FIXME: Fall back on /proc/self/cmdline and maybe argv[0] from * _init(). */ ASSERT(strlen(executable_path) > 0 && "readlink /proc/self/exe failed"); } } /* Get basename. */ if (executable_basename == NULL || ignore_cache) { executable_basename = strrchr(executable_path, '/'); executable_basename = (executable_basename == NULL ? executable_path : executable_basename + 1); } return (full_path ? executable_path : executable_basename); } /* get application name, (cached), used for event logging */ char * get_application_name(void) { return get_application_name_helper(false, true /* full path */); } /* Note: this is exported so that libdrpreload.so (preload.c) can use it to * get process names to do selective process following (PR 212034). The * alternative is to duplicate or compile in this code into libdrpreload.so, * which is messy. Besides, libdynamorio.so is already loaded into the process * and avaiable, so cleaner to just use functions from it. */ DYNAMORIO_EXPORT const char * get_application_short_name(void) { return get_application_name_helper(false, false /* short name */); } /* Processor information provided by kernel */ #define PROC_CPUINFO "/proc/cpuinfo" #define CPUMHZ_LINE_LENGTH 64 #define CPUMHZ_LINE_FORMAT "cpu MHz\t\t: %lu.%03lu\n" /* printed in /usr/src/linux-2.4/arch/i386/kernel/setup.c calibrated in time.c */ /* seq_printf(m, "cpu MHz\t\t: %lu.%03lu\n", cpu_khz / 1000, (cpu_khz % 1000)) */ /* e.g. cpu MHz : 1594.851 */ static timestamp_t get_timer_frequency_cpuinfo(void) { file_t cpuinfo; ssize_t nread; char *buf; char *mhz_line; ulong cpu_mhz = 1000; ulong cpu_khz = 0; cpuinfo = os_open(PROC_CPUINFO, OS_OPEN_READ); /* This can happen in a chroot or if /proc is disabled. */ if (cpuinfo == INVALID_FILE) return 1000 * 1000; /* 1 GHz */ /* cpu MHz is typically in the first 4096 bytes. If not, or we get a short * or interrupted read, our timer frequency estimate will be off, but it's * not the end of the world. * FIXME: Factor a buffered file reader out of our maps iterator if we want * to do this the right way. */ buf = global_heap_alloc(PAGE_SIZE HEAPACCT(ACCT_OTHER)); nread = os_read(cpuinfo, buf, PAGE_SIZE - 1); if (nread > 0) { buf[nread] = '\0'; mhz_line = strstr(buf, "cpu MHz\t\t:"); if (mhz_line != NULL && sscanf(mhz_line, CPUMHZ_LINE_FORMAT, &cpu_mhz, &cpu_khz) == 2) { LOG(GLOBAL, LOG_ALL, 2, "Processor speed exactly %lu.%03luMHz\n", cpu_mhz, cpu_khz); } } global_heap_free(buf, PAGE_SIZE HEAPACCT(ACCT_OTHER)); os_close(cpuinfo); return cpu_mhz * 1000 + cpu_khz; } timestamp_t get_timer_frequency() { #ifdef VMX86_SERVER if (os_in_vmkernel_userworld()) { return vmk_get_timer_frequency(); } #endif return get_timer_frequency_cpuinfo(); } /* DR has standardized on UTC time which counts from since Jan 1, 1601. * That's the Windows standard. But Linux uses the Epoch of Jan 1, 1970. */ #define UTC_TO_EPOCH_SECONDS 11644473600 /* seconds since 1601 */ uint query_time_seconds(void) { struct timeval current_time; uint64 val = dynamorio_syscall(SYS_gettimeofday, 2, &current_time, NULL); #ifdef MACOS /* MacOS before Sierra returns usecs:secs and does not set the timeval struct. 
*/ if (macos_version < MACOS_VERSION_SIERRA) { if ((int)val < 0) return 0; return (uint)val + UTC_TO_EPOCH_SECONDS; } #endif if ((int)val >= 0) { return current_time.tv_sec + UTC_TO_EPOCH_SECONDS; } else { ASSERT_NOT_REACHED(); return 0; } } /* milliseconds since 1601 */ uint64 query_time_millis() { struct timeval current_time; uint64 val = dynamorio_syscall(SYS_gettimeofday, 2, &current_time, NULL); #ifdef MACOS /* MacOS before Sierra returns usecs:secs and does not set the timeval struct. */ if (macos_version < MACOS_VERSION_SIERRA) { if ((int)val > 0) { current_time.tv_sec = (uint) val; current_time.tv_usec = (uint)(val >> 32); } } #endif if ((int)val >= 0) { uint64 res = (((uint64)current_time.tv_sec) * 1000) + (current_time.tv_usec / 1000); res += UTC_TO_EPOCH_SECONDS * 1000; return res; } else { ASSERT_NOT_REACHED(); return 0; } } /* microseconds since 1601 */ uint64 query_time_micros() { struct timeval current_time; uint64 val = dynamorio_syscall(SYS_gettimeofday, 2, &current_time, NULL); #ifdef MACOS /* MacOS before Sierra returns usecs:secs and does not set the timeval struct. */ if (macos_version < MACOS_VERSION_SIERRA) { if ((int)val > 0) { current_time.tv_sec = (uint) val; current_time.tv_usec = (uint)(val >> 32); } } #endif if ((int)val >= 0) { uint64 res = (((uint64)current_time.tv_sec) * 1000000) + current_time.tv_usec; res += UTC_TO_EPOCH_SECONDS * 1000000; return res; } else { ASSERT_NOT_REACHED(); return 0; } } #ifdef RETURN_AFTER_CALL /* Finds the bottom of the call stack, presumably at program startup. */ /* This routine is a copycat of internal_dump_callstack and makes assumptions about program state, i.e. that frame pointers are valid and should be used only in well known points for release build. */ static app_pc find_stack_bottom() { app_pc retaddr = 0; int depth = 0; reg_t *fp; /* from dump_dr_callstack() */ asm("mov %%"ASM_XBP", %0" : "=m"(fp)); LOG(THREAD_GET, LOG_ALL, 3, "Find stack bottom:\n"); while (fp != NULL && is_readable_without_exception((byte *)fp, sizeof(reg_t)*2)) { retaddr = (app_pc)*(fp+1); /* presumably also readable */ LOG(THREAD_GET, LOG_ALL, 3, "\tframe ptr "PFX" => parent "PFX", ret = "PFX"\n", fp, *fp, retaddr); depth++; /* yes I've seen weird recursive cases before */ if (fp == (reg_t *) *fp || depth > 100) break; fp = (reg_t *) *fp; } return retaddr; } #endif /* RETURN_AFTER_CALL */ /* os-specific atexit cleanup */ void os_slow_exit(void) { signal_exit(); memquery_exit(); ksynch_exit(); generic_hash_destroy(GLOBAL_DCONTEXT, fd_table); fd_table = NULL; if (doing_detach) { vsyscall_page_start = NULL; IF_DEBUG(num_fd_add_pre_heap = 0;) } DELETE_LOCK(set_thread_area_lock); #ifdef CLIENT_INTERFACE DELETE_LOCK(client_tls_lock); #endif IF_NO_MEMQUERY(memcache_exit()); } /* os-specific atexit cleanup */ void os_fast_exit(void) { /* nothing */ } void os_terminate_with_code(dcontext_t *dcontext, terminate_flags_t flags, int exit_code) { /* i#1319: we support a signal via 2nd byte */ bool use_signal = exit_code > 0x00ff; /* XXX: TERMINATE_THREAD not supported */ ASSERT_NOT_IMPLEMENTED(TEST(TERMINATE_PROCESS, flags)); if (use_signal) { int sig = (exit_code & 0xff00) >> 8; os_terminate_via_signal(dcontext, flags, sig); ASSERT_NOT_REACHED(); } if (TEST(TERMINATE_CLEANUP, flags)) { /* we enter from several different places, so rewind until top-level kstat */ KSTOP_REWIND_UNTIL(thread_measured); cleanup_and_terminate(dcontext, SYSNUM_EXIT_PROCESS, exit_code, 0, true/*whole process*/, 0, 0); } else { /* clean up may be impossible - just terminate */ 
config_exit(); /* delete .1config file */ exit_process_syscall(exit_code); } } void os_terminate(dcontext_t *dcontext, terminate_flags_t flags) { os_terminate_with_code(dcontext, flags, -1); } int os_timeout(int time_in_milliseconds) { ASSERT_NOT_IMPLEMENTED(false); return 0; } /************************************************************************ * SEGMENT STEALING * * Not easy to make truly transparent -- but the alternative of dispatch * by thread id on global memory has performance implications. * Pull the non-STEAL_SEGMENT code out of the cvs attic for a base if * transparency becomes more of a problem. */ #define TLS_LOCAL_STATE_OFFSET (offsetof(os_local_state_t, state)) /* offset from top of page */ #define TLS_OS_LOCAL_STATE 0x00 #define TLS_SELF_OFFSET (TLS_OS_LOCAL_STATE + offsetof(os_local_state_t, self)) #define TLS_THREAD_ID_OFFSET (TLS_OS_LOCAL_STATE + offsetof(os_local_state_t, tid)) #define TLS_DCONTEXT_OFFSET (TLS_OS_LOCAL_STATE + TLS_DCONTEXT_SLOT) #ifdef X86 # define TLS_MAGIC_OFFSET (TLS_OS_LOCAL_STATE + offsetof(os_local_state_t, magic)) #endif /* they should be used with os_tls_offset, so do not need add TLS_OS_LOCAL_STATE here */ #define TLS_APP_LIB_TLS_BASE_OFFSET (offsetof(os_local_state_t, app_lib_tls_base)) #define TLS_APP_ALT_TLS_BASE_OFFSET (offsetof(os_local_state_t, app_alt_tls_base)) #define TLS_APP_LIB_TLS_REG_OFFSET (offsetof(os_local_state_t, app_lib_tls_reg)) #define TLS_APP_ALT_TLS_REG_OFFSET (offsetof(os_local_state_t, app_alt_tls_reg)) /* N.B.: imm and offs are ushorts! * We use %c[0-9] to get gcc to emit an integer constant without a leading $ for * the segment offset. See the documentation here: * http://gcc.gnu.org/onlinedocs/gccint/Output-Template.html#Output-Template * Also, var needs to match the pointer size, or else we'll get stack corruption. * XXX: This is marked volatile prevent gcc from speculating this code before * checks for is_thread_tls_initialized(), but if we could find a more * precise constraint, then the compiler would be able to optimize better. See * glibc comments on THREAD_SELF. 
*/ #ifdef X86 # define WRITE_TLS_SLOT_IMM(imm, var) \ IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \ ASSERT(sizeof(var) == sizeof(void*)); \ asm volatile("mov %0, %"ASM_SEG":%c1" : : "r"(var), "i"(imm)); # define READ_TLS_SLOT_IMM(imm, var) \ IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \ ASSERT(sizeof(var) == sizeof(void*)); \ asm volatile("mov %"ASM_SEG":%c1, %0" : "=r"(var) : "i"(imm)); # define WRITE_TLS_INT_SLOT_IMM(imm, var) \ IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \ ASSERT(sizeof(var) == sizeof(int)); \ asm volatile("movl %0, %"ASM_SEG":%c1" : : "r"(var), "i"(imm)); # define READ_TLS_INT_SLOT_IMM(imm, var) \ IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \ ASSERT(sizeof(var) == sizeof(int)); \ asm volatile("movl %"ASM_SEG":%c1, %0" : "=r"(var) : "i"(imm)); /* FIXME: need dedicated-storage var for _TLS_SLOT macros, can't use expr */ # define WRITE_TLS_SLOT(offs, var) \ IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \ ASSERT(sizeof(var) == sizeof(void*)); \ ASSERT(sizeof(offs) == 2); \ asm("mov %0, %%"ASM_XAX : : "m"((var)) : ASM_XAX); \ asm("movzw"IF_X64_ELSE("q","l")" %0, %%"ASM_XDX : : "m"((offs)) : ASM_XDX); \ asm("mov %%"ASM_XAX", %"ASM_SEG":(%%"ASM_XDX")" : : : ASM_XAX, ASM_XDX); # define READ_TLS_SLOT(offs, var) \ ASSERT(sizeof(var) == sizeof(void*)); \ ASSERT(sizeof(offs) == 2); \ asm("movzw"IF_X64_ELSE("q","l")" %0, %%"ASM_XAX : : "m"((offs)) : ASM_XAX); \ asm("mov %"ASM_SEG":(%%"ASM_XAX"), %%"ASM_XAX : : : ASM_XAX); \ asm("mov %%"ASM_XAX", %0" : "=m"((var)) : : ASM_XAX); #elif defined(AARCHXX) /* Android needs indirection through a global. The Android toolchain has * trouble with relocations if we use a global directly in asm, so we convert to * a local variable in these macros. We pay the cost of the extra instructions * for Linux ARM to share the code. */ # define WRITE_TLS_SLOT_IMM(imm, var) do { \ uint _base_offs = DR_TLS_BASE_OFFSET; \ __asm__ __volatile__( \ "mov "ASM_R2", %0 \n\t" \ READ_TP_TO_R3_DISP_IN_R2 \ "str %1, ["ASM_R3", %2] \n\t" \ : : "r" (_base_offs), "r" (var), "i" (imm) \ : "memory", ASM_R2, ASM_R3); \ } while (0) # define READ_TLS_SLOT_IMM(imm, var) do { \ uint _base_offs = DR_TLS_BASE_OFFSET; \ __asm__ __volatile__( \ "mov "ASM_R2", %1 \n\t" \ READ_TP_TO_R3_DISP_IN_R2 \ "ldr %0, ["ASM_R3", %2] \n\t" \ : "=r" (var) \ : "r" (_base_offs), "i" (imm) \ : ASM_R2, ASM_R3); \ } while (0) # define WRITE_TLS_INT_SLOT_IMM WRITE_TLS_SLOT_IMM /* b/c 32-bit */ # define READ_TLS_INT_SLOT_IMM READ_TLS_SLOT_IMM /* b/c 32-bit */ # define WRITE_TLS_SLOT(offs, var) do { \ uint _base_offs = DR_TLS_BASE_OFFSET; \ __asm__ __volatile__( \ "mov "ASM_R2", %0 \n\t" \ READ_TP_TO_R3_DISP_IN_R2 \ "add "ASM_R3", "ASM_R3", %2 \n\t" \ "str %1, ["ASM_R3"] \n\t" \ : : "r" (_base_offs), "r" (var), "r" (offs) \ : "memory", ASM_R2, ASM_R3); \ } while (0) # define READ_TLS_SLOT(offs, var) do { \ uint _base_offs = DR_TLS_BASE_OFFSET; \ __asm__ __volatile__( \ "mov "ASM_R2", %1 \n\t" \ READ_TP_TO_R3_DISP_IN_R2 \ "add "ASM_R3", "ASM_R3", %2 \n\t" \ "ldr %0, ["ASM_R3"] \n\t" \ : "=r" (var) \ : "r" (_base_offs), "r" (offs) \ : ASM_R2, ASM_R3); \ } while (0) #endif /* X86/ARM */ #ifdef X86 /* We use this at thread init and exit to make it easy to identify * whether TLS is initialized (i#2089). * We assume alignment does not matter. */ static os_local_state_t uninit_tls; /* has .magic == 0 */ #endif static bool is_thread_tls_initialized(void) { #ifdef X86 if (INTERNAL_OPTION(safe_read_tls_init)) { /* Avoid faults during early init or during exit when we have no handler. 
* It's not worth extending the handler as the faults are a perf hit anyway. */ if (!first_thread_tls_initialized || last_thread_tls_exited) return false; /* To handle WSL (i#1986) where fs and gs start out equal to ss (0x2b), * and when the MSR is used having a zero selector, and other complexities, * we just do a blind safe read as the simplest solution once we're past * initial init and have a fault handler. * * i#2089: to avoid the perf cost of syscalls to verify the tid, and to * distinguish a fork child from a separate-group thread, we no longer read * the tid field and check that the TLS belongs to this particular thread: * instead we rely on clearing the .magic field for child threads and at * thread exit (to avoid a fault) and we simply check the field here. * A native app thread is very unlikely to match this. */ return safe_read_tls_magic() == TLS_MAGIC_VALID; } else { /* XXX i#2089: we're keeping this legacy code around until * we're confident that the safe read code above is safer, more * performant, and more robust. */ os_local_state_t *os_tls = NULL; ptr_uint_t cur_seg = read_thread_register(SEG_TLS); /* Handle WSL (i#1986) where fs and gs start out equal to ss (0x2b) */ if (cur_seg != 0 && cur_seg != read_thread_register(SEG_SS)) { /* XXX: make this a safe read: but w/o dcontext we need special asm support */ READ_TLS_SLOT_IMM(TLS_SELF_OFFSET, os_tls); } # ifdef X64 if (os_tls == NULL && tls_dr_using_msr()) { /* When the MSR is used, the selector in the register remains 0. * We can't clear the MSR early in a new thread and then look for * a zero base here b/c if kernel decides to use GDT that zeroing * will set the selector, unless we want to assume we know when * the kernel uses the GDT. * Instead we make a syscall to get the tid. This should be ok * perf-wise b/c the common case is the non-zero above. */ byte *base = tls_get_fs_gs_segment_base(SEG_TLS); ASSERT(tls_global_type == TLS_TYPE_ARCH_PRCTL); if (base != (byte *) POINTER_MAX && base != NULL) { os_tls = (os_local_state_t *) base; } } # endif if (os_tls != NULL) { return (os_tls->tid == get_sys_thread_id() || /* The child of a fork will initially come here */ os_tls->state.spill_space.dcontext->owning_process == get_parent_id()); } else return false; } #elif defined(AARCHXX) byte **dr_tls_base_addr; if (tls_global_type == TLS_TYPE_NONE) return false; dr_tls_base_addr = (byte **)get_dr_tls_base_addr(); if (dr_tls_base_addr == NULL || *dr_tls_base_addr == NULL || /* We use the TLS slot's value to identify a now-exited thread (i#1578) */ *dr_tls_base_addr == TLS_SLOT_VAL_EXITED) return false; /* We would like to ASSERT is_dynamo_address(*tls_swap_slot) but that leads * to infinite recursion for an address not in the vm_reserve area, as * dynamo_vm_areas_start_reading() ending up calling * deadlock_avoidance_unlock() which calls get_thread_private_dcontext() * which comes here. */ return true; #endif } #if defined(X86) || defined(DEBUG) static bool is_thread_tls_allocated(void) { # ifdef X86 if (INTERNAL_OPTION(safe_read_tls_init)) { /* We use this routine to allow currently-native threads, for which * is_thread_tls_initialized() (and thus is_thread_initialized()) will * return false. * Caution: this will also return true on a fresh clone child. 
*/ uint magic; if (!first_thread_tls_initialized || last_thread_tls_exited) return false; magic = safe_read_tls_magic(); return magic == TLS_MAGIC_VALID || magic == TLS_MAGIC_INVALID; } # endif return is_thread_tls_initialized(); } #endif /* converts a local_state_t offset to a segment offset */ ushort os_tls_offset(ushort tls_offs) { /* no ushort truncation issues b/c TLS_LOCAL_STATE_OFFSET is 0 */ IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); ASSERT(TLS_LOCAL_STATE_OFFSET == 0); return (TLS_LOCAL_STATE_OFFSET + tls_offs); } /* XXX: Will return NULL if called before os_thread_init(), which sets * ostd->dr_fs/gs_base. */ void * os_get_priv_tls_base(dcontext_t *dcontext, reg_id_t reg) { os_thread_data_t *ostd; IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); ASSERT(reg == TLS_REG_ALT || reg == TLS_REG_LIB); if (dcontext == NULL) dcontext = get_thread_private_dcontext(); if (dcontext == NULL) return NULL; ostd = (os_thread_data_t *)dcontext->os_field; if (reg == TLS_REG_LIB) return ostd->priv_lib_tls_base; else if (reg == TLS_REG_ALT) return ostd->priv_alt_tls_base; ASSERT_NOT_REACHED(); return NULL; } os_local_state_t * get_os_tls(void) { os_local_state_t *os_tls; ASSERT(is_thread_tls_initialized()); READ_TLS_SLOT_IMM(TLS_SELF_OFFSET, os_tls); return os_tls; } /* Obtain TLS from dcontext directly, which succeeds in pre-thread-init * situations where get_os_tls() fails. */ static os_local_state_t * get_os_tls_from_dc(dcontext_t *dcontext) { byte *local_state; ASSERT(dcontext != NULL); local_state = (byte*)dcontext->local_state; if (local_state == NULL) return NULL; return (os_local_state_t *)(local_state - offsetof(os_local_state_t, state)); } #ifdef AARCHXX bool os_set_app_tls_base(dcontext_t *dcontext, reg_id_t reg, void *base) { os_local_state_t *os_tls; IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); ASSERT(reg == TLS_REG_LIB || reg == TLS_REG_ALT); if (dcontext == NULL) dcontext = get_thread_private_dcontext(); /* we will be called only if TLS is initialized */ ASSERT(dcontext != NULL); os_tls = get_os_tls_from_dc(dcontext); if (reg == TLS_REG_LIB) { os_tls->app_lib_tls_base = base; LOG(THREAD, LOG_THREADS, 1, "TLS app lib base ="PFX"\n", base); return true; } else if (reg == TLS_REG_ALT) { os_tls->app_alt_tls_base = base; LOG(THREAD, LOG_THREADS, 1, "TLS app alt base ="PFX"\n", base); return true; } ASSERT_NOT_REACHED(); return false; } #endif void * os_get_app_tls_base(dcontext_t *dcontext, reg_id_t reg) { os_local_state_t *os_tls; IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); ASSERT(reg == TLS_REG_LIB || reg == TLS_REG_ALT); if (dcontext == NULL) dcontext = get_thread_private_dcontext(); if (dcontext == NULL) { /* No dcontext means we haven't initialized TLS, so we haven't replaced * the app's segments. get_segment_base is expensive, but this should * be rare. Re-examine if it pops up in a profile. 
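         * (Reading the fs/gs base can require a syscall; xref the FIXME in
         * os_using_app_state() about caching this state in the dcontext.)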
*/ return get_segment_base(reg); } os_tls = get_os_tls_from_dc(dcontext); if (reg == TLS_REG_LIB) return os_tls->app_lib_tls_base; else if (reg == TLS_REG_ALT) return os_tls->app_alt_tls_base; ASSERT_NOT_REACHED(); return NULL; } ushort os_get_app_tls_base_offset(reg_id_t reg) { IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); ASSERT(TLS_LOCAL_STATE_OFFSET == 0); if (reg == TLS_REG_LIB) return TLS_APP_LIB_TLS_BASE_OFFSET; else if (reg == TLS_REG_ALT) return TLS_APP_ALT_TLS_BASE_OFFSET; ASSERT_NOT_REACHED(); return 0; } #ifdef X86 ushort os_get_app_tls_reg_offset(reg_id_t reg) { IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); ASSERT(TLS_LOCAL_STATE_OFFSET == 0); if (reg == TLS_REG_LIB) return TLS_APP_LIB_TLS_REG_OFFSET; else if (reg == TLS_REG_ALT) return TLS_APP_ALT_TLS_REG_OFFSET; ASSERT_NOT_REACHED(); return 0; } #endif void * get_tls(ushort tls_offs) { void *val; READ_TLS_SLOT(tls_offs, val); return val; } void set_tls(ushort tls_offs, void *value) { WRITE_TLS_SLOT(tls_offs, value); } /* Returns POINTER_MAX on failure. * Assumes that cs, ss, ds, and es are flat. * Should we export this to clients? For now they can get * this information via opnd_compute_address(). */ byte * get_segment_base(uint seg) { #ifdef X86 if (seg == SEG_CS || seg == SEG_SS || seg == SEG_DS || seg == SEG_ES) return NULL; # ifdef HAVE_TLS return tls_get_fs_gs_segment_base(seg); # else return (byte *) POINTER_MAX; #endif /* HAVE_TLS */ #elif defined(AARCHXX) /* XXX i#1551: should we rename/refactor to avoid "segment"? */ return (byte *) read_thread_register(seg); #endif } /* i#572: handle opnd_compute_address to return the application * segment base value. */ byte * get_app_segment_base(uint seg) { #ifdef X86 if (seg == SEG_CS || seg == SEG_SS || seg == SEG_DS || seg == SEG_ES) return NULL; #endif /* X86 */ if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) { return get_tls(os_get_app_tls_base_offset(seg)); } return get_segment_base(seg); } local_state_extended_t * get_local_state_extended() { os_local_state_t *os_tls; ASSERT(is_thread_tls_initialized()); READ_TLS_SLOT_IMM(TLS_SELF_OFFSET, os_tls); return &(os_tls->state); } local_state_t * get_local_state() { #ifdef HAVE_TLS return (local_state_t *) get_local_state_extended(); #else return NULL; #endif } #ifdef DEBUG void os_enter_dynamorio(void) { # ifdef ARM /* i#1578: check that app's tls value doesn't match our sentinel */ ASSERT(*(byte **)get_dr_tls_base_addr() != TLS_SLOT_VAL_EXITED); # endif } #endif /* i#107: handle segment register usage conflicts between app and dr: * os_handle_mov_seg updates the app's tls selector maintained by DR. * It is called before entering code cache in dispatch_enter_fcache. 
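 * The decoded instruction below is expected to be OP_mov_seg; we record the
 * new selector plus the corresponding base from the app's descriptor table.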
*/ void os_handle_mov_seg(dcontext_t *dcontext, byte *pc) { #ifdef X86 instr_t instr; opnd_t opnd; reg_id_t seg; ushort sel = 0; our_modify_ldt_t *desc; int desc_idx; os_local_state_t *os_tls; os_thread_data_t *ostd; instr_init(dcontext, &instr); decode_cti(dcontext, pc, &instr); /* the first instr must be mov seg */ ASSERT(instr_get_opcode(&instr) == OP_mov_seg); opnd = instr_get_dst(&instr, 0); ASSERT(opnd_is_reg(opnd)); seg = opnd_get_reg(opnd); ASSERT(reg_is_segment(seg)); ostd = (os_thread_data_t *)dcontext->os_field; desc = (our_modify_ldt_t *)ostd->app_thread_areas; os_tls = get_os_tls(); /* get the selector value */ opnd = instr_get_src(&instr, 0); if (opnd_is_reg(opnd)) { sel = (ushort)reg_get_value_priv(opnd_get_reg(opnd), get_mcontext(dcontext)); } else { void *ptr; ptr = (ushort *)opnd_compute_address_priv(opnd, get_mcontext(dcontext)); ASSERT(ptr != NULL); if (!safe_read(ptr, sizeof(sel), &sel)) { /* FIXME: if invalid address, should deliver a signal to user. */ ASSERT_NOT_IMPLEMENTED(false); } } /* calculate the entry_number */ desc_idx = SELECTOR_INDEX(sel) - tls_min_index(); if (seg == TLS_REG_LIB) { os_tls->app_lib_tls_reg = sel; os_tls->app_lib_tls_base = (void *)(ptr_uint_t) desc[desc_idx].base_addr; } else { os_tls->app_alt_tls_reg = sel; os_tls->app_alt_tls_base = (void *)(ptr_uint_t) desc[desc_idx].base_addr; } instr_free(dcontext, &instr); LOG(THREAD_GET, LOG_THREADS, 2, "thread "TIDFMT" segment change %s to selector 0x%x => " "app lib tls base: "PFX", alt tls base: "PFX"\n", get_thread_id(), reg_names[seg], sel, os_tls->app_lib_tls_base, os_tls->app_alt_tls_base); #elif defined(ARM) /* FIXME i#1551: NYI on ARM */ ASSERT_NOT_REACHED(); #endif /* X86/ARM */ } /* Initialization for TLS mangling (-mangle_app_seg on x86). * Must be called before DR setup its own segment. */ static void os_tls_app_seg_init(os_local_state_t *os_tls, void *segment) { app_pc app_lib_tls_base, app_alt_tls_base; #ifdef X86 int i, index; our_modify_ldt_t *desc; os_tls->app_lib_tls_reg = read_thread_register(TLS_REG_LIB); os_tls->app_alt_tls_reg = read_thread_register(TLS_REG_ALT); #endif app_lib_tls_base = get_segment_base(TLS_REG_LIB); app_alt_tls_base = get_segment_base(TLS_REG_ALT); /* If we're a non-initial thread, tls will be set to the parent's value, * or to &uninit_tls (i#2089), both of which will be is_dynamo_address(). */ os_tls->app_lib_tls_base = is_dynamo_address(app_lib_tls_base) ? NULL : app_lib_tls_base; os_tls->app_alt_tls_base = is_dynamo_address(app_alt_tls_base) ? NULL : app_alt_tls_base; #ifdef X86 /* get all TLS thread area value */ /* XXX: is get_thread_area supported in 64-bit kernel? * It has syscall number 211. * It works for a 32-bit application running in a 64-bit kernel. * It returns error value -38 for a 64-bit app in a 64-bit kernel. 
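     * (-38 is -ENOSYS, i.e., the syscall is not available in that case.)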
 */
    desc = &os_tls->os_seg_info.app_thread_areas[0];
    tls_initialize_indices(os_tls);
    index = tls_min_index();
    for (i = 0; i < GDT_NUM_TLS_SLOTS; i++) {
        tls_get_descriptor(i + index, &desc[i]);
    }
#endif /* X86 */

    os_tls->os_seg_info.dr_tls_base = segment;
    os_tls->os_seg_info.priv_alt_tls_base = IF_X86_ELSE(segment, NULL);

    /* now allocate the tls segment for client libraries */
    if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
        os_tls->os_seg_info.priv_lib_tls_base =
            IF_UNIT_TEST_ELSE(os_tls->app_lib_tls_base,
                              privload_tls_init(os_tls->app_lib_tls_base));
    }

#ifdef X86
    LOG(THREAD_GET, LOG_THREADS, 1,
        "thread "TIDFMT" app lib tls reg: 0x%x, alt tls reg: 0x%x\n",
        get_thread_id(), os_tls->app_lib_tls_reg, os_tls->app_alt_tls_reg);
#endif
    LOG(THREAD_GET, LOG_THREADS, 1,
        "thread "TIDFMT" app lib tls base: "PFX", alt tls base: "PFX"\n",
        get_thread_id(), os_tls->app_lib_tls_base, os_tls->app_alt_tls_base);
    LOG(THREAD_GET, LOG_THREADS, 1,
        "thread "TIDFMT" priv lib tls base: "PFX", alt tls base: "PFX", "
        "DR's tls base: "PFX"\n",
        get_thread_id(), os_tls->os_seg_info.priv_lib_tls_base,
        os_tls->os_seg_info.priv_alt_tls_base,
        os_tls->os_seg_info.dr_tls_base);
}

void
os_tls_init(void)
{
#ifdef X86
    ASSERT(TLS_MAGIC_OFFSET_ASM == TLS_MAGIC_OFFSET);
    ASSERT(TLS_SELF_OFFSET_ASM == TLS_SELF_OFFSET);
#endif
#ifdef HAVE_TLS
    /* We create a 1-page segment with an LDT entry for each thread and load its
     * selector into fs/gs.
     * FIXME PR 205276: this whole scheme currently does not check if the app is
     * using segments; we need to watch the modify_ldt syscall.
     */
    /* FIXME: heap_mmap marks as exec, we just want RW */
    byte *segment = heap_mmap(PAGE_SIZE);
    os_local_state_t *os_tls = (os_local_state_t *) segment;

    LOG(GLOBAL, LOG_THREADS, 1, "os_tls_init for thread "TIDFMT"\n", get_thread_id());
    ASSERT(!is_thread_tls_initialized());

    /* MUST zero out dcontext slot so uninit access gets NULL */
    memset(segment, 0, PAGE_SIZE);
    /* store key data in the tls itself */
    os_tls->self = os_tls;
    os_tls->tid = get_sys_thread_id();
    os_tls->tls_type = TLS_TYPE_NONE;
#ifdef X86
    os_tls->magic = TLS_MAGIC_VALID;
#endif
    /* We save DR's TLS segment base here so that os_get_dr_tls_base() will work
     * even when -no_mangle_app_seg is set.  If -mangle_app_seg is set, this
     * will be overwritten in os_tls_app_seg_init().
     */
    os_tls->os_seg_info.dr_tls_base = segment;
    ASSERT(proc_is_cache_aligned(os_tls->self + TLS_LOCAL_STATE_OFFSET));
    /* Verify that local_state_extended_t should indeed be used. */
    ASSERT(DYNAMO_OPTION(ibl_table_in_tls));

    /* initialize DR TLS seg base before replacing app's TLS in tls_thread_init */
    if (MACHINE_TLS_IS_DR_TLS)
        os_tls_app_seg_init(os_tls, segment);

    tls_thread_init(os_tls, segment);
    ASSERT(os_tls->tls_type != TLS_TYPE_NONE);
    /* store type in global var for convenience: should be same for all threads */
    tls_global_type = os_tls->tls_type;

    /* FIXME: this should be a SYSLOG fatal error?  Should fall back on !HAVE_TLS?
     * Should have create_ldt_entry() return failure instead of asserting, then.
*/ #else tls_table = (tls_slot_t *) global_heap_alloc(MAX_THREADS*sizeof(tls_slot_t) HEAPACCT(ACCT_OTHER)); memset(tls_table, 0, MAX_THREADS*sizeof(tls_slot_t)); #endif if (!first_thread_tls_initialized) { first_thread_tls_initialized = true; if (last_thread_tls_exited) /* re-attach */ last_thread_tls_exited = false; } ASSERT(is_thread_tls_initialized()); } static bool should_zero_tls_at_thread_exit() { #ifdef X86 /* i#2089: For a thread w/o CLONE_SIGHAND we cannot handle a fault, so we want to * leave &uninit_tls (which was put in place in os_thread_exit()) as long as * possible. For non-detach, that means until the exit. */ return !INTERNAL_OPTION(safe_read_tls_init) || doing_detach; #else return true; #endif } /* TLS exit for the current thread who must own local_state. */ void os_tls_thread_exit(local_state_t *local_state) { #ifdef HAVE_TLS /* We assume (assert below) that local_state_t's start == local_state_extended_t */ os_local_state_t *os_tls = (os_local_state_t *) (((byte*)local_state) - offsetof(os_local_state_t, state)); tls_type_t tls_type = os_tls->tls_type; int index = os_tls->ldt_index; ASSERT(offsetof(local_state_t, spill_space) == offsetof(local_state_extended_t, spill_space)); if (should_zero_tls_at_thread_exit()) { tls_thread_free(tls_type, index); # if defined(X86) && defined(X64) if (tls_type == TLS_TYPE_ARCH_PRCTL) { /* syscall re-sets gs register so re-clear it */ if (read_thread_register(SEG_TLS) != 0) { static const ptr_uint_t zero = 0; WRITE_DR_SEG(zero); /* macro needs lvalue! */ } } # endif } /* We already set TLS to &uninit_tls in os_thread_exit() */ if (dynamo_exited && !last_thread_tls_exited) { last_thread_tls_exited = true; first_thread_tls_initialized = false; /* for possible re-attach */ } #endif } /* Frees local_state. If the calling thread is exiting (i.e., * !other_thread) then also frees kernel resources for the calling * thread; if other_thread then that may not be possible. */ void os_tls_exit(local_state_t *local_state, bool other_thread) { #ifdef HAVE_TLS # ifdef X86 static const ptr_uint_t zero = 0; # endif /* X86 */ /* We can't read from fs: as we can be called from other threads */ /* ASSUMPTION: local_state_t is laid out at same start as local_state_extended_t */ os_local_state_t *os_tls = (os_local_state_t *) (((byte*)local_state) - offsetof(os_local_state_t, state)); # ifdef X86 /* If the MSR is in use, writing to the reg faults. We rely on it being 0 * to indicate that. */ if (!other_thread && read_thread_register(SEG_TLS) != 0 && should_zero_tls_at_thread_exit()) { WRITE_DR_SEG(zero); /* macro needs lvalue! */ } # endif /* X86 */ /* For another thread we can't really make these syscalls so we have to * leave it un-cleaned-up. That's fine if the other thread is exiting: * but for detach (i#95) we get the other thread to run this code. 
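     * (xref os_signal_thread_detach() and os_wait_thread_detached(), which
     * coordinate getting the target thread to run its own cleanup.)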
*/ if (!other_thread) os_tls_thread_exit(local_state); /* We can't free prior to tls_thread_free() in case that routine refs os_tls */ heap_munmap(os_tls->self, PAGE_SIZE); #else global_heap_free(tls_table, MAX_THREADS*sizeof(tls_slot_t) HEAPACCT(ACCT_OTHER)); DELETE_LOCK(tls_lock); #endif } static int os_tls_get_gdt_index(dcontext_t *dcontext) { os_local_state_t *os_tls = (os_local_state_t *) (((byte*)dcontext->local_state) - offsetof(os_local_state_t, state)); if (os_tls->tls_type == TLS_TYPE_GDT) return os_tls->ldt_index; else return -1; } void os_tls_pre_init(int gdt_index) { #ifdef X86 /* Only set to above 0 for tls_type == TLS_TYPE_GDT */ if (gdt_index > 0) { /* PR 458917: clear gdt slot to avoid leak across exec */ DEBUG_DECLARE(bool ok;) static const ptr_uint_t zero = 0; /* Be sure to clear the selector before anything that might * call get_thread_private_dcontext() */ WRITE_DR_SEG(zero); /* macro needs lvalue! */ DEBUG_DECLARE(ok = ) tls_clear_descriptor(gdt_index); ASSERT(ok); } #elif defined(ARM) /* FIXME i#1551: NYI on ARM */ ASSERT_NOT_IMPLEMENTED(false); #endif /* X86/ARM */ } #ifdef CLIENT_INTERFACE /* Allocates num_slots tls slots aligned with alignment align */ bool os_tls_calloc(OUT uint *offset, uint num_slots, uint alignment) { bool res = false; uint i, count = 0; int start = -1; uint offs = offsetof(os_local_state_t, client_tls); if (num_slots == 0 || num_slots > MAX_NUM_CLIENT_TLS) return false; mutex_lock(&client_tls_lock); for (i = 0; i < MAX_NUM_CLIENT_TLS; i++) { if (!client_tls_allocated[i] && /* ALIGNED doesn't work for 0 */ (alignment == 0 || ALIGNED(offs + i*sizeof(void*), alignment))) { if (start == -1) start = i; count++; if (count >= num_slots) break; } else { start = -1; count = 0; } } if (count >= num_slots) { for (i = 0; i < num_slots; i++) client_tls_allocated[i + start] = true; *offset = offs + start*sizeof(void*); res = true; } mutex_unlock(&client_tls_lock); return res; } bool os_tls_cfree(uint offset, uint num_slots) { uint i; uint offs = (offset - offsetof(os_local_state_t, client_tls))/sizeof(void*); bool ok = true; mutex_lock(&client_tls_lock); for (i = 0; i < num_slots; i++) { if (!client_tls_allocated[i + offs]) ok = false; client_tls_allocated[i + offs] = false; } mutex_unlock(&client_tls_lock); return ok; } #endif void os_thread_init(dcontext_t *dcontext) { os_local_state_t *os_tls = get_os_tls(); os_thread_data_t *ostd = (os_thread_data_t *) heap_alloc(dcontext, sizeof(os_thread_data_t) HEAPACCT(ACCT_OTHER)); dcontext->os_field = (void *) ostd; /* make sure stack fields, etc. 
are 0 now so they can be initialized on demand
     * (don't have app esp register handy here to init now)
     */
    memset(ostd, 0, sizeof(*ostd));
    ksynch_init_var(&ostd->suspended);
    ksynch_init_var(&ostd->wakeup);
    ksynch_init_var(&ostd->resumed);
    ksynch_init_var(&ostd->terminated);
    ksynch_init_var(&ostd->detached);

#ifdef RETURN_AFTER_CALL
    /* We only need the stack bottom for the initial thread, and due to thread
     * init now preceding vm_areas_init(), we initialize in find_executable_vm_areas()
     */
    ostd->stack_bottom_pc = NULL;
#endif

    ASSIGN_INIT_LOCK_FREE(ostd->suspend_lock, suspend_lock);

    signal_thread_init(dcontext);

    /* i#107: initialize thread area information;
     * the values were first retrieved in os_tls_init and stored in os_tls
     */
    ostd->priv_lib_tls_base = os_tls->os_seg_info.priv_lib_tls_base;
    ostd->priv_alt_tls_base = os_tls->os_seg_info.priv_alt_tls_base;
    ostd->dr_tls_base = os_tls->os_seg_info.dr_tls_base;

    LOG(THREAD, LOG_THREADS, 1, "TLS app lib base ="PFX"\n", os_tls->app_lib_tls_base);
    LOG(THREAD, LOG_THREADS, 1, "TLS app alt base ="PFX"\n", os_tls->app_alt_tls_base);
    LOG(THREAD, LOG_THREADS, 1, "TLS priv lib base ="PFX"\n", ostd->priv_lib_tls_base);
    LOG(THREAD, LOG_THREADS, 1, "TLS priv alt base ="PFX"\n", ostd->priv_alt_tls_base);
    LOG(THREAD, LOG_THREADS, 1, "TLS DynamoRIO base="PFX"\n", ostd->dr_tls_base);

#ifdef X86
    if (INTERNAL_OPTION(mangle_app_seg)) {
        ostd->app_thread_areas =
            heap_alloc(dcontext, sizeof(our_modify_ldt_t) * GDT_NUM_TLS_SLOTS
                       HEAPACCT(ACCT_OTHER));
        memcpy(ostd->app_thread_areas,
               os_tls->os_seg_info.app_thread_areas,
               sizeof(our_modify_ldt_t) * GDT_NUM_TLS_SLOTS);
    }
#endif

    LOG(THREAD, LOG_THREADS, 1, "post-TLS-setup, cur %s base is "PFX"\n",
        IF_X86_ELSE("gs", "tpidruro"),
        get_segment_base(IF_X86_ELSE(SEG_GS, DR_REG_TPIDRURO)));
    LOG(THREAD, LOG_THREADS, 1, "post-TLS-setup, cur %s base is "PFX"\n",
        IF_X86_ELSE("fs", "tpidrurw"),
        get_segment_base(IF_X86_ELSE(SEG_FS, DR_REG_TPIDRURW)));

#ifdef MACOS
    /* XXX: do we need to free/close dcontext->thread_port?  I don't think so. */
    dcontext->thread_port = dynamorio_mach_syscall(MACH_thread_self_trap, 0);
    LOG(THREAD, LOG_ALL, 1, "Mach thread port: %d\n", dcontext->thread_port);
#endif
}

void
os_thread_exit(dcontext_t *dcontext, bool other_thread)
{
    os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field;

    /* i#237/PR 498284: if we had a vfork child call execve we need to clean up
     * the env vars.
     */
    if (dcontext->thread_record->execve)
        handle_execve_post(dcontext);

    DELETE_LOCK(ostd->suspend_lock);

    signal_thread_exit(dcontext, other_thread);

    ksynch_free_var(&ostd->suspended);
    ksynch_free_var(&ostd->wakeup);
    ksynch_free_var(&ostd->resumed);
    ksynch_free_var(&ostd->terminated);
    ksynch_free_var(&ostd->detached);

#ifdef X86
    if (ostd->clone_tls != NULL) {
        if (!other_thread) {
            /* Avoid faults in is_thread_tls_initialized() */
            /* FIXME i#2088: we need to restore the app's aux seg, if any, instead.
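             * For now we point the slot at &uninit_tls (whose .magic is 0), so
             * safe_read_tls_magic() reads an invalid magic instead of faulting.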
             */
            os_set_dr_tls_base(dcontext, NULL, (byte *)&uninit_tls);
        }
        DODEBUG({
            HEAP_TYPE_FREE(dcontext, ostd->clone_tls, os_local_state_t,
                           ACCT_THREAD_MGT, UNPROTECTED);
        });
    }
#endif

    /* for non-debug we do fast exit path and don't free local heap */
    DODEBUG({
        if (MACHINE_TLS_IS_DR_TLS) {
#ifdef X86
            heap_free(dcontext, ostd->app_thread_areas,
                      sizeof(our_modify_ldt_t) * GDT_NUM_TLS_SLOTS
                      HEAPACCT(ACCT_OTHER));
#endif
#ifdef CLIENT_INTERFACE
            if (INTERNAL_OPTION(private_loader))
                privload_tls_exit(IF_UNIT_TEST_ELSE(NULL, ostd->priv_lib_tls_base));
#endif
        }
        heap_free(dcontext, ostd, sizeof(os_thread_data_t) HEAPACCT(ACCT_OTHER));
    });
}

/* Happens in the parent prior to fork. */
static void
os_fork_pre(dcontext_t *dcontext)
{
    os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field;

    /* Otherwise a thread might wait for us. */
    ASSERT_OWN_NO_LOCKS();
    ASSERT(ostd->fork_threads == NULL && ostd->fork_num_threads == 0);

    /* i#239: Synch with all other threads to ensure that they are holding no
     * locks across the fork.
     * FIXME i#26: Suspend signals received before initializing siginfo are
     * squelched, so we won't be able to suspend threads that are initializing.
     */
    LOG(GLOBAL, 2, LOG_SYSCALLS|LOG_THREADS,
        "fork: synching with other threads to prevent deadlock in child\n");
    if (!synch_with_all_threads(THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT_OR_NO_XFER,
                                &ostd->fork_threads,
                                &ostd->fork_num_threads,
                                THREAD_SYNCH_VALID_MCONTEXT,
                                /* If we fail to suspend a thread, there is a
                                 * risk of deadlock in the child, so it's worth
                                 * retrying on failure.
                                 */
                                THREAD_SYNCH_SUSPEND_FAILURE_RETRY)) {
        /* If we failed to synch with all threads, we live with the possibility
         * of deadlock and continue as normal.
         */
        LOG(GLOBAL, 1, LOG_SYSCALLS|LOG_THREADS,
            "fork: synch failed, possible deadlock in child\n");
        ASSERT_CURIOSITY(false);
    }

    /* We go back to the code cache to execute the syscall, so we can't hold
     * locks.  If the synch succeeded, no one else is running, so it should be
     * safe to release these locks.  However, if there are any rogue threads,
     * then releasing these locks will allow them to synch and create threads.
     * Such threads could be running due to synch failure or presence of
     * non-suspendable client threads.  We keep our data in ostd to prevent some
     * conflicts, but there are some unhandled corner cases.
     */
    mutex_unlock(&thread_initexit_lock);
    mutex_unlock(&all_threads_synch_lock);
}

/* Happens after the fork in both the parent and child. */
static void
os_fork_post(dcontext_t *dcontext, bool parent)
{
    os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field;
    /* Re-acquire the locks we released before the fork. */
    mutex_lock(&all_threads_synch_lock);
    mutex_lock(&thread_initexit_lock);
    /* Resume the other threads that we suspended. */
    if (parent) {
        LOG(GLOBAL, 2, LOG_SYSCALLS|LOG_THREADS,
            "fork: resuming other threads after fork\n");
    }
    end_synch_with_all_threads(ostd->fork_threads, ostd->fork_num_threads,
                               parent/*resume in parent, not in child*/);
    ostd->fork_threads = NULL;  /* Freed by end_synch_with_all_threads. */
    ostd->fork_num_threads = 0;
}

/* this one is called before child's new logfiles are set up */
void
os_fork_init(dcontext_t *dcontext)
{
    int iter;
    /* We use a larger data size than file_t to avoid clobbering our stack (i#991) */
    ptr_uint_t fd;
    ptr_uint_t flags;

    /* Static assert would save debug build overhead: could use array bound trick */
    ASSERT(sizeof(file_t) <= sizeof(ptr_uint_t));

    /* i#239: If there were unsuspended threads across the fork, we could have
We reset the locks and try to
     * cope with any intermediate state left behind from the parent.  If we
     * encounter more deadlocks after fork, we can add more lock and data resets
     * on a case by case basis.
     */
    mutex_fork_reset(&all_threads_synch_lock);
    mutex_fork_reset(&thread_initexit_lock);

    os_fork_post(dcontext, false/*!parent*/);

    /* re-populate cached data that contains pid */
    pid_cached = get_process_id();
    get_application_pid_helper(true);
    get_application_name_helper(true, true /* not important */);

    /* close all copies of parent files */
    TABLE_RWLOCK(fd_table, write, lock);
    iter = 0;
    do {
        iter = generic_hash_iterate_next(GLOBAL_DCONTEXT, fd_table, iter,
                                         &fd, (void **)&flags);
        if (iter < 0)
            break;
        if (TEST(OS_OPEN_CLOSE_ON_FORK, flags)) {
            close_syscall((file_t)fd);
            iter = generic_hash_iterate_remove(GLOBAL_DCONTEXT, fd_table,
                                               iter, fd);
        }
    } while (true);
    TABLE_RWLOCK(fd_table, write, unlock);
}

static void
os_swap_dr_tls(dcontext_t *dcontext, bool to_app)
{
#ifdef X86
    /* If the option is off, we really should swap it (xref i#107/i#2088 comments
     * in os_swap_context()) but there are few consequences of not doing it, and we
     * have no code set up separate from the i#2089 scheme here.
     */
    if (!INTERNAL_OPTION(safe_read_tls_init))
        return;
    if (to_app) {
        /* i#2089: we want the child to inherit a TLS with invalid .magic, but we
         * need our own syscall execution and post-syscall code to have valid scratch
         * and dcontext values.  We can't clear our own magic b/c we don't know when
         * the child will be scheduled, so we use a copy of our TLS.  We carefully
         * never have a valid magic there in case a prior child is still unscheduled.
         *
         * We assume the child will not modify this TLS copy in any way.
         * CLONE_SETTLS touches the other segment (we'll have to watch for
         * addition of CLONE_SETTLS_AUX).  The parent will use the scratch space
         * when returning from the syscall to dispatch, but we restore via
         * os_clone_post() immediately before anybody calls
         * get_thread_private_dcontext() or anything.
         */
        /* FIXME i#2088: to preserve the app's aux seg, if any, we should pass it
         * and the seg reg value via the clone record (like we do for ARM today).
         */
        os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
        os_local_state_t *cur_tls = get_os_tls_from_dc(dcontext);
        if (ostd->clone_tls == NULL) {
            ostd->clone_tls = (os_local_state_t *)
                HEAP_TYPE_ALLOC(dcontext, os_local_state_t, ACCT_THREAD_MGT,
                                UNPROTECTED);
            LOG(THREAD, LOG_THREADS, 2, "TLS copy is "PFX"\n", ostd->clone_tls);
        }
        /* Leave no window where a prior uninit child could read valid magic by
         * invalidating prior to copying.
         */
        cur_tls->magic = TLS_MAGIC_INVALID;
        memcpy(ostd->clone_tls, cur_tls, sizeof(*ostd->clone_tls));
        cur_tls->magic = TLS_MAGIC_VALID;
        ostd->clone_tls->self = ostd->clone_tls;
        os_set_dr_tls_base(dcontext, NULL, (byte *)ostd->clone_tls);
    } else {
        /* i#2089: restore the parent's DR TLS */
        os_local_state_t *real_tls = get_os_tls_from_dc(dcontext);
        /* For dr_app_start we can end up here with nothing to do, so we check. */
        if (get_segment_base(SEG_TLS) != (byte *)real_tls) {
            DEBUG_DECLARE(os_thread_data_t *ostd =
                          (os_thread_data_t *)dcontext->os_field);
            ASSERT(get_segment_base(SEG_TLS) == (byte *)ostd->clone_tls);
            /* We assume there's no need to copy the scratch slots back */
            os_set_dr_tls_base(dcontext, real_tls, (byte *)real_tls);
        }
    }
#endif
}

static void
os_clone_pre(dcontext_t *dcontext)
{
    /* We switch the lib tls segment back to app's segment.
     * Please refer to comment on os_switch_lib_tls.
     */
    if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
        os_switch_lib_tls(dcontext, true/*to app*/);
    }
    os_swap_dr_tls(dcontext, true/*to app*/);
}

/* This is called from dispatch prior to post_system_call() */
void
os_clone_post(dcontext_t *dcontext)
{
    os_swap_dr_tls(dcontext, false/*to DR*/);
}

byte *
os_get_dr_tls_base(dcontext_t *dcontext)
{
    os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field;
    return ostd->dr_tls_base;
}

/* We only bother swapping the library segment if we're using the private
 * loader.
 */
bool
os_should_swap_state(void)
{
#ifdef X86
    /* -private_loader currently implies -mangle_app_seg, but let's be safe. */
    return (INTERNAL_OPTION(mangle_app_seg) &&
            IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false));
#elif defined(AARCHXX)
    /* FIXME i#1582: this should return true, but there is a lot of complexity
     * getting os_switch_seg_to_context() to do the right thing when called
     * at main thread init, secondary thread init, early and late injection,
     * and thread exit, since it is fragile with its writes to app TLS.
     */
    return false;
#endif
}

bool
os_using_app_state(dcontext_t *dcontext)
{
#ifdef X86
    /* FIXME: This could be optimized to avoid the syscall by keeping state in
     * the dcontext.
     */
    if (INTERNAL_OPTION(mangle_app_seg)) {
        return (get_segment_base(TLS_REG_LIB) ==
                os_get_app_tls_base(dcontext, TLS_REG_LIB));
    }
#endif
    /* We're always in the app state if we're not mangling. */
    return true;
}

/* Similar to PEB swapping on Windows, this call will switch between DR's
 * private lib segment base and the app's segment base.
 * i#107/i#2088: If the app wants to use SEG_TLS, we should also switch that back at
 * this boundary, but there are many places where we simply assume it is always
 * installed.
 */
void
os_swap_context(dcontext_t *dcontext, bool to_app, dr_state_flags_t flags)
{
    if (os_should_swap_state())
        os_switch_seg_to_context(dcontext, LIB_SEG_TLS, to_app);
    if (TEST(DR_STATE_DR_TLS, flags))
        os_swap_dr_tls(dcontext, to_app);
}

void
os_swap_context_go_native(dcontext_t *dcontext, dr_state_flags_t flags)
{
#ifdef AARCHXX
    /* FIXME i#1582: remove this routine once os_should_swap_state()
     * is not disabled and we can actually call
     * os_swap_context_go_native() safely from multiple places.
     */
    os_switch_seg_to_context(dcontext, LIB_SEG_TLS, true/*to app*/);
#else
    os_swap_context(dcontext, true/*to app*/, flags);
#endif
}

void
os_thread_under_dynamo(dcontext_t *dcontext)
{
    os_swap_context(dcontext, false/*to dr*/, DR_STATE_GO_NATIVE);
    start_itimer(dcontext);
}

void
os_thread_not_under_dynamo(dcontext_t *dcontext)
{
    stop_itimer(dcontext);
    os_swap_context(dcontext, true/*to app*/, DR_STATE_GO_NATIVE);
}

void
os_process_under_dynamorio_initiate(dcontext_t *dcontext)
{
    LOG(GLOBAL, LOG_THREADS, 1, "process now under DR\n");
    /* We only support regular process-wide signal handlers for delayed takeover. */
    /* i#2161: we ignore alarm signals during the attach process to avoid races. */
    signal_reinstate_handlers(dcontext, true/*ignore alarm*/);
    hook_vsyscall(dcontext, false);
}

void
os_process_under_dynamorio_complete(dcontext_t *dcontext)
{
    /* i#2161: only now do we un-ignore alarm signals. */
    signal_reinstate_alarm_handlers(dcontext);
}

void
os_process_not_under_dynamorio(dcontext_t *dcontext)
{
    /* We only support regular process-wide signal handlers for mixed-mode control.
     */
    signal_remove_handlers(dcontext);
    unhook_vsyscall();
    LOG(GLOBAL, LOG_THREADS, 1, "process no longer under DR\n");
}

bool
detach_do_not_translate(thread_record_t *tr)
{
    return false;
}

void
detach_finalize_translation(thread_record_t *tr, priv_mcontext_t *mc)
{
    /* Nothing to do. */
}

void
detach_finalize_cleanup(void)
{
    /* Nothing to do. */
}

static pid_t
get_process_group_id()
{
    return dynamorio_syscall(SYS_getpgid, 0);
}

#endif /* !NOT_DYNAMORIO_CORE_PROPER: around most of file, to exclude preload */
process_id_t
get_process_id()
{
    return dynamorio_syscall(SYS_getpid, 0);
}
#ifndef NOT_DYNAMORIO_CORE_PROPER /* around most of file, to exclude preload */

process_id_t
get_parent_id(void)
{
    return dynamorio_syscall(SYS_getppid, 0);
}

thread_id_t
get_sys_thread_id(void)
{
#ifdef MACOS
    if (kernel_thread_groups)
        return dynamorio_syscall(SYS_thread_selfid, 0);
#else
    if (kernel_thread_groups)
        return dynamorio_syscall(SYS_gettid, 0);
#endif
    return dynamorio_syscall(SYS_getpid, 0);
}

thread_id_t
get_thread_id(void)
{
    /* i#228/PR 494330: making a syscall here is a perf bottleneck since we call
     * this routine in read and recursive locks, so use the TLS value instead
     */
    thread_id_t id = get_tls_thread_id();
    if (id != INVALID_THREAD_ID)
        return id;
    else
        return get_sys_thread_id();
}

thread_id_t
get_tls_thread_id(void)
{
    ptr_int_t tid; /* can't use thread_id_t since it's 32-bits */
    if (!is_thread_tls_initialized())
        return INVALID_THREAD_ID;
    READ_TLS_SLOT_IMM(TLS_THREAD_ID_OFFSET, tid);
    /* it reads 8 bytes of memory, which includes app_gs and app_fs.
     * 0x000000007127357b <get_tls_thread_id+37>:  mov %gs:(%rax),%rax
     * 0x000000007127357f <get_tls_thread_id+41>:  mov %rax,-0x8(%rbp)
     * so we remove the TRUNCATE check and truncate it on return.
     */
    return (thread_id_t) tid;
}

/* returns the thread-private dcontext pointer for the calling thread */
dcontext_t*
get_thread_private_dcontext(void)
{
#ifdef HAVE_TLS
    dcontext_t *dcontext;
    /* We have to check this b/c this is called from __errno_location prior
     * to os_tls_init, as well as after os_tls_exit, and early in a new
     * thread's initialization (see comments below on that).
     */
    if (!is_thread_tls_initialized())
        return (IF_CLIENT_INTERFACE(standalone_library ? GLOBAL_DCONTEXT :) NULL);
    /* We used to check tid and return NULL to distinguish parent from child, but
     * that was affecting performance (xref PR 207366: but I'm leaving the assert in
     * for now so debug build will still incur it).  So we fixed the cases that
     * needed that:
     *
     * - dynamo_thread_init() calling is_thread_initialized() for a new thread
     *   created via clone or the start/stop interface: so we have
     *   is_thread_initialized() pay the get_thread_id() cost.
     * - new_thread_setup()'s ENTER_DR_HOOK kstats, or a crash and the signal
     *   handler asking about dcontext: we have new_thread_dynamo_start()
     *   clear the segment register for us early on.
     * - child of fork (ASSERT_OWN_NO_LOCKS, etc. on re-entering DR):
     *   here we just suppress the assert: we'll use this same dcontext.
     *   xref PR 209518 where w/o this fix we used to need an extra KSTOP.
     *
     * An alternative would be to have the parent thread clear the segment
     * register, or even set up the child's TLS ahead of time ourselves
     * (and special-case so that we know if at clone syscall the app state is not
     * quite correct: but we're already stealing a register there: PR 286194).
     * We could also have the kernel set up TLS for us (PR 285898).
     *
     * For hotp_only or non-full-control (native_exec, e.g.)
(PR 212012), this * routine is not the only issue: we have to catch all new threads since * hotp_only gateways assume tls is set up. * Xref PR 192231. */ /* PR 307698: this assert causes large slowdowns (also xref PR 207366) */ DOCHECK(CHKLVL_DEFAULT+1, { ASSERT(get_tls_thread_id() == get_sys_thread_id() || /* ok for fork as mentioned above */ pid_cached != get_process_id()); }); READ_TLS_SLOT_IMM(TLS_DCONTEXT_OFFSET, dcontext); return dcontext; #else /* Assumption: no lock needed on a read => no race conditions between * reading and writing same tid! Since both get and set are only for * the current thread, they cannot both execute simultaneously for the * same tid, right? */ thread_id_t tid = get_thread_id(); int i; if (tls_table != NULL) { for (i=0; i<MAX_THREADS; i++) { if (tls_table[i].tid == tid) { return tls_table[i].dcontext; } } } return NULL; #endif } /* sets the thread-private dcontext pointer for the calling thread */ void set_thread_private_dcontext(dcontext_t *dcontext) { #ifdef HAVE_TLS ASSERT(is_thread_tls_allocated()); WRITE_TLS_SLOT_IMM(TLS_DCONTEXT_OFFSET, dcontext); #else thread_id_t tid = get_thread_id(); int i; bool found = false; ASSERT(tls_table != NULL); mutex_lock(&tls_lock); for (i=0; i<MAX_THREADS; i++) { if (tls_table[i].tid == tid) { if (dcontext == NULL) { /* if setting to NULL, clear the entire slot for reuse */ tls_table[i].tid = 0; } tls_table[i].dcontext = dcontext; found = true; break; } } if (!found) { if (dcontext == NULL) { /* don't do anything...but why would this happen? */ } else { /* look for an empty slot */ for (i=0; i<MAX_THREADS; i++) { if (tls_table[i].tid == 0) { tls_table[i].tid = tid; tls_table[i].dcontext = dcontext; found = true; break; } } } } mutex_unlock(&tls_lock); ASSERT(found); #endif } /* replaces old with new * use for forking: child should replace parent's id with its own */ static void replace_thread_id(thread_id_t old, thread_id_t new) { #ifdef HAVE_TLS thread_id_t new_tid = new; ASSERT(is_thread_tls_initialized()); DOCHECK(1, { thread_id_t old_tid; READ_TLS_INT_SLOT_IMM(TLS_THREAD_ID_OFFSET, old_tid); ASSERT(old_tid == old); }); WRITE_TLS_INT_SLOT_IMM(TLS_THREAD_ID_OFFSET, new_tid); #else int i; mutex_lock(&tls_lock); for (i=0; i<MAX_THREADS; i++) { if (tls_table[i].tid == old) { tls_table[i].tid = new; break; } } mutex_unlock(&tls_lock); #endif } #endif /* !NOT_DYNAMORIO_CORE_PROPER */ /* translate permission string to platform independent protection bits */ uint permstr_to_memprot(const char * const perm) { uint mem_prot = 0; if (perm == NULL || *perm == '\0') return mem_prot; if (perm[2]=='x') mem_prot |= MEMPROT_EXEC; if (perm[1]=='w') mem_prot |= MEMPROT_WRITE; if (perm[0]=='r') mem_prot |= MEMPROT_READ; return mem_prot; } /* translate platform independent protection bits to native flags */ uint memprot_to_osprot(uint prot) { uint mmap_prot = 0; if (TEST(MEMPROT_EXEC, prot)) mmap_prot |= PROT_EXEC; if (TEST(MEMPROT_READ, prot)) mmap_prot |= PROT_READ; if (TEST(MEMPROT_WRITE, prot)) mmap_prot |= PROT_WRITE; return mmap_prot; } #ifndef NOT_DYNAMORIO_CORE_PROPER /* translate native flags to platform independent protection bits */ static inline uint osprot_to_memprot(uint prot) { uint mem_prot = 0; if (TEST(PROT_EXEC, prot)) mem_prot |= MEMPROT_EXEC; if (TEST(PROT_READ, prot)) mem_prot |= MEMPROT_READ; if (TEST(PROT_WRITE, prot)) mem_prot |= MEMPROT_WRITE; return mem_prot; } #endif /* returns osprot flags preserving all native protection flags except * for RWX, which are replaced according to memprot */ uint 
osprot_replace_memprot(uint old_osprot, uint memprot) { /* Note only protection flags PROT_ are relevant to mprotect() * and they are separate from any other MAP_ flags passed to mmap() */ uint new_osprot = memprot_to_osprot(memprot); return new_osprot; } /* libc independence */ static inline long mprotect_syscall(byte *p, size_t size, uint prot) { return dynamorio_syscall(SYS_mprotect, 3, p, size, prot); } bool mmap_syscall_succeeded(byte *retval) { ptr_int_t result = (ptr_int_t) retval; /* libc interprets up to -PAGE_SIZE as an error, and you never know if * some weird errno will be used by say vmkernel (xref PR 365331) */ bool fail = (result < 0 && result >= -PAGE_SIZE); ASSERT_CURIOSITY(!fail || IF_VMX86(result == -ENOENT ||) IF_VMX86(result == -ENOSPC ||) result == -EBADF || result == -EACCES || result == -EINVAL || result == -ETXTBSY || result == -EAGAIN || result == -ENOMEM || result == -ENODEV || result == -EFAULT || result == -EPERM); return !fail; } /* N.B.: offs should be in pages for 32-bit Linux */ static inline byte * mmap_syscall(byte *addr, size_t len, ulong prot, ulong flags, ulong fd, ulong offs) { #if defined(MACOS) && !defined(X64) return (byte *)(ptr_int_t) dynamorio_syscall(SYS_mmap, 7, addr, len, prot, flags, fd, /* represent 64-bit arg as 2 32-bit args */ offs, 0); #else return (byte *)(ptr_int_t) dynamorio_syscall(IF_MACOS_ELSE(SYS_mmap, IF_X64_ELSE(SYS_mmap, SYS_mmap2)), 6, addr, len, prot, flags, fd, offs); #endif } static inline long munmap_syscall(byte *addr, size_t len) { return dynamorio_syscall(SYS_munmap, 2, addr, len); } #ifndef NOT_DYNAMORIO_CORE_PROPER /* free memory allocated from os_raw_mem_alloc */ bool os_raw_mem_free(void *p, size_t size, uint flags, heap_error_code_t *error_code) { long rc; ASSERT(error_code != NULL); ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE)); rc = munmap_syscall(p, size); if (rc != 0) { *error_code = -rc; } else { *error_code = HEAP_ERROR_SUCCESS; } return (rc == 0); } /* try to alloc memory at preferred from os directly, * caller is required to handle thread synchronization and to update */ void * os_raw_mem_alloc(void *preferred, size_t size, uint prot, uint flags, heap_error_code_t *error_code) { byte *p; uint os_prot = memprot_to_osprot(prot); uint os_flags = MAP_PRIVATE | MAP_ANONYMOUS | (TEST(RAW_ALLOC_32BIT, flags) ? MAP_32BIT : 0); ASSERT(error_code != NULL); /* should only be used on aligned pieces */ ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE)); p = mmap_syscall(preferred, size, os_prot, os_flags, -1, 0); if (!mmap_syscall_succeeded(p)) { *error_code = -(heap_error_code_t)(ptr_int_t)p; LOG(GLOBAL, LOG_HEAP, 3, "os_raw_mem_alloc %d bytes failed"PFX"\n", size, p); return NULL; } if (preferred != NULL && p != preferred) { *error_code = HEAP_ERROR_NOT_AT_PREFERRED; os_raw_mem_free(p, size, flags, error_code); LOG(GLOBAL, LOG_HEAP, 3, "os_raw_mem_alloc %d bytes failed"PFX"\n", size, p); return NULL; } LOG(GLOBAL, LOG_HEAP, 2, "os_raw_mem_alloc: "SZFMT" bytes @ "PFX"\n", size, p); return p; } #ifdef LINUX void init_emulated_brk(app_pc exe_end) { ASSERT(DYNAMO_OPTION(emulate_brk)); if (app_brk_map != NULL) return; /* i#1004: emulate brk via a separate mmap. * The real brk starts out empty, but we need at least a page to have an * mmap placeholder. 
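     * (emulate_app_brk() below then grows or shrinks this map with mremap and
     * munmap as the app moves its break.)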
     */
    app_brk_map = mmap_syscall(exe_end, PAGE_SIZE, PROT_READ|PROT_WRITE,
                               MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    ASSERT(mmap_syscall_succeeded(app_brk_map));
    app_brk_cur = app_brk_map;
    app_brk_end = app_brk_map + PAGE_SIZE;
}

static byte *
emulate_app_brk(dcontext_t *dcontext, byte *new_val)
{
    byte *old_brk = app_brk_cur;
    ASSERT(DYNAMO_OPTION(emulate_brk));
    LOG(THREAD, LOG_HEAP, 2, "%s: cur="PFX", requested="PFX"\n",
        __FUNCTION__, app_brk_cur, new_val);
    new_val = (byte *) ALIGN_FORWARD(new_val, PAGE_SIZE);
    if (new_val == NULL || new_val == app_brk_cur ||
        /* Not allowed to shrink below original base */
        new_val < app_brk_map) {
        /* Just return cur val */
    } else if (new_val < app_brk_cur) {
        /* Shrink */
        if (munmap_syscall(new_val, app_brk_cur - new_val) == 0) {
            app_brk_cur = new_val;
            app_brk_end = new_val;
        }
    } else if (new_val < app_brk_end) {
        /* We've already allocated the space */
        app_brk_cur = new_val;
    } else {
        /* Expand */
        byte *remap = (byte *)
            dynamorio_syscall(SYS_mremap, 4, app_brk_map,
                              app_brk_end - app_brk_map,
                              new_val - app_brk_map, 0/*do not move*/);
        if (mmap_syscall_succeeded(remap)) {
            ASSERT(remap == app_brk_map);
            app_brk_cur = new_val;
            app_brk_end = new_val;
        } else {
            LOG(THREAD, LOG_HEAP, 1, "%s: mremap to "PFX" failed\n",
                __FUNCTION__, new_val);
        }
    }
    if (app_brk_cur != old_brk)
        handle_app_brk(dcontext, app_brk_map, old_brk, app_brk_cur);
    return app_brk_cur;
}
#endif /* LINUX */

#if defined(CLIENT_INTERFACE) && defined(LINUX)
DR_API
/* XXX: could add dr_raw_mem_realloc() instead of dr_raw_mremap() -- though there
 * is no realloc for Windows: supposed to reserve yourself and then commit in
 * pieces.
 */
void *
dr_raw_mremap(void *old_address, size_t old_size, size_t new_size,
              int flags, void *new_address)
{
    byte *res;
    dr_mem_info_t info;
    dcontext_t *dcontext = get_thread_private_dcontext();
    /* i#173: we need prot + type from prior to mremap */
    DEBUG_DECLARE(bool ok =)
        query_memory_ex(old_address, &info);
    /* XXX: this could be a large region w/ multiple protection regions
     * inside.  For now we assume our handling of it doesn't care.
     */
    ASSERT(ok);
    if (is_pretend_or_executable_writable(old_address))
        info.prot |= DR_MEMPROT_WRITE;
    /* we just unconditionally send the 5th param */
    res = (byte *) dynamorio_syscall(SYS_mremap, 5, old_address, old_size,
                                     new_size, flags, new_address);
    handle_app_mremap(dcontext, res, new_size, old_address, old_size,
                      info.prot, info.size);
    return res;
}

DR_API
void *
dr_raw_brk(void *new_address)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    if (DYNAMO_OPTION(emulate_brk)) {
        /* i#1004: emulate brk via a separate mmap */
        return (void *) emulate_app_brk(dcontext, (byte *)new_address);
    } else {
        /* We pay the cost of 2 syscalls.  This should be infrequent enough that
         * it doesn't matter.
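         * (One SYS_brk to query the old break and a second to set the new one,
         * so we can report both bounds to handle_app_brk().)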
*/ if (new_address == NULL) { /* Just a query */ return (void *) dynamorio_syscall(SYS_brk, 1, new_address); } else { byte *old_brk = (byte *) dynamorio_syscall(SYS_brk, 1, 0); byte *res = (byte *) dynamorio_syscall(SYS_brk, 1, new_address); handle_app_brk(dcontext, NULL, old_brk, res); return res; } } } #endif /* CLIENT_INTERFACE && LINUX */ /* caller is required to handle thread synchronization and to update dynamo vm areas */ void os_heap_free(void *p, size_t size, heap_error_code_t *error_code) { long rc; ASSERT(error_code != NULL); if (!dynamo_exited) LOG(GLOBAL, LOG_HEAP, 4, "os_heap_free: %d bytes @ "PFX"\n", size, p); rc = munmap_syscall(p, size); if (rc != 0) { *error_code = -rc; } else { *error_code = HEAP_ERROR_SUCCESS; } ASSERT(rc == 0); } /* reserve virtual address space without committing swap space for it, and of course no physical pages since it will never be touched */ /* to be transparent, we do not use sbrk, and are * instead using mmap, and asserting that all os_heap requests are for * reasonably large pieces of memory */ void * os_heap_reserve(void *preferred, size_t size, heap_error_code_t *error_code, bool executable) { void *p; uint prot = PROT_NONE; #ifdef VMX86_SERVER /* PR 365331: we need to be in the mmap_text region for code cache and * gencode (PROT_EXEC). */ ASSERT(!os_in_vmkernel_userworld() || !executable || preferred == NULL || ((byte *)preferred >= os_vmk_mmap_text_start() && ((byte *)preferred)+size <= os_vmk_mmap_text_end())); /* Note that a preferred address overrides PROT_EXEC and a mmap_data * address will be honored, even though any execution there will fault. */ /* FIXME: note that PROT_EXEC => read access, so our guard pages and other * non-committed memory, while not writable, is readable. * Plus, we can't later clear all prot bits for userworld mmap due to PR 107872 * (PR 365748 covers fixing this for us). * But in most uses we should get our preferred vmheap and shouldn't run * out of vmheap, so this should be a corner-case issue. */ if (executable) prot = PROT_EXEC; #endif /* should only be used on aligned pieces */ ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE)); ASSERT(error_code != NULL); /* FIXME: note that this memory is in fact still committed - see man mmap */ /* FIXME: case 2347 on Linux or -vm_reserve should be set to false */ /* FIXME: Need to actually get a mmap-ing with |MAP_NORESERVE */ p = mmap_syscall(preferred, size, prot, MAP_PRIVATE|MAP_ANONYMOUS IF_X64(| (DYNAMO_OPTION(heap_in_lower_4GB) ? MAP_32BIT : 0)), -1, 0); if (!mmap_syscall_succeeded(p)) { *error_code = -(heap_error_code_t)(ptr_int_t)p; LOG(GLOBAL, LOG_HEAP, 4, "os_heap_reserve %d bytes failed "PFX"\n", size, p); return NULL; } else if (preferred != NULL && p != preferred) { /* We didn't get the preferred address. To harmonize with windows behavior and * give greater control we fail the reservation. 
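         * (os_heap_reserve_in_region() relies on this failure to advance to
         * the next free slot in its search loop.)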
*/ heap_error_code_t dummy; *error_code = HEAP_ERROR_NOT_AT_PREFERRED; os_heap_free(p, size, &dummy); ASSERT(dummy == HEAP_ERROR_SUCCESS); LOG(GLOBAL, LOG_HEAP, 4, "os_heap_reserve %d bytes at "PFX" not preferred "PFX"\n", size, preferred, p); return NULL; } else { *error_code = HEAP_ERROR_SUCCESS; } LOG(GLOBAL, LOG_HEAP, 2, "os_heap_reserve: %d bytes @ "PFX"\n", size, p); #ifdef VMX86_SERVER /* PR 365331: ensure our memory is all in the mmap_text region */ ASSERT(!os_in_vmkernel_userworld() || !executable || ((byte *)p >= os_vmk_mmap_text_start() && ((byte *)p) + size <= os_vmk_mmap_text_end())); #endif #if defined(ANDROID) && defined(DEBUG) /* We don't label in release to be more transparent */ dynamorio_syscall(SYS_prctl, 5, PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, size, "DynamoRIO-internal"); #endif return p; } static bool find_free_memory_in_region(byte *start, byte *end, size_t size, byte **found_start OUT, byte **found_end OUT) { memquery_iter_t iter; /* XXX: despite /proc/sys/vm/mmap_min_addr == PAGE_SIZE, mmap won't * give me that address if I use it as a hint. */ app_pc last_end = (app_pc) (PAGE_SIZE*16); bool found = false; memquery_iterator_start(&iter, NULL, false/*won't alloc*/); while (memquery_iterator_next(&iter)) { if (iter.vm_start >= start && MIN(iter.vm_start, end) - MAX(last_end, start) >= size) { if (found_start != NULL) *found_start = MAX(last_end, start); if (found_end != NULL) *found_end = MIN(iter.vm_start, end); found = true; break; } if (iter.vm_start >= end) break; last_end = iter.vm_end; } memquery_iterator_stop(&iter); return found; } void * os_heap_reserve_in_region(void *start, void *end, size_t size, heap_error_code_t *error_code, bool executable) { byte *p = NULL; byte *try_start = NULL; ASSERT(ALIGNED(start, PAGE_SIZE) && ALIGNED(end, PAGE_SIZE)); ASSERT(ALIGNED(size, PAGE_SIZE)); LOG(GLOBAL, LOG_HEAP, 3, "os_heap_reserve_in_region: "SZFMT" bytes in "PFX"-"PFX"\n", size, start, end); /* if no restriction on location use regular os_heap_reserve() */ if (start == (void *)PTR_UINT_0 && end == (void *)POINTER_MAX) return os_heap_reserve(NULL, size, error_code, executable); /* loop to handle races */ while (find_free_memory_in_region(start, end, size, &try_start, NULL)) { p = os_heap_reserve(try_start, size, error_code, executable); if (p != NULL) { ASSERT(*error_code == HEAP_ERROR_SUCCESS); ASSERT(p >= (byte *)start && p + size <= (byte *)end); break; } } if (p == NULL) *error_code = HEAP_ERROR_CANT_RESERVE_IN_REGION; else *error_code = HEAP_ERROR_SUCCESS; LOG(GLOBAL, LOG_HEAP, 2, "os_heap_reserve_in_region: reserved "SZFMT" bytes @ "PFX" in "PFX"-"PFX"\n", size, p, start, end); return p; } /* commit previously reserved with os_heap_reserve pages */ /* returns false when out of memory */ /* A replacement of os_heap_alloc can be constructed by using os_heap_reserve and os_heap_commit on a subset of the reserved pages. 
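   For example, a minimal hypothetical sketch (not a helper that exists here):
       heap_error_code_t ec;
       void *p = os_heap_reserve(NULL, 4*PAGE_SIZE, &ec, false);
       if (p != NULL)
           os_heap_commit(p, PAGE_SIZE, MEMPROT_READ|MEMPROT_WRITE, &ec);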
 */
/* caller is required to handle thread synchronization */
bool
os_heap_commit(void *p, size_t size, uint prot, heap_error_code_t *error_code)
{
    uint os_prot = memprot_to_osprot(prot);
    long res;
    /* should only be used on aligned pieces */
    ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE));
    ASSERT(p);
    ASSERT(error_code != NULL);

    /* FIXME: note that the memory would not be truly committed if we had */
    /* actually marked the mmap-ing with MAP_NORESERVE */
    res = mprotect_syscall(p, size, os_prot);
    if (res != 0) {
        *error_code = -res;
        return false;
    } else {
        *error_code = HEAP_ERROR_SUCCESS;
    }

    LOG(GLOBAL, LOG_HEAP, 2, "os_heap_commit: %d bytes @ "PFX"\n", size, p);
    return true;
}

/* caller is required to handle thread synchronization and to update dynamo vm areas */
void
os_heap_decommit(void *p, size_t size, heap_error_code_t *error_code)
{
    int rc;
    ASSERT(error_code != NULL);

    if (!dynamo_exited)
        LOG(GLOBAL, LOG_HEAP, 4, "os_heap_decommit: %d bytes @ "PFX"\n", size, p);

    *error_code = HEAP_ERROR_SUCCESS;
    /* FIXME: for now do nothing since os_heap_reserve has in fact committed the memory */
    rc = 0;
    /* TODO:
           p = mmap_syscall(p, size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
       we should either do a mremap() or we can do a munmap() followed 'quickly' by a
       mmap() - also see above the comment that os_heap_reserve() in fact is not so
       lightweight
    */
    ASSERT(rc == 0);
}

bool
os_heap_systemwide_overcommit(heap_error_code_t last_error_code)
{
    /* FIXME: conservative answer yes */
    return true;
}

bool
os_heap_get_commit_limit(size_t *commit_used, size_t *commit_limit)
{
    /* FIXME - NYI */
    return false;
}

/* yield the current thread */
void
os_thread_yield()
{
#ifdef MACOS
    /* XXX i#1291: use raw syscall instead */
    swtch_pri(0);
#else
    dynamorio_syscall(SYS_sched_yield, 0);
#endif
}

bool
thread_signal(process_id_t pid, thread_id_t tid, int signum)
{
#ifdef MACOS
    /* FIXME i#58: this takes in a thread port.  Need to map thread id to port.
     * Need to figure out whether we support raw Mach threads w/o pthread on top.
     */
    ASSERT_NOT_IMPLEMENTED(false);
    return false;
#else
    /* FIXME: for non-NPTL use SYS_kill */
    /* Note that the pid is equivalent to the thread group id.
     * However, we can have threads sharing address space but not pid
     * (if created via CLONE_VM but not CLONE_THREAD), so make sure to
     * use the pid of the target thread, not our pid.
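     * (SYS_tgkill verifies that tid is in the given thread group, guarding
     * against signaling a recycled tid in an unrelated process.)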
*/ return (dynamorio_syscall(SYS_tgkill, 3, pid, tid, signum) == 0); #endif } static bool known_thread_signal(thread_record_t *tr, int signum) { #ifdef MACOS ptr_int_t res; if (tr->dcontext == NULL) return FALSE; res = dynamorio_syscall(SYS___pthread_kill, 2, tr->dcontext->thread_port, signum); LOG(THREAD_GET, LOG_ALL, 3, "%s: signal %d to port %d => %ld\n", __FUNCTION__, signum, tr->dcontext->thread_port, res); return res == 0; #else return thread_signal(tr->pid, tr->id, signum); #endif } void os_thread_sleep(uint64 milliseconds) { #ifdef MACOS semaphore_t sem = MACH_PORT_NULL; int res; #else struct timespec remain; int count = 0; #endif struct timespec req; req.tv_sec = (milliseconds / 1000); /* docs say can go up to 1000000000, but doesn't work on FC9 */ req.tv_nsec = (milliseconds % 1000) * 1000000; #ifdef MACOS if (sem == MACH_PORT_NULL) { DEBUG_DECLARE(kern_return_t res =) semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0); ASSERT(res == KERN_SUCCESS); } res = dynamorio_syscall(SYSNUM_NO_CANCEL(SYS___semwait_signal), 6, sem, MACH_PORT_NULL, 1, 1, (int64_t)req.tv_sec, (int32_t)req.tv_nsec); if (res == -EINTR) { /* FIXME i#58: figure out how much time elapsed and re-wait */ } #else /* FIXME: if we need accurate sleeps in presence of itimers we should * be using SYS_clock_nanosleep w/ an absolute time instead of relative */ while (dynamorio_syscall(SYS_nanosleep, 2, &req, &remain) == -EINTR) { /* interrupted by signal or something: finish the interval */ ASSERT_CURIOSITY_ONCE(remain.tv_sec <= req.tv_sec && (remain.tv_sec < req.tv_sec || /* there seems to be some rounding, and sometimes * remain nsec > req nsec (I've seen 40K diff) */ req.tv_nsec - remain.tv_nsec < 100000 || req.tv_nsec - remain.tv_nsec > -100000)); /* not unusual for client threads to use itimers and have their run * routine sleep forever */ if (count++ > 3 && !IS_CLIENT_THREAD(get_thread_private_dcontext())) { ASSERT_NOT_REACHED(); break; /* paranoid */ } req = remain; } #endif } bool os_thread_suspend(thread_record_t *tr) { os_thread_data_t *ostd = (os_thread_data_t *) tr->dcontext->os_field; ASSERT(ostd != NULL); /* See synch comments in os_thread_resume: the mutex held there * prevents prematurely sending a re-suspend signal. */ mutex_lock(&ostd->suspend_lock); ostd->suspend_count++; ASSERT(ostd->suspend_count > 0); /* If already suspended, do not send another signal. However, we do * need to ensure the target is suspended in case of a race, so we can't * just return. */ if (ostd->suspend_count == 1) { /* PR 212090: we use a custom signal handler to suspend. We wait * here until the target reaches the suspend point, and leave it * up to the caller to check whether it is a safe suspend point, * to match Windows behavior. */ ASSERT(ksynch_get_value(&ostd->suspended) == 0); if (!known_thread_signal(tr, SUSPEND_SIGNAL)) { ostd->suspend_count--; mutex_unlock(&ostd->suspend_lock); return false; } } /* we can unlock before the wait loop b/c we're using a separate "resumed" * int and os_thread_resume holds the lock across its wait. this way a resume * can proceed as soon as the suspended thread is suspended, before the * suspending thread gets scheduled again. */ mutex_unlock(&ostd->suspend_lock); while (ksynch_get_value(&ostd->suspended) == 0) { /* For Linux, waits only if the suspended flag is not set as 1. Return value * doesn't matter because the flag will be re-checked. */ ksynch_wait(&ostd->suspended, 0); if (ksynch_get_value(&ostd->suspended) == 0) { /* If it still has to wait, give up the cpu. 
*/ os_thread_yield(); } } return true; } bool os_thread_resume(thread_record_t *tr) { os_thread_data_t *ostd = (os_thread_data_t *) tr->dcontext->os_field; ASSERT(ostd != NULL); /* This mutex prevents sending a re-suspend signal before the target * reaches a safe post-resume point from a first suspend signal. * Given that race, we can't just use atomic_add_exchange_int + * atomic_dec_becomes_zero on suspend_count. */ mutex_lock(&ostd->suspend_lock); ASSERT(ostd->suspend_count > 0); /* PR 479750: if do get here and target is not suspended then abort * to avoid possible deadlocks */ if (ostd->suspend_count == 0) { mutex_unlock(&ostd->suspend_lock); return true; /* the thread is "resumed", so success status */ } ostd->suspend_count--; if (ostd->suspend_count > 0) { mutex_unlock(&ostd->suspend_lock); return true; /* still suspended */ } ksynch_set_value(&ostd->wakeup, 1); ksynch_wake(&ostd->wakeup); while (ksynch_get_value(&ostd->resumed) == 0) { /* For Linux, waits only if the resumed flag is not set as 1. Return value * doesn't matter because the flag will be re-checked. */ ksynch_wait(&ostd->resumed, 0); if (ksynch_get_value(&ostd->resumed) == 0) { /* If it still has to wait, give up the cpu. */ os_thread_yield(); } } ksynch_set_value(&ostd->wakeup, 0); ksynch_set_value(&ostd->resumed, 0); mutex_unlock(&ostd->suspend_lock); return true; } bool os_thread_terminate(thread_record_t *tr) { /* PR 297902: for NPTL sending SIGKILL will take down the whole group: * so instead we send SIGUSR2 and have a flag set telling * target thread to execute SYS_exit */ os_thread_data_t *ostd = (os_thread_data_t *) tr->dcontext->os_field; ASSERT(ostd != NULL); ostd->terminate = true; /* Even if the thread is currently suspended, it's simpler to send it * another signal than to resume it. */ return known_thread_signal(tr, SUSPEND_SIGNAL); } bool is_thread_terminated(dcontext_t *dcontext) { os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field; ASSERT(ostd != NULL); return (ksynch_get_value(&ostd->terminated) == 1); } static void os_wait_thread_futex(KSYNCH_TYPE *var) { while (ksynch_get_value(var) == 0) { /* On Linux, waits only if var is not set as 1. Return value * doesn't matter because var will be re-checked. */ ksynch_wait(var, 0); if (ksynch_get_value(var) == 0) { /* If it still has to wait, give up the cpu. 
*/ os_thread_yield(); } } } void os_wait_thread_terminated(dcontext_t *dcontext) { os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field; ASSERT(ostd != NULL); os_wait_thread_futex(&ostd->terminated); } void os_wait_thread_detached(dcontext_t *dcontext) { os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field; ASSERT(ostd != NULL); os_wait_thread_futex(&ostd->detached); } void os_signal_thread_detach(dcontext_t *dcontext) { os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field; ASSERT(ostd != NULL); ostd->do_detach = true; } bool thread_get_mcontext(thread_record_t *tr, priv_mcontext_t *mc) { /* PR 212090: only works when target is suspended by us, and * we then take the signal context */ os_thread_data_t *ostd = (os_thread_data_t *) tr->dcontext->os_field; ASSERT(ostd != NULL); ASSERT(ostd->suspend_count > 0); if (ostd->suspend_count == 0) return false; ASSERT(ostd->suspended_sigcxt != NULL); sigcontext_to_mcontext(mc, ostd->suspended_sigcxt); return true; } bool thread_set_mcontext(thread_record_t *tr, priv_mcontext_t *mc) { /* PR 212090: only works when target is suspended by us, and * we then replace the signal context */ os_thread_data_t *ostd = (os_thread_data_t *) tr->dcontext->os_field; ASSERT(ostd != NULL); ASSERT(ostd->suspend_count > 0); if (ostd->suspend_count == 0) return false; ASSERT(ostd->suspended_sigcxt != NULL); mcontext_to_sigcontext(ostd->suspended_sigcxt, mc); return true; } bool is_thread_currently_native(thread_record_t *tr) { return (!tr->under_dynamo_control || /* start/stop doesn't change under_dynamo_control and has its own field */ (tr->dcontext != NULL && tr->dcontext->currently_stopped)); } #ifdef CLIENT_SIDELINE /* PR 222812: tied to sideline usage */ # ifdef LINUX /* XXX i#58: just until we have Mac support */ static void client_thread_run(void) { void (*func)(void *param); dcontext_t *dcontext; byte *xsp; GET_STACK_PTR(xsp); void *crec = get_clone_record((reg_t)xsp); IF_DEBUG(int rc = ) dynamo_thread_init(get_clone_record_dstack(crec), NULL, true); ASSERT(rc != -1); /* this better be a new thread */ dcontext = get_thread_private_dcontext(); ASSERT(dcontext != NULL); LOG(THREAD, LOG_ALL, 1, "\n***** CLIENT THREAD %d *****\n\n", get_thread_id()); /* We stored the func and args in particular clone record fields */ func = (void (*)(void *param)) signal_thread_inherit(dcontext, crec); void *arg = (void *) get_clone_record_app_xsp(crec); LOG(THREAD, LOG_ALL, 1, "func="PFX", arg="PFX"\n", func, arg); (*func)(arg); LOG(THREAD, LOG_ALL, 1, "\n***** CLIENT THREAD %d EXITING *****\n\n", get_thread_id()); cleanup_and_terminate(dcontext, SYS_exit, 0, 0, false/*just thread*/, IF_MACOS_ELSE(dcontext->thread_port, 0), 0); } # endif /* i#41/PR 222812: client threads * * thread must have dcontext since many API routines require one and we * don't expose GLOBAL_DCONTEXT (xref PR 243008, PR 216936, PR 536058) * * reversed the old design of not using dstack (partly b/c want dcontext) * and I'm using the same parent-creates-dstack and clone_record_t design * to create linux threads: dstack should be big enough for client threads * (xref PR 202669) * * reversed the old design of explicit dr_terminate_client_thread(): now * the thread is auto-terminated and stack cleaned up on return from run * function */ DR_API bool dr_create_client_thread(void (*func)(void *param), void *arg) { #ifdef LINUX dcontext_t *dcontext = get_thread_private_dcontext(); byte *xsp; /* We do not pass SIGCHLD since don't want signal to parent and don't support * waiting on 
child. * We do not pass CLONE_THREAD so that the new thread is in its own thread * group, allowing it to have private itimers and not receive any signals * sent to the app's thread groups. It also makes the thread not show up in * the thread list for the app, making it more invisible. */ uint flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND IF_NOT_X64(| CLONE_SETTLS) /* CLONE_THREAD required. Signals and itimers are private anyway. */ IF_VMX86(| (os_in_vmkernel_userworld() ? CLONE_THREAD : 0)); pre_second_thread(); /* need to share signal handler table, prior to creating clone record */ handle_clone(dcontext, flags); void *crec = create_clone_record(dcontext, (reg_t*)&xsp); /* make sure client_thread_run can get the func and arg, and that * signal_thread_inherit gets the right syscall info */ set_clone_record_fields(crec, (reg_t) arg, (app_pc) func, SYS_clone, flags); /* i#501 switch to app's tls before creating client thread */ if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) os_switch_lib_tls(dcontext, true/*to app*/); # if defined(X86) && !defined(X64) /* For the TCB we simply share the parent's. On Linux we could just inherit * the same selector but not for VMX86_SERVER so we specify for both for * 32-bit. Most of the fields are pthreads-specific and we assume the ones * that will be used (such as tcbhead_t.sysinfo @0x10) are read-only. */ our_modify_ldt_t desc; /* if get_segment_base() returned size too we could use it */ uint index = tls_priv_lib_index(); ASSERT(index != -1); if (!tls_get_descriptor(index, &desc)) { LOG(THREAD, LOG_ALL, 1, "%s: client thread tls get entry %d failed\n", __FUNCTION__, index); return false; } # endif LOG(THREAD, LOG_ALL, 1, "dr_create_client_thread xsp="PFX" dstack="PFX"\n", xsp, get_clone_record_dstack(crec)); thread_id_t newpid = dynamorio_clone(flags, xsp, NULL, IF_X86_ELSE(IF_X64_ELSE(NULL, &desc), NULL), NULL, client_thread_run); /* i#501 switch to app's tls before creating client thread */ if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) os_switch_lib_tls(dcontext, false/*to dr*/); if (newpid < 0) { LOG(THREAD, LOG_ALL, 1, "client thread creation failed: %d\n", newpid); return false; } else if (newpid == 0) { /* dynamorio_clone() should have called client_thread_run directly */ ASSERT_NOT_REACHED(); return false; } return true; #else ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#58: implement on Mac */ return false; #endif } #endif /* CLIENT_SIDELINE PR 222812: tied to sideline usage */ int get_num_processors(void) { static uint num_cpu = 0; /* cached value */ if (!num_cpu) { #ifdef MACOS DEBUG_DECLARE(bool ok =) sysctl_query(CTL_HW, HW_NCPU, &num_cpu, sizeof(num_cpu)); ASSERT(ok); #else /* We used to use get_nprocs_conf, but that's in libc, so now we just * look at the /sys filesystem ourselves, which is what glibc does. */ uint local_num_cpus = 0; file_t cpu_dir = os_open_directory("/sys/devices/system/cpu", OS_OPEN_READ); dir_iterator_t iter; ASSERT(cpu_dir != INVALID_FILE && "/sys must be mounted: mount -t sysfs sysfs /sys"); os_dir_iterator_start(&iter, cpu_dir); while (os_dir_iterator_next(&iter)) { int dummy_num; if (sscanf(iter.name, "cpu%d", &dummy_num) == 1) local_num_cpus++; } os_close(cpu_dir); num_cpu = local_num_cpus; #endif ASSERT(num_cpu); } return num_cpu; } /* i#46: To support -no_private_loader, we have to call the dlfcn family of * routines in libdl.so. When we do early injection, there is no loader to * resolve these imports, so they will crash. 
Early injection is incompatible * with -no_private_loader, so this should never happen. */ #if defined(CLIENT_INTERFACE) || defined(HOT_PATCHING_INTERFACE) shlib_handle_t load_shared_library(const char *name, bool reachable) { # ifdef STATIC_LIBRARY if (os_files_same(name, get_application_name())) { /* The private loader falls back to dlsym() and friends for modules it * doesn't recognize, so this works without disabling the private loader. */ return dlopen(NULL, RTLD_LAZY); /* Gets a handle to the exe. */ } # endif /* We call locate_and_load_private_library() to support searching for * a pathless name. */ if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) return (shlib_handle_t) locate_and_load_private_library(name, reachable); # if defined(STATIC_LIBRARY) || defined(MACOS) ASSERT(!DYNAMO_OPTION(early_inject)); return dlopen(name, RTLD_LAZY); # else /* -no_private_loader is no longer supported in our default builds. * If we want it for hybrid mode we should add a new build param and include * the libdl calls here under that param. */ ASSERT_NOT_REACHED(); return NULL; # endif } #endif #if defined(CLIENT_INTERFACE) shlib_routine_ptr_t lookup_library_routine(shlib_handle_t lib, const char *name) { if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) { return (shlib_routine_ptr_t) get_private_library_address((app_pc)lib, name); } # if defined(STATIC_LIBRARY) || defined(MACOS) ASSERT(!DYNAMO_OPTION(early_inject)); return dlsym(lib, name); # else ASSERT_NOT_REACHED(); /* -no_private_loader is no longer supported: see above */ return NULL; # endif } void unload_shared_library(shlib_handle_t lib) { if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) { unload_private_library(lib); } else { # if defined(STATIC_LIBRARY) || defined(MACOS) ASSERT(!DYNAMO_OPTION(early_inject)); if (!DYNAMO_OPTION(avoid_dlclose)) { dlclose(lib); } # else ASSERT_NOT_REACHED(); /* -no_private_loader is no longer supported: see above */ # endif } } void shared_library_error(char *buf, int maxlen) { const char *err; if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) { err = "error in private loader"; } else { # if defined(STATIC_LIBRARY) || defined(MACOS) ASSERT(!DYNAMO_OPTION(early_inject)); err = dlerror(); if (err == NULL) { err = "dlerror returned NULL"; } # else ASSERT_NOT_REACHED(); /* -no_private_loader is no longer supported */ err = "unknown error"; # endif } strncpy(buf, err, maxlen-1); buf[maxlen-1] = '\0'; /* strncpy won't put on trailing null if maxes out */ } /* addr is any pointer known to lie within the library. * for linux, one of addr or name is needed; for windows, neither is needed. 
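 * (An assert below enforces that at least one of addr or name is non-NULL.)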
*/ bool shared_library_bounds(IN shlib_handle_t lib, IN byte *addr, IN const char *name, OUT byte **start, OUT byte **end) { ASSERT(start != NULL && end != NULL); /* PR 366195: dlopen() handle truly is opaque, so we have to use either * addr or name */ ASSERT(addr != NULL || name != NULL); *start = addr; if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) { privmod_t *mod; /* look for private library first */ acquire_recursive_lock(&privload_lock); mod = privload_lookup_by_base((app_pc)lib); if (name != NULL && mod == NULL) mod = privload_lookup(name); if (mod != NULL && !mod->externally_loaded) { *start = mod->base; if (end != NULL) *end = mod->base + mod->size; release_recursive_lock(&privload_lock); return true; } release_recursive_lock(&privload_lock); } return (memquery_library_bounds(name, start, end, NULL, 0) > 0); } #endif /* defined(CLIENT_INTERFACE) */ #endif /* !NOT_DYNAMORIO_CORE_PROPER: around most of file, to exclude preload */ /* FIXME - not available in 2.0 or earlier kernels, not really an issue since no one * should be running anything that old. */ int llseek_syscall(int fd, int64 offset, int origin, int64 *result) { #if defined(X64) || defined(MACOS) # ifndef X64 /* 2 slots for 64-bit arg */ *result = dynamorio_syscall(SYS_lseek, 4, fd, (uint)(offset & 0xFFFFFFFF), (uint)((offset >> 32) & 0xFFFFFFFF), origin); # else *result = dynamorio_syscall(SYS_lseek, 3, fd, offset, origin); # endif return ((*result > 0) ? 0 : (int)*result); #else return dynamorio_syscall(SYS__llseek, 5, fd, (uint)((offset >> 32) & 0xFFFFFFFF), (uint)(offset & 0xFFFFFFFF), result, origin); #endif } static ptr_int_t dynamorio_syscall_stat(const char *fname, struct stat64 *st) { #ifdef SYSNUM_STAT return dynamorio_syscall(SYSNUM_STAT, 2, fname, st); #else return dynamorio_syscall(SYS_fstatat, 4, AT_FDCWD, fname, st, 0); #endif } bool os_file_exists(const char *fname, bool is_dir) { /* _LARGEFILE64_SOURCE should make libc struct match kernel (see top of file) */ struct stat64 st; ptr_int_t res = dynamorio_syscall_stat(fname, &st); if (res != 0) { LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: "PIFX"\n", __func__, res); return false; } return (!is_dir || S_ISDIR(st.st_mode)); } /* Returns true if two paths point to the same file. Follows symlinks. 
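 * Note that only inode numbers are compared, so the result is only
 * meaningful when both paths reside on the same filesystem.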
*/ bool os_files_same(const char *path1, const char *path2) { struct stat64 st1, st2; ptr_int_t res = dynamorio_syscall_stat(path1, &st1); if (res != 0) { LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: "PIFX"\n", __func__, res); return false; } res = dynamorio_syscall_stat(path2, &st2); if (res != 0) { LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: "PIFX"\n", __func__, res); return false; } return st1.st_ino == st2.st_ino; } bool os_get_file_size(const char *file, uint64 *size) { /* _LARGEFILE64_SOURCE should make libc struct match kernel (see top of file) */ struct stat64 st; ptr_int_t res = dynamorio_syscall_stat(file, &st); if (res != 0) { LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: "PIFX"\n", __func__, res); return false; } ASSERT(size != NULL); *size = st.st_size; return true; } bool os_get_file_size_by_handle(file_t fd, uint64 *size) { /* _LARGEFILE64_SOURCE should make libc struct match kernel (see top of file) */ struct stat64 st; ptr_int_t res = dynamorio_syscall(SYSNUM_FSTAT, 2, fd, &st); if (res != 0) { LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: "PIFX"\n", __func__, res); return false; } ASSERT(size != NULL); *size = st.st_size; return true; } /* created directory will be owned by effective uid, * Note a symbolic link will never be followed. */ bool os_create_dir(const char *fname, create_directory_flags_t create_dir_flags) { bool require_new = TEST(CREATE_DIR_REQUIRE_NEW, create_dir_flags); #ifdef SYS_mkdir int rc = dynamorio_syscall(SYS_mkdir, 2, fname, S_IRWXU|S_IRWXG); #else int rc = dynamorio_syscall(SYS_mkdirat, 3, AT_FDCWD, fname, S_IRWXU|S_IRWXG); #endif ASSERT(create_dir_flags == CREATE_DIR_REQUIRE_NEW || create_dir_flags == CREATE_DIR_ALLOW_EXISTING); return (rc == 0 || (!require_new && rc == -EEXIST)); } bool os_delete_dir(const char *name) { #ifdef SYS_rmdir return (dynamorio_syscall(SYS_rmdir, 1, name) == 0); #else return (dynamorio_syscall(SYS_unlinkat, 3, AT_FDCWD, name, AT_REMOVEDIR) == 0); #endif } int open_syscall(const char *file, int flags, int mode) { ASSERT(file != NULL); #ifdef SYS_open return dynamorio_syscall(SYSNUM_NO_CANCEL(SYS_open), 3, file, flags, mode); #else return dynamorio_syscall(SYSNUM_NO_CANCEL(SYS_openat), 4, AT_FDCWD, file, flags, mode); #endif } int close_syscall(int fd) { return dynamorio_syscall(SYSNUM_NO_CANCEL(SYS_close), 1, fd); } int dup_syscall(int fd) { return dynamorio_syscall(SYS_dup, 1, fd); } ssize_t read_syscall(int fd, void *buf, size_t nbytes) { return dynamorio_syscall(SYSNUM_NO_CANCEL(SYS_read), 3, fd, buf, nbytes); } ssize_t write_syscall(int fd, const void *buf, size_t nbytes) { return dynamorio_syscall(SYSNUM_NO_CANCEL(SYS_write), 3, fd, buf, nbytes); } #ifndef NOT_DYNAMORIO_CORE_PROPER static int fcntl_syscall(int fd, int cmd, long arg) { return dynamorio_syscall(SYSNUM_NO_CANCEL(SYS_fcntl), 3, fd, cmd, arg); } #endif /* !NOT_DYNAMORIO_CORE_PROPER */ /* not easily accessible in header files */ #ifndef O_LARGEFILE # ifdef X64 /* not needed */ # define O_LARGEFILE 0 # else # define O_LARGEFILE 0100000 # endif #endif /* we assume that opening for writing wants to create file. * we also assume that nobody calling this is creating a persistent * file: for that, use os_open_protected() to avoid leaking on exec * and to separate from the app's files. 
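 * e.g., os_open(path, OS_OPEN_WRITE|OS_OPEN_REQUIRE_NEW) opens read-write
 * with O_CREAT|O_TRUNC|O_EXCL, failing if the file already exists.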
*/ file_t os_open(const char *fname, int os_open_flags) { int res; int flags = 0; if (TEST(OS_OPEN_ALLOW_LARGE, os_open_flags)) flags |= O_LARGEFILE; if (TEST(OS_OPEN_WRITE_ONLY, os_open_flags)) res = open_syscall(fname, flags|O_WRONLY, 0); else if (!TEST(OS_OPEN_WRITE, os_open_flags)) res = open_syscall(fname, flags|O_RDONLY, 0); else { res = open_syscall(fname, flags|O_RDWR|O_CREAT| (TEST(OS_OPEN_APPEND, os_open_flags) ? /* Currently we only support either appending * or truncating, just like Windows and the client * interface. If we end up w/ a use case that wants * neither it could open append and then seek; if we do * add OS_TRUNCATE or sthg we'll need to add it to * any current writers who don't set OS_OPEN_REQUIRE_NEW. */ O_APPEND : O_TRUNC) | (TEST(OS_OPEN_REQUIRE_NEW, os_open_flags) ? O_EXCL : 0), S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP); } if (res < 0) return INVALID_FILE; return res; } file_t os_open_directory(const char *fname, int os_open_flags) { /* no special handling */ return os_open(fname, os_open_flags); } void os_close(file_t f) { close_syscall(f); } #ifndef NOT_DYNAMORIO_CORE_PROPER /* dups curfd to a private fd. * returns -1 if unsuccessful. */ file_t fd_priv_dup(file_t curfd) { file_t newfd = -1; if (DYNAMO_OPTION(steal_fds) > 0) { /* RLIMIT_NOFILES is 1 greater than max and F_DUPFD starts at given value */ /* XXX: if > linux 2.6.24, can use F_DUPFD_CLOEXEC to avoid later call: * so how do we tell if the flag is supported? try calling once at init? */ newfd = fcntl_syscall(curfd, F_DUPFD, min_dr_fd); if (newfd < 0) { /* We probably ran out of fds, esp if debug build and there are * lots of threads. Should we track how many we've given out to * avoid a failed syscall every time after? */ SYSLOG_INTERNAL_WARNING_ONCE("ran out of stolen fd space"); /* Try again but this time in the app space, somewhere high up * to avoid issues like tcsh assuming it can own fds 3-5 for * piping std{in,out,err} (xref the old -open_tcsh_fds option). */ newfd = fcntl_syscall(curfd, F_DUPFD, min_dr_fd/2); } } return newfd; } bool fd_mark_close_on_exec(file_t fd) { /* we assume FD_CLOEXEC is the only flag and don't bother w/ F_GETFD */ if (fcntl_syscall(fd, F_SETFD, FD_CLOEXEC) != 0) { SYSLOG_INTERNAL_WARNING("unable to mark file %d as close-on-exec", fd); return false; } return true; } void fd_table_add(file_t fd, uint flags) { if (fd_table != NULL) { TABLE_RWLOCK(fd_table, write, lock); DODEBUG({ /* i#1010: If the fd is already in the table, chances are it's a * stale logfile fd left behind by a vforked or cloned child that * called execve. Avoid an assert if that happens. 
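             * We remove any stale entry first so the add below does not
             * collide with it.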
             */
            bool present = generic_hash_remove(GLOBAL_DCONTEXT, fd_table,
                                               (ptr_uint_t)fd);
            ASSERT_CURIOSITY_ONCE(!present && "stale fd not cleaned up");
        });
        generic_hash_add(GLOBAL_DCONTEXT, fd_table, (ptr_uint_t)fd,
                         /* store the flags, w/ a set bit to ensure not 0 */
                         (void *)(ptr_uint_t)(flags|OS_OPEN_RESERVED));
        TABLE_RWLOCK(fd_table, write, unlock);
    } else {
#ifdef DEBUG
        num_fd_add_pre_heap++;
        /* we add main_logfile in os_init() */
        ASSERT(num_fd_add_pre_heap == 1 && "only main_logfile should come here");
#endif
    }
}

static bool
fd_is_dr_owned(file_t fd)
{
    ptr_uint_t flags;
    ASSERT(fd_table != NULL);
    TABLE_RWLOCK(fd_table, read, lock);
    flags = (ptr_uint_t) generic_hash_lookup(GLOBAL_DCONTEXT, fd_table, (ptr_uint_t)fd);
    TABLE_RWLOCK(fd_table, read, unlock);
    return (flags != 0);
}

static bool
fd_is_in_private_range(file_t fd)
{
    return (DYNAMO_OPTION(steal_fds) > 0 && min_dr_fd > 0 && fd >= min_dr_fd);
}

file_t
os_open_protected(const char *fname, int os_open_flags)
{
    file_t dup;
    file_t res = os_open(fname, os_open_flags);
    if (res < 0)
        return res;

    /* we could have os_open() always switch to a private fd but it's probably
     * not worth the extra syscall for temporary open/close sequences so we
     * only use it for persistent files
     */
    dup = fd_priv_dup(res);
    if (dup >= 0) {
        close_syscall(res);
        res = dup;
        fd_mark_close_on_exec(res);
    } /* else just keep original */

    /* ditto here, plus for things like config.c opening files we can't handle
     * grabbing locks and often don't have heap available so no fd_table
     */
    fd_table_add(res, os_open_flags);
    return res;
}

void
os_close_protected(file_t f)
{
    ASSERT(fd_table != NULL || dynamo_exited);
    if (fd_table != NULL) {
        TABLE_RWLOCK(fd_table, write, lock);
        generic_hash_remove(GLOBAL_DCONTEXT, fd_table, (ptr_uint_t)f);
        TABLE_RWLOCK(fd_table, write, unlock);
    }
    os_close(f);
}

bool
os_get_current_dir(char *buf, size_t bufsz)
{
# ifdef MACOS
    static char noheap_buf[MAXPATHLEN];
    bool res = false;
    file_t fd = os_open(".", OS_OPEN_READ);
    int len;
    /* F_GETPATH assumes a buffer of size MAXPATHLEN */
    char *fcntl_buf;
    if (dynamo_heap_initialized)
        fcntl_buf = global_heap_alloc(MAXPATHLEN HEAPACCT(ACCT_OTHER));
    else
        fcntl_buf = noheap_buf;
    if (fd == INVALID_FILE)
        goto cwd_error;
    if (fcntl_syscall(fd, F_GETPATH, (long)fcntl_buf) != 0)
        goto cwd_error;
    len = snprintf(buf, bufsz, "%s", fcntl_buf);
    buf[bufsz-1] = '\0';
    /* fall through so the success path also frees fcntl_buf and closes fd
     * (returning directly here would leak both)
     */
    res = (len > 0 && len < bufsz);
 cwd_error:
    if (dynamo_heap_initialized)
        global_heap_free(fcntl_buf, MAXPATHLEN HEAPACCT(ACCT_OTHER));
    os_close(fd);
    return res;
# else
    return (dynamorio_syscall(SYS_getcwd, 2, buf, bufsz) > 0);
# endif
}
#endif /* !NOT_DYNAMORIO_CORE_PROPER */

#ifndef NOT_DYNAMORIO_CORE_PROPER /* so drinject can use drdecode's copy */
ssize_t
os_write(file_t f, const void *buf, size_t count)
{
    return write_syscall(f, buf, count);
}
#endif /* !NOT_DYNAMORIO_CORE_PROPER */

ssize_t
os_read(file_t f, void *buf, size_t count)
{
    return read_syscall(f, buf, count);
}

void
os_flush(file_t f)
{
    /* we're not using FILE*, so there is no buffering */
}

/* seek the current file position to offset bytes from origin, return true if successful */
bool
os_seek(file_t f, int64 offset, int origin)
{
    int64 result;
    int ret = 0;

    ret = llseek_syscall(f, offset, origin, &result);

    return (ret == 0);
}

/* return the current file position, -1 on failure */
int64
os_tell(file_t f)
{
    int64 result = -1;
    int ret = 0;

    ret = llseek_syscall(f, 0, SEEK_CUR, &result);

    if (ret != 0)
        return -1;

    return result;
}

bool
os_delete_file(const char *name)
{
#ifdef SYS_unlink
    return
(dynamorio_syscall(SYS_unlink, 1, name) == 0); #else return (dynamorio_syscall(SYS_unlinkat, 3, AT_FDCWD, name, 0) == 0); #endif } bool os_rename_file(const char *orig_name, const char *new_name, bool replace) { ptr_int_t res; if (!replace) { /* SYS_rename replaces so we must test beforehand => could have race */ /* _LARGEFILE64_SOURCE should make libc struct match kernel (see top of file) */ struct stat64 st; ptr_int_t res = dynamorio_syscall_stat(new_name, &st); if (res == 0) return false; else if (res != -ENOENT) { LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s stat failed: "PIFX"\n", __func__, res); return false; } } #ifdef SYS_rename res = dynamorio_syscall(SYS_rename, 2, orig_name, new_name); #else res = dynamorio_syscall(SYS_renameat, 4, AT_FDCWD, orig_name, AT_FDCWD, new_name); #endif if (res != 0) LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s \"%s\" to \"%s\" failed: "PIFX"\n", __func__, orig_name, new_name, res); return (res == 0); } bool os_delete_mapped_file(const char *filename) { return os_delete_file(filename); } byte * os_map_file(file_t f, size_t *size INOUT, uint64 offs, app_pc addr, uint prot, map_flags_t map_flags) { int flags; byte *map; #if defined(X64) && !defined(NOT_DYNAMORIO_CORE_PROPER) bool loop = false; uint iters = 0; # define MAX_MMAP_LOOP_ITERS 100 byte *region_start = NULL, *region_end = NULL; #else uint pg_offs; ASSERT_TRUNCATE(pg_offs, uint, offs / PAGE_SIZE); pg_offs = (uint) (offs / PAGE_SIZE); #endif #ifdef VMX86_SERVER flags = MAP_PRIVATE; /* MAP_SHARED not supported yet */ #else flags = TEST(MAP_FILE_COPY_ON_WRITE, map_flags) ? MAP_PRIVATE : MAP_SHARED; #endif #if defined(X64) && !defined(NOT_DYNAMORIO_CORE_PROPER) /* Allocate memory from reachable range for image: or anything (pcache * in particular): for low 4GB, easiest to just pass MAP_32BIT (which is * low 2GB, but good enough). */ if (DYNAMO_OPTION(heap_in_lower_4GB) && !TEST(MAP_FILE_FIXED, map_flags)) flags |= MAP_32BIT; #endif /* Allows memory request instead of mapping a file, * so we can request memory from a particular address with fixed argument */ if (f == -1) flags |= MAP_ANONYMOUS; if (TEST(MAP_FILE_FIXED, map_flags)) flags |= MAP_FIXED; /* Reachability is not supported for drinjectlib */ #if defined(X64) && !defined(NOT_DYNAMORIO_CORE_PROPER) if (!TEST(MAP_32BIT, flags) && TEST(MAP_FILE_REACHABLE, map_flags)) { vmcode_get_reachable_region(&region_start, &region_end); /* addr need not be NULL: we'll use it if it's in the region */ ASSERT(!TEST(MAP_FILE_FIXED, map_flags)); /* Loop to handle races */ loop = true; } while (!loop || (addr != NULL && addr >= region_start && addr+*size <= region_end) || find_free_memory_in_region(region_start, region_end, *size, &addr, NULL)) { #endif map = mmap_syscall(addr, *size, memprot_to_osprot(prot), flags, f, /* x86 Linux mmap uses offset in pages */ IF_LINUX_ELSE(IF_X64_ELSE(offs, pg_offs), offs)); if (!mmap_syscall_succeeded(map)) { LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: "PIFX"\n", __func__, map); map = NULL; } #if defined(X64) && !defined(NOT_DYNAMORIO_CORE_PROPER) else if (loop && (map < region_start || map+*size > region_end)) { /* Try again: probably a race. Hopefully our notion of "there's a free * region big enough" matches the kernel's, else we'll loop forever * (which we try to catch w/ a max iters count). 
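             * MAX_MMAP_LOOP_ITERS below bounds the retries before we give up
             * and assert.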
*/ munmap_syscall(map, *size); map = NULL; } else break; if (!loop) break; if (++iters > MAX_MMAP_LOOP_ITERS) { ASSERT_NOT_REACHED(); map = NULL; break; } addr = NULL; /* pick a new one */ } #endif return map; } bool os_unmap_file(byte *map, size_t size) { long res = munmap_syscall(map, size); return (res == 0); } /* around most of file, to exclude preload */ #if !defined(NOT_DYNAMORIO_CORE_PROPER) || defined(STANDALONE_UNIT_TEST) bool os_get_disk_free_space(/*IN*/ file_t file_handle, /*OUT*/ uint64 *AvailableQuotaBytes /*OPTIONAL*/, /*OUT*/ uint64 *TotalQuotaBytes /*OPTIONAL*/, /*OUT*/ uint64 *TotalVolumeBytes /*OPTIONAL*/) { /* libc struct seems to match kernel's */ struct statfs stat; ptr_int_t res = dynamorio_syscall(SYS_fstatfs, 2, file_handle, &stat); if (res != 0) { LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: "PIFX"\n", __func__, res); return false; } LOG(GLOBAL, LOG_STATS, 3, "os_get_disk_free_space: avail="SZFMT", free="SZFMT", bsize="SZFMT"\n", stat.f_bavail, stat.f_bfree, stat.f_bsize); if (AvailableQuotaBytes != NULL) *AvailableQuotaBytes = ((uint64)stat.f_bavail * stat.f_bsize); /* no support for quotas */ if (TotalQuotaBytes != NULL) *TotalQuotaBytes = ((uint64)stat.f_bavail * stat.f_bsize); if (TotalVolumeBytes != NULL) /* despite name this is how much is free */ *TotalVolumeBytes = ((uint64)stat.f_bfree * stat.f_bsize); return true; } #ifdef LINUX static bool symlink_is_self_exe(const char *path) { /* Look for "/proc/%d/exe" where %d exists in /proc/self/task/%d, * or "/proc/self/exe". Rule out the exe link for another process * (though it could also be under DR we have no simple way to obtain * its actual app path). */ # define SELF_LEN_LEADER 6 /* "/proc/" */ # define SELF_LEN_TRAILER 4 /* "/exe" */ # define SELF_LEN_MAX 18 size_t len = strlen(path); if (strcmp(path, "/proc/self/exe") == 0) return true; if (len < SELF_LEN_MAX && /* /proc/nnnnnn/exe */ strncmp(path, "/proc/", SELF_LEN_LEADER) == 0 && strncmp(path + len - SELF_LEN_TRAILER, "/exe", SELF_LEN_TRAILER) == 0) { int pid; if (sscanf(path + SELF_LEN_LEADER, "%d", &pid) == 1) { char task[32]; snprintf(task, BUFFER_SIZE_ELEMENTS(task), "/proc/self/task/%d", pid); NULL_TERMINATE_BUFFER(task); return os_file_exists(task, true/*dir*/); } } return false; } #endif void exit_process_syscall(long status) { /* We now assume SYS_exit_group is defined: not building on old machines, * but will execute there. We try exit_group and if it fails we use exit. * * FIXME: if no exit_group, kill all other threads (==processes in same addr * space) manually? Presumably we got here b/c at an unsafe point to do * full exit? Or is that not true: what about dr_abort()? */ dynamorio_syscall(SYSNUM_EXIT_PROCESS, 1, status); /* would assert that result is -ENOSYS but assert likely calls us => infinite loop */ exit_thread_syscall(status); ASSERT_NOT_REACHED(); } void exit_thread_syscall(long status) { #ifdef MACOS mach_port_t thread_port = dynamorio_mach_syscall(MACH_thread_self_trap, 0); /* FIXME i#1403: on MacOS we fail to free the app's stack: we need to pass it to * bsdthread_terminate. */ dynamorio_syscall(SYSNUM_EXIT_THREAD, 4, 0, 0, thread_port, 0); #else dynamorio_syscall(SYSNUM_EXIT_THREAD, 1, status); #endif } /* FIXME: this one will not be easily internationalizable yet it is easier to have a syslog based Unix implementation with real strings. 
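   (Only the priority mapping below is in place so far; the actual
   vsyslog call is not yet implemented.)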
 */
void
os_syslog(syslog_event_type_t priority, uint message_id,
          uint substitutions_num, va_list args)
{
    int native_priority;
    switch (priority) {
    case SYSLOG_INFORMATION: native_priority = LOG_INFO;    break;
    case SYSLOG_WARNING:     native_priority = LOG_WARNING; break;
    case SYSLOG_CRITICAL:    native_priority = LOG_CRIT;    break;
    case SYSLOG_ERROR:       native_priority = LOG_ERR;     break;
    default:
        ASSERT_NOT_REACHED();
    }
    /* can amount to passing a format string (careful here) to vsyslog */
    /* Never let user controlled data in the format string! */
    ASSERT_NOT_IMPLEMENTED(false);
}

/* This is subject to races, but should only happen at init/attach when
 * there should only be one live thread.
 */
static bool
safe_read_via_query(const void *base, size_t size, void *out_buf, size_t *bytes_read)
{
    bool res = false;
    size_t num_read = 0;
    ASSERT(!fault_handling_initialized);
    /* XXX: in today's init ordering, allmem will never be initialized when we come
     * here, but we check it nevertheless to be general in case this routine is
     * ever called at some later time
     */
    if (IF_MEMQUERY_ELSE(false, memcache_initialized()))
        res = is_readable_without_exception_internal(base, size, false/*use allmem*/);
    else
        res = is_readable_without_exception_query_os((void *)base, size);
    if (res) {
        memcpy(out_buf, base, size);
        num_read = size;
    }
    if (bytes_read != NULL)
        *bytes_read = num_read;
    return res;
}

bool
safe_read_ex(const void *base, size_t size, void *out_buf, size_t *bytes_read)
{
    STATS_INC(num_safe_reads);
    /* XXX i#350: we'd like to always use safe_read_fast() and remove this extra
     * call layer, but safe_read_fast() requires fault handling to be set up.
     * We do set up an early signal handler in os_init(),
     * but there is still a window prior to that with no handler.
     */
    if (!fault_handling_initialized) {
        return safe_read_via_query(base, size, out_buf, bytes_read);
    } else {
        return safe_read_fast(base, size, out_buf, bytes_read);
    }
}

bool
safe_read_if_fast(const void *base, size_t size, void *out_buf)
{
    if (!fault_handling_initialized) {
        memcpy(out_buf, base, size);
        return true;
    } else {
        return safe_read_ex(base, size, out_buf, NULL);
    }
}

/* FIXME - fold this together with safe_read_ex() (is a lot of places to update) */
bool
safe_read(const void *base, size_t size, void *out_buf)
{
    return safe_read_ex(base, size, out_buf, NULL);
}

bool
safe_write_ex(void *base, size_t size, const void *in_buf, size_t *bytes_written)
{
    return safe_write_try_except(base, size, in_buf, bytes_written);
}

/* is_readable_without_exception checks to see that all bytes with addresses
 * from pc to pc+size-1 are readable and that reading from there won't
 * generate an exception.  if 'query_os' is true, check what the os thinks
 * the prot bits are instead of using the all memory list.
 */
static bool
is_readable_without_exception_internal(const byte *pc, size_t size, bool query_os)
{
    uint prot = MEMPROT_NONE;
    byte *check_pc = (byte *) ALIGN_BACKWARD(pc, PAGE_SIZE);
    if (size > ((byte *)POINTER_MAX - pc))
        size = (byte *)POINTER_MAX - pc;
    do {
        bool rc = query_os ?
            get_memory_info_from_os(check_pc, NULL, NULL, &prot) :
            get_memory_info(check_pc, NULL, NULL, &prot);
        if (!rc || !TESTANY(MEMPROT_READ|MEMPROT_EXEC, prot))
            return false;
        if (POINTER_OVERFLOW_ON_ADD(check_pc, PAGE_SIZE))
            break;
        check_pc += PAGE_SIZE;
    } while (check_pc < pc+size);
    return true;
}

bool
is_readable_without_exception(const byte *pc, size_t size)
{
    /* case 9745 / i#853: We've had problems with all_memory_areas not being
     * accurate in the past.
Parsing proc maps is too slow for some apps, so we * use a runtime option. */ bool query_os = IF_MEMQUERY_ELSE(true, !DYNAMO_OPTION(use_all_memory_areas)); return is_readable_without_exception_internal(pc, size, query_os); } /* Identical to is_readable_without_exception except that the os is queried * for info on the indicated region */ bool is_readable_without_exception_query_os(byte *pc, size_t size) { return is_readable_without_exception_internal(pc, size, true); } bool is_user_address(byte *pc) { /* FIXME: NYI */ /* note returning true will always skip the case 9022 logic on Linux */ return true; } #endif /* !NOT_DYNAMORIO_CORE_PROPER */ /* change protections on memory region starting at pc of length length * this does not update the all memory area info */ bool os_set_protection(byte *pc, size_t length, uint prot/*MEMPROT_*/) { app_pc start_page = (app_pc) PAGE_START(pc); uint num_bytes = ALIGN_FORWARD(length + (pc - start_page), PAGE_SIZE); long res = 0; uint flags = memprot_to_osprot(prot); #ifdef IA32_ON_IA64 LOG(THREAD_GET, LOG_VMAREAS, 1, "protection change not supported on IA64\n"); LOG(THREAD_GET, LOG_VMAREAS, 1, " attempted change_prot("PFX", "PIFX", %s) => " "mprotect("PFX", "PIFX")==%d pages\n", pc, length, memprot_string(prot), start_page, num_bytes, num_bytes / PAGE_SIZE); #else DOSTATS({ /* once on each side of prot, to get on right side of writability */ if (!TEST(PROT_WRITE, flags)) { STATS_INC(protection_change_calls); STATS_ADD(protection_change_pages, num_bytes / PAGE_SIZE); } }); res = mprotect_syscall((void *) start_page, num_bytes, flags); if (res != 0) return false; LOG(THREAD_GET, LOG_VMAREAS, 3, "change_prot("PFX", "PIFX", %s) => " "mprotect("PFX", "PIFX", %d)==%d pages\n", pc, length, memprot_string(prot), start_page, num_bytes, flags, num_bytes / PAGE_SIZE); #endif DOSTATS({ /* once on each side of prot, to get on right side of writability */ if (TEST(PROT_WRITE, flags)) { STATS_INC(protection_change_calls); STATS_ADD(protection_change_pages, num_bytes / PAGE_SIZE); } }); return true; } #ifndef NOT_DYNAMORIO_CORE_PROPER /* change protections on memory region starting at pc of length length */ bool set_protection(byte *pc, size_t length, uint prot/*MEMPROT_*/) { if (os_set_protection(pc, length, prot) == false) return false; #ifndef HAVE_MEMINFO_QUERY else { app_pc start_page = (app_pc) PAGE_START(pc); uint num_bytes = ALIGN_FORWARD(length + (pc - start_page), PAGE_SIZE); memcache_update_locked(start_page, start_page + num_bytes, prot, -1/*type unchanged*/, true/*exists*/); } #endif return true; } /* change protections on memory region starting at pc of length length */ bool change_protection(byte *pc, size_t length, bool writable) { uint flags = (writable) ? (MEMPROT_READ|MEMPROT_WRITE) : (MEMPROT_READ); return set_protection(pc, length, flags); } /* make pc's page writable */ bool make_writable(byte *pc, size_t size) { long res; app_pc start_page = (app_pc) PAGE_START(pc); size_t prot_size = (size == 0) ? PAGE_SIZE : size; uint prot = PROT_EXEC|PROT_READ|PROT_WRITE; /* if can get current protection then keep old read/exec flags. * this is crucial on modern linux kernels which refuse to mark stack +x. */ if (!is_in_dynamo_dll(pc)/*avoid allmem assert*/ && #ifdef STATIC_LIBRARY /* FIXME i#975: is_in_dynamo_dll() is always false for STATIC_LIBRARY, * but we can't call get_memory_info() until allmem is initialized. Our * uses before then are for patching x86.asm, which is OK. 
*/ IF_NO_MEMQUERY(memcache_initialized() &&) #endif get_memory_info(pc, NULL, NULL, &prot)) prot |= PROT_WRITE; ASSERT(start_page == pc && ALIGN_FORWARD(size, PAGE_SIZE) == size); #ifdef IA32_ON_IA64 LOG(THREAD_GET, LOG_VMAREAS, 1, "protection change not supported on IA64\n"); LOG(THREAD_GET, LOG_VMAREAS, 3, "attempted make_writable: pc "PFX" -> "PFX"-"PFX"\n", pc, start_page, start_page + prot_size); #else res = mprotect_syscall((void *) start_page, prot_size, prot); LOG(THREAD_GET, LOG_VMAREAS, 3, "make_writable: pc "PFX" -> "PFX"-"PFX" %d\n", pc, start_page, start_page + prot_size, res); ASSERT(res == 0); if (res != 0) return false; #endif STATS_INC(protection_change_calls); STATS_ADD(protection_change_pages, size / PAGE_SIZE); #ifndef HAVE_MEMINFO_QUERY /* update all_memory_areas list with the protection change */ if (memcache_initialized()) { memcache_update_locked(start_page, start_page + prot_size, osprot_to_memprot(prot), -1/*type unchanged*/, true/*exists*/); } #endif return true; } /* like make_writable but adds COW */ bool make_copy_on_writable(byte *pc, size_t size) { /* FIXME: for current usage this should be fine */ return make_writable(pc, size); } /* make pc's page unwritable */ void make_unwritable(byte *pc, size_t size) { long res; app_pc start_page = (app_pc) PAGE_START(pc); size_t prot_size = (size == 0) ? PAGE_SIZE : size; uint prot = PROT_EXEC|PROT_READ; /* if can get current protection then keep old read/exec flags. * this is crucial on modern linux kernels which refuse to mark stack +x. */ if (!is_in_dynamo_dll(pc)/*avoid allmem assert*/ && #ifdef STATIC_LIBRARY /* FIXME i#975: is_in_dynamo_dll() is always false for STATIC_LIBRARY, * but we can't call get_memory_info() until allmem is initialized. Our * uses before then are for patching x86.asm, which is OK. */ IF_NO_MEMQUERY(memcache_initialized() &&) #endif get_memory_info(pc, NULL, NULL, &prot)) prot &= ~PROT_WRITE; ASSERT(start_page == pc && ALIGN_FORWARD(size, PAGE_SIZE) == size); /* inc stats before making unwritable, in case messing w/ data segment */ STATS_INC(protection_change_calls); STATS_ADD(protection_change_pages, size / PAGE_SIZE); #ifdef IA32_ON_IA64 LOG(THREAD_GET, LOG_VMAREAS, 1, "protection change not supported on IA64\n"); LOG(THREAD_GET, LOG_VMAREAS, 3, "attempted make_writable: pc "PFX" -> "PFX"-"PFX"\n", pc, start_page, start_page + prot_size); #else res = mprotect_syscall((void *) start_page, prot_size, prot); LOG(THREAD_GET, LOG_VMAREAS, 3, "make_unwritable: pc "PFX" -> "PFX"-"PFX"\n", pc, start_page, start_page + prot_size); ASSERT(res == 0); # ifndef HAVE_MEMINFO_QUERY /* update all_memory_areas list with the protection change */ if (memcache_initialized()) { memcache_update_locked(start_page, start_page + prot_size, osprot_to_memprot(prot), -1/*type unchanged*/, false/*!exists*/); } # endif #endif } /****************************************************************************/ /* SYSTEM CALLS */ /* SYS_ defines are in /usr/include/bits/syscall.h * numbers used by libc are in /usr/include/asm/unistd.h * kernel defines are in /usr/src/linux-2.4/include/asm-i386/unistd.h * kernel function names are in /usr/src/linux/arch/i386/kernel/entry.S * * For now, we've copied the SYS/NR defines from syscall.h and unistd.h * and put them in our own local syscall.h. */ /* num_raw should be the xax register value. * For a live system call, dcontext_live should be passed (for examining * the dcontext->last_exit and exit_reason flags); otherwise, gateway should * be passed. 
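 * E.g., on 64-bit MacOS the raw BSD number 0x2000001 (SYS_exit) normalizes
 * to 1 once the 0x2000000 class bits are dropped.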
*/ int os_normalized_sysnum(int num_raw, instr_t *gateway, dcontext_t *dcontext) { #ifdef MACOS /* The x64 encoding indicates the syscall type in the top 8 bits. * We drop the 0x2000000 for BSD so we can use the SYS_ enum constants. * That leaves 0x1000000 for Mach and 0x3000000 for Machdep. * On 32-bit, a different encoding is used: we transform that * to the x64 encoding minus BSD. */ int interrupt = 0; int num = 0; if (gateway != NULL) { if (instr_is_interrupt(gateway)) interrupt = instr_get_interrupt_number(gateway); } else { ASSERT(dcontext != NULL); if (TEST(LINK_SPECIAL_EXIT, dcontext->last_exit->flags)) { if (dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_NI_SYSCALL_INT_0x81) interrupt = 0x81; else { ASSERT(dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_NI_SYSCALL_INT_0x82); interrupt = 0x82; } } } # ifdef X64 if (num_raw >> 24 == 0x2) return (int)(num_raw & 0xffffff); /* Drop BSD bit */ else num = (int) num_raw; /* Keep Mach and Machdep bits */ # else if ((ptr_int_t)num_raw < 0) /* Mach syscall */ return (SYSCALL_NUM_MARKER_MACH | -(int)num_raw); else { /* Bottom 16 bits are the number, top are arg size. */ num = (int)(num_raw & 0xffff); } # endif if (interrupt == 0x81) num |= SYSCALL_NUM_MARKER_MACH; else if (interrupt == 0x82) num |= SYSCALL_NUM_MARKER_MACHDEP; return num; #else return num_raw; #endif } static bool ignorable_system_call_normalized(int num) { switch (num) { #if defined(SYS_exit_group) case SYS_exit_group: #endif case SYS_exit: #ifdef MACOS case SYS_bsdthread_terminate: #endif #ifdef LINUX case SYS_brk: # ifdef SYS_uselib case SYS_uselib: # endif #endif #if defined(X64) || !defined(ARM) case SYS_mmap: #endif #if !defined(X64) && !defined(MACOS) case SYS_mmap2: #endif case SYS_munmap: #ifdef LINUX case SYS_mremap: #endif case SYS_mprotect: #ifdef ANDROID case SYS_prctl: #endif case SYS_execve: #ifdef LINUX case SYS_clone: #elif defined(MACOS) case SYS_bsdthread_create: case SYS_posix_spawn: #endif #ifdef SYS_fork case SYS_fork: #endif #ifdef SYS_vfork case SYS_vfork: #endif case SYS_kill: #if defined(SYS_tkill) case SYS_tkill: #endif #if defined(SYS_tgkill) case SYS_tgkill: #endif #if defined(LINUX) && !defined(X64) && !defined(ARM) case SYS_signal: #endif #ifdef MACOS case SYS_sigsuspend_nocancel: #endif #if !defined(X64) || defined(MACOS) case SYS_sigaction: case SYS_sigsuspend: case SYS_sigpending: case SYS_sigreturn: case SYS_sigprocmask: #endif #ifdef LINUX case SYS_rt_sigreturn: case SYS_rt_sigaction: case SYS_rt_sigprocmask: case SYS_rt_sigpending: case SYS_rt_sigtimedwait: case SYS_rt_sigqueueinfo: case SYS_rt_sigsuspend: #ifdef SYS_signalfd case SYS_signalfd: #endif case SYS_signalfd4: #endif case SYS_sigaltstack: #if defined(LINUX) && !defined(X64) && !defined(ARM) case SYS_sgetmask: case SYS_ssetmask: #endif case SYS_setitimer: case SYS_getitimer: #ifdef MACOS case SYS_close_nocancel: #endif case SYS_close: #ifdef SYS_dup2 case SYS_dup2: #endif #ifdef LINUX case SYS_dup3: #endif #ifdef MACOS case SYS_fcntl_nocancel: #endif case SYS_fcntl: #if defined(X64) || !defined(ARM) case SYS_getrlimit: #endif #if defined(LINUX) && !defined(X64) case SYS_ugetrlimit: #endif case SYS_setrlimit: #ifdef LINUX case SYS_prlimit64: #endif #if defined(LINUX) && defined(X86) /* i#784: app may have behavior relying on SIGALRM */ case SYS_alarm: #endif /* i#107: syscall might change/query app's seg memory * need stop app from clobbering our GDT slot. 
     */
#if defined(LINUX) && defined(X86) && defined(X64)
    case SYS_arch_prctl:
#endif
#if defined(LINUX) && defined(X86)
    case SYS_set_thread_area:
    case SYS_get_thread_area:
    /* FIXME: we might add SYS_modify_ldt later. */
#endif
#if defined(LINUX) && defined(ARM)
    /* syscall changes app's thread register */
    case SYS_set_tls:
    case SYS_cacheflush:
#endif
        return false;
#ifdef LINUX
# ifdef SYS_readlink
    case SYS_readlink:
# endif
    case SYS_readlinkat:
        return !DYNAMO_OPTION(early_inject);
#endif
    default:
#ifdef VMX86_SERVER
        if (is_vmkuw_sysnum(num))
            return vmkuw_ignorable_system_call(num);
#endif
        return true;
    }
}

bool
ignorable_system_call(int num_raw, instr_t *gateway, dcontext_t *dcontext_live)
{
    return ignorable_system_call_normalized
        (os_normalized_sysnum(num_raw, gateway, dcontext_live));
}

typedef struct {
    unsigned long addr;
    unsigned long len;
    unsigned long prot;
    unsigned long flags;
    unsigned long fd;
    unsigned long offset;
} mmap_arg_struct_t;

#endif /* !NOT_DYNAMORIO_CORE_PROPER: around most of file, to exclude preload */

const reg_id_t syscall_regparms[MAX_SYSCALL_ARGS] = {
#ifdef X86
# ifdef X64
    DR_REG_RDI,
    DR_REG_RSI,
    DR_REG_RDX,
    DR_REG_R10,  /* RCX goes here in the normal x64 calling convention. */
    DR_REG_R8,
    DR_REG_R9
# else
    DR_REG_EBX,
    DR_REG_ECX,
    DR_REG_EDX,
    DR_REG_ESI,
    DR_REG_EDI,
    DR_REG_EBP
# endif /* 64/32-bit */
#elif defined(AARCHXX)
    DR_REG_R0,
    DR_REG_R1,
    DR_REG_R2,
    DR_REG_R3,
    DR_REG_R4,
    DR_REG_R5,
#endif /* X86/ARM */
};

#ifndef NOT_DYNAMORIO_CORE_PROPER

static inline reg_t *
sys_param_addr(dcontext_t *dcontext, int num)
{
    /* we force-inline get_mcontext() and so don't take it as a param */
    priv_mcontext_t *mc = get_mcontext(dcontext);
#if defined(X86) && defined(X64)
    switch (num) {
    case 0: return &mc->xdi;
    case 1: return &mc->xsi;
    case 2: return &mc->xdx;
    case 3: return &mc->r10; /* since rcx holds retaddr for syscall instr */
    case 4: return &mc->r8;
    case 5: return &mc->r9;
    default: CLIENT_ASSERT(false, "invalid system call parameter number");
    }
#else
# ifdef MACOS
    /* XXX: if we don't end up using dcontext->sys_was_int here, we could
     * make that field Linux-only.
     */
    /* For 32-bit, the args are passed on the stack, above a retaddr slot
     * (regardless of whether using a sysenter or int gateway).
     */
    return ((reg_t *)mc->esp) + 1/*retaddr*/ + num;
# endif
    /* even for vsyscall where ecx (syscall) or esp (sysenter) are saved into
     * ebp, the original parameter registers are not yet changed pre-syscall,
     * except for ebp, which is pushed on the stack:
     *     0xffffe400  55                   push %ebp %esp -> %esp (%esp)
     *     0xffffe401  89 cd                mov  %ecx -> %ebp
     *     0xffffe403  0f 05                syscall -> %ecx
     *
     *     0xffffe400  51                   push %ecx %esp -> %esp (%esp)
     *     0xffffe401  52                   push %edx %esp -> %esp (%esp)
     *     0xffffe402  55                   push %ebp %esp -> %esp (%esp)
     *     0xffffe403  89 e5                mov  %esp -> %ebp
     *     0xffffe405  0f 34                sysenter -> %esp
     */
    switch (num) {
    case 0: return &mc->IF_X86_ELSE(xbx, r0);
    case 1: return &mc->IF_X86_ELSE(xcx, r1);
    case 2: return &mc->IF_X86_ELSE(xdx, r2);
    case 3: return &mc->IF_X86_ELSE(xsi, r3);
    case 4: return &mc->IF_X86_ELSE(xdi, r4);
    /* FIXME: do a safe_read: but what about performance?
     * See the #if 0 below, as well.
     */
    case 5: return IF_X86_ELSE((dcontext->sys_was_int ? &mc->xbp : ((reg_t*)mc->xsp)),
                               &mc->r5);
# ifdef ARM
    /* AArch32 supposedly has 7 args in some cases.
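     * syscall_regparms above lists only r0-r5, so the seventh argument (r6)
     * is special-cased here.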
     */
    case 6: return &mc->r6;
# endif
    default: CLIENT_ASSERT(false, "invalid system call parameter number");
    }
#endif
    return 0;
}

static inline reg_t
sys_param(dcontext_t *dcontext, int num)
{
    return *sys_param_addr(dcontext, num);
}

void
set_syscall_param(dcontext_t *dcontext, int param_num, reg_t new_value)
{
    *sys_param_addr(dcontext, param_num) = new_value;
}

static inline bool
syscall_successful(priv_mcontext_t *mc, int normalized_sysnum)
{
#ifdef MACOS
    if (TEST(SYSCALL_NUM_MARKER_MACH, normalized_sysnum)) {
        /* XXX: Mach syscalls vary (for some KERN_SUCCESS=0 is success,
         * for others that return mach_port_t 0 is failure (I think?)).
         * We defer to drsyscall.
         */
        return ((ptr_int_t)MCXT_SYSCALL_RES(mc) >= 0);
    } else
        return !TEST(EFLAGS_CF, mc->eflags);
#else
    if (normalized_sysnum == IF_X64_ELSE(SYS_mmap, SYS_mmap2) ||
# if !defined(ARM) && !defined(X64)
        normalized_sysnum == SYS_mmap ||
# endif
        normalized_sysnum == SYS_mremap)
        return mmap_syscall_succeeded((byte *)MCXT_SYSCALL_RES(mc));
    return ((ptr_int_t)MCXT_SYSCALL_RES(mc) >= 0);
#endif
}

/* For non-Mac, this does nothing to indicate "success": you can pass -errno.
 * For Mac, this clears CF and just sets xax.  To return a 64-bit value in
 * 32-bit mode, the caller must explicitly set xdx as well (we don't always
 * do so b/c syscalls that just return 32-bit values do not touch xdx).
 */
static inline void
set_success_return_val(dcontext_t *dcontext, reg_t val)
{
    /* since always coming from dispatch now, only need to set mcontext */
    priv_mcontext_t *mc = get_mcontext(dcontext);
#ifdef MACOS
    /* On MacOS, success is determined by CF, except for Mach syscalls, but
     * there it doesn't hurt to set CF.
     */
    mc->eflags &= ~(EFLAGS_CF);
#endif
    MCXT_SYSCALL_RES(mc) = val;
}

/* Always pass a positive value for errno */
static inline void
set_failure_return_val(dcontext_t *dcontext, uint errno_val)
{
    priv_mcontext_t *mc = get_mcontext(dcontext);
#ifdef MACOS
    /* On MacOS, success is determined by CF, and errno is positive */
    mc->eflags |= EFLAGS_CF;
    MCXT_SYSCALL_RES(mc) = errno_val;
#else
    MCXT_SYSCALL_RES(mc) = -(int)errno_val;
#endif
}

#ifdef CLIENT_INTERFACE
DR_API
reg_t
dr_syscall_get_param(void *drcontext, int param_num)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(dcontext->client_data->in_pre_syscall,
                  "dr_syscall_get_param() can only be called from pre-syscall event");
    return sys_param(dcontext, param_num);
}

DR_API
void
dr_syscall_set_param(void *drcontext, int param_num, reg_t new_value)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(dcontext->client_data->in_pre_syscall ||
                  dcontext->client_data->in_post_syscall,
                  "dr_syscall_set_param() can only be called from a syscall event");
    *sys_param_addr(dcontext, param_num) = new_value;
}

DR_API
reg_t
dr_syscall_get_result(void *drcontext)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(dcontext->client_data->in_post_syscall,
                  "dr_syscall_get_result() can only be called from post-syscall event");
    return MCXT_SYSCALL_RES(get_mcontext(dcontext));
}

DR_API
bool
dr_syscall_get_result_ex(void *drcontext, dr_syscall_result_info_t *info INOUT)
{
    dcontext_t *dcontext = (dcontext_t *) drcontext;
    priv_mcontext_t *mc = get_mcontext(dcontext);
    CLIENT_ASSERT(dcontext->client_data->in_post_syscall,
                  "only call dr_syscall_get_result_ex() from post-syscall event");
    CLIENT_ASSERT(info != NULL, "invalid parameter");
    CLIENT_ASSERT(info->size == sizeof(*info), "invalid dr_syscall_result_info_t size");
    if (info->size != sizeof(*info))
        return false;
    info->value =
MCXT_SYSCALL_RES(mc); info->succeeded = syscall_successful(mc, dcontext->sys_num); if (info->use_high) { /* MacOS has some 32-bit syscalls that return 64-bit values in * xdx:xax, but the other syscalls don't clear xdx, so we can't easily * return a 64-bit value all the time. */ IF_X86_ELSE({ info->high = mc->xdx; }, { ASSERT_NOT_REACHED(); }); } if (info->use_errno) { if (info->succeeded) info->errno_value = 0; else { info->errno_value = (uint)IF_LINUX(-(int))MCXT_SYSCALL_RES(mc); } } return true; } DR_API void dr_syscall_set_result(void *drcontext, reg_t value) { dcontext_t *dcontext = (dcontext_t *) drcontext; CLIENT_ASSERT(dcontext->client_data->in_pre_syscall || dcontext->client_data->in_post_syscall, "dr_syscall_set_result() can only be called from a syscall event"); /* For non-Mac, the caller can still pass -errno and this will work */ set_success_return_val(dcontext, value); } DR_API bool dr_syscall_set_result_ex(void *drcontext, dr_syscall_result_info_t *info) { dcontext_t *dcontext = (dcontext_t *) drcontext; priv_mcontext_t *mc = get_mcontext(dcontext); CLIENT_ASSERT(dcontext->client_data->in_pre_syscall || dcontext->client_data->in_post_syscall, "dr_syscall_set_result() can only be called from a syscall event"); CLIENT_ASSERT(info->size == sizeof(*info), "invalid dr_syscall_result_info_t size"); if (info->size != sizeof(*info)) return false; if (info->use_errno) { if (info->succeeded) { /* a weird case but we let the user combine these */ set_success_return_val(dcontext, info->errno_value); } else set_failure_return_val(dcontext, info->errno_value); } else { if (info->succeeded) set_success_return_val(dcontext, info->value); else { /* use this to set CF, even though it might negate the value */ set_failure_return_val(dcontext, (uint)info->value); /* now set the value, overriding set_failure_return_val() */ MCXT_SYSCALL_RES(mc) = info->value; } if (info->use_high) { /* MacOS has some 32-bit syscalls that return 64-bit values in * xdx:xax. */ IF_X86_ELSE({ mc->xdx = info->high; }, { ASSERT_NOT_REACHED(); }); } } return true; } DR_API void dr_syscall_set_sysnum(void *drcontext, int new_num) { dcontext_t *dcontext = (dcontext_t *) drcontext; priv_mcontext_t *mc = get_mcontext(dcontext); CLIENT_ASSERT(dcontext->client_data->in_pre_syscall || dcontext->client_data->in_post_syscall, "dr_syscall_set_sysnum() can only be called from a syscall event"); MCXT_SYSNUM_REG(mc) = new_num; } DR_API void dr_syscall_invoke_another(void *drcontext) { dcontext_t *dcontext = (dcontext_t *) drcontext; CLIENT_ASSERT(dcontext->client_data->in_post_syscall, "dr_syscall_invoke_another() can only be called from post-syscall event"); LOG(THREAD, LOG_SYSCALLS, 2, "invoking additional syscall on client request\n"); dcontext->client_data->invoke_another_syscall = true; # ifdef X86 if (get_syscall_method() == SYSCALL_METHOD_SYSENTER) { priv_mcontext_t *mc = get_mcontext(dcontext); /* restore xbp to xsp */ mc->xbp = mc->xsp; } # endif /* X86 */ /* for x64 we don't need to copy xcx into r10 b/c we use r10 as our param */ } #endif /* CLIENT_INTERFACE */ static inline bool is_thread_create_syscall_helper(ptr_uint_t sysnum, ptr_uint_t flags) { #ifdef MACOS /* XXX i#1403: we need earlier injection to intercept * bsdthread_register in order to capture workqueue threads. 
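     * (Until then, only bsdthread_create and vfork are recognized here.)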
*/ return (sysnum == SYS_bsdthread_create || sysnum == SYS_vfork); #else # ifdef SYS_vfork if (sysnum == SYS_vfork) return true; # endif # ifdef LINUX if (sysnum == SYS_clone && TEST(CLONE_VM, flags)) return true; # endif return false; #endif } bool is_thread_create_syscall(dcontext_t *dcontext) { priv_mcontext_t *mc = get_mcontext(dcontext); return is_thread_create_syscall_helper(MCXT_SYSNUM_REG(mc), sys_param(dcontext, 0)); } bool was_thread_create_syscall(dcontext_t *dcontext) { return is_thread_create_syscall_helper(dcontext->sys_num, /* flags in param0 */ dcontext->sys_param0); } static inline bool is_sigreturn_syscall_helper(int sysnum) { #ifdef MACOS return sysnum == SYS_sigreturn; #else return (IF_NOT_X64(sysnum == SYS_sigreturn ||) sysnum == SYS_rt_sigreturn); #endif } bool is_sigreturn_syscall(dcontext_t *dcontext) { priv_mcontext_t *mc = get_mcontext(dcontext); return is_sigreturn_syscall_helper(MCXT_SYSNUM_REG(mc)); } bool was_sigreturn_syscall(dcontext_t *dcontext) { return is_sigreturn_syscall_helper(dcontext->sys_num); } /* process a signal this process/thread is sending to itself */ static void handle_self_signal(dcontext_t *dcontext, uint sig) { /* FIXME PR 297903: watch for all DEFAULT_TERMINATE signals, * and for any thread in the group, not just self. * * FIXME PR 297033: watch for SIGSTOP and SIGCONT. * * With -intercept_all_signals, we only need to watch for SIGKILL * and SIGSTOP here, and we avoid the FIXMEs below. If it's fine * for DR not to clean up on a SIGKILL, then SIGSTOP is all that's * left (at least once we have PR 297033 and are intercepting the * various STOP variations and CONT). */ if (sig == SIGABRT && !DYNAMO_OPTION(intercept_all_signals)) { LOG(GLOBAL, LOG_TOP|LOG_SYSCALLS, 1, "thread "TIDFMT" sending itself a SIGABRT\n", get_thread_id()); KSTOP(num_exits_dir_syscall); /* FIXME: need to check whether app has a handler for SIGABRT! */ /* FIXME PR 211180/6723: this will do SYS_exit rather than the SIGABRT. * Should do set_default_signal_action(SIGABRT) (and set a flag so * no races w/ another thread re-installing?) and then SYS_kill. */ cleanup_and_terminate(dcontext, SYSNUM_EXIT_THREAD, -1, 0, (is_last_app_thread() && !dynamo_exited), IF_MACOS_ELSE(dcontext->thread_port, 0), 0); ASSERT_NOT_REACHED(); } } /*************************************************************************** * EXECVE */ /* when adding here, also add to the switch in handle_execve if necessary */ enum { ENV_PROP_RUNUNDER, ENV_PROP_OPTIONS, ENV_PROP_EXECVE_LOGDIR, ENV_PROP_EXE_PATH, ENV_PROP_CONFIGDIR, }; static const char * const env_to_propagate[] = { /* these must line up with the enum */ DYNAMORIO_VAR_RUNUNDER, DYNAMORIO_VAR_OPTIONS, /* DYNAMORIO_VAR_EXECVE_LOGDIR is different from DYNAMORIO_VAR_LOGDIR: * - DYNAMORIO_VAR_LOGDIR: a parent dir inside which a new dir will be created; * - DYNAMORIO_VAR_EXECVE_LOGDIR: the same subdir with the pre-execve process. * Xref comment in create_log_dir about their precedence. */ DYNAMORIO_VAR_EXECVE_LOGDIR, /* i#909: needed for early injection */ DYNAMORIO_VAR_EXE_PATH, /* these will only be propagated if they exist */ DYNAMORIO_VAR_CONFIGDIR, }; #define NUM_ENV_TO_PROPAGATE (sizeof(env_to_propagate)/sizeof(env_to_propagate[0])) /* Called at pre-SYS_execve to append DR vars in the target process env vars list. * For late injection via libdrpreload, we call this for *all children, because * even if -no_follow_children is specified, a whitelist will still ask for takeover * and it's libdrpreload who checks the whitelist. 
* For -early, however, we check the config ahead of time and only call this routine * if we in fact want to inject. * XXX i#1679: these parent vs child differences bring up corner cases of which * config dir takes precedence (if the child clears the HOME env var, e.g.). */ static void add_dr_env_vars(dcontext_t *dcontext, char *inject_library_path, const char *app_path) { char **envp = (char **) sys_param(dcontext, 2); int idx, j, preload = -1, ldpath = -1; int num_old, num_new, sz; bool need_var[NUM_ENV_TO_PROPAGATE]; int prop_idx[NUM_ENV_TO_PROPAGATE]; bool ldpath_us = false, preload_us = false; char **new_envp, *var, *old; /* check if any var needs to be propagated */ for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) { prop_idx[j] = -1; if (get_config_val(env_to_propagate[j]) == NULL) need_var[j] = false; else need_var[j] = true; } /* Special handling for DYNAMORIO_VAR_EXECVE_LOGDIR: * we only need it if follow_children is true and PROCESS_DIR exists. */ if (DYNAMO_OPTION(follow_children) && get_log_dir(PROCESS_DIR, NULL, NULL)) need_var[ENV_PROP_EXECVE_LOGDIR] = true; else need_var[ENV_PROP_EXECVE_LOGDIR] = false; if (DYNAMO_OPTION(early_inject)) need_var[ENV_PROP_EXE_PATH] = true; /* iterate the env in target process */ if (envp == NULL) { LOG(THREAD, LOG_SYSCALLS, 3, "\tenv is NULL\n"); idx = 0; } else { for (idx = 0; envp[idx] != NULL; idx++) { /* execve env vars should never be set here */ ASSERT(strstr(envp[idx], DYNAMORIO_VAR_EXECVE) != envp[idx]); for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) { if (strstr(envp[idx], env_to_propagate[j]) == envp[idx]) { /* If conflict between env and cfg, we assume those env vars * are for DR usage only, and replace them with cfg value. */ prop_idx[j] = idx; /* remember the index for replacing later */ break; } } if (!DYNAMO_OPTION(early_inject) && strstr(envp[idx], "LD_LIBRARY_PATH=") == envp[idx]) { ldpath = idx; if (strstr(envp[idx], inject_library_path) != NULL) ldpath_us = true; } if (!DYNAMO_OPTION(early_inject) && strstr(envp[idx], "LD_PRELOAD=") == envp[idx]) { preload = idx; if (strstr(envp[idx], DYNAMORIO_PRELOAD_NAME) != NULL && strstr(envp[idx], DYNAMORIO_LIBRARY_NAME) != NULL) { preload_us = true; } } LOG(THREAD, LOG_SYSCALLS, 3, "\tenv %d: %s\n", idx, envp[idx]); } } /* We want to add new env vars, so we create a new envp * array. We have to deallocate them and restore the old * envp if execve fails; if execve succeeds, the address * space is reset so we don't need to do anything. */ num_old = idx; /* how many new env vars we need add */ num_new = 2 + /* execve indicator var plus final NULL */ (DYNAMO_OPTION(early_inject) ? 0 : (((preload<0) ? 1 : 0) + ((ldpath<0) ? 
1 : 0))); for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) { if ((DYNAMO_OPTION(follow_children) || j == ENV_PROP_EXE_PATH) && need_var[j] && prop_idx[j] < 0) num_new++; } /* setup new envp */ new_envp = heap_alloc(dcontext, sizeof(char*)*(num_old+num_new) HEAPACCT(ACCT_OTHER)); /* copy old envp */ memcpy(new_envp, envp, sizeof(char*)*num_old); /* change/add preload and ldpath if necessary */ if (!DYNAMO_OPTION(early_inject) && !preload_us) { int idx_preload; LOG(THREAD, LOG_SYSCALLS, 1, "WARNING: execve env does NOT preload DynamoRIO, forcing it!\n"); if (preload >= 0) { /* replace the existing preload */ sz = strlen(envp[preload]) + strlen(DYNAMORIO_PRELOAD_NAME)+ strlen(DYNAMORIO_LIBRARY_NAME) + 3; var = heap_alloc(dcontext, sizeof(char)*sz HEAPACCT(ACCT_OTHER)); old = envp[preload] + strlen("LD_PRELOAD="); snprintf(var, sz, "LD_PRELOAD=%s %s %s", DYNAMORIO_PRELOAD_NAME, DYNAMORIO_LIBRARY_NAME, old); idx_preload = preload; } else { /* add new preload */ sz = strlen("LD_PRELOAD=") + strlen(DYNAMORIO_PRELOAD_NAME) + strlen(DYNAMORIO_LIBRARY_NAME) + 2; var = heap_alloc(dcontext, sizeof(char)*sz HEAPACCT(ACCT_OTHER)); snprintf(var, sz, "LD_PRELOAD=%s %s", DYNAMORIO_PRELOAD_NAME, DYNAMORIO_LIBRARY_NAME); idx_preload = idx++; } *(var+sz-1) = '\0'; /* null terminate */ new_envp[idx_preload] = var; LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n", idx_preload, new_envp[idx_preload]); } if (!DYNAMO_OPTION(early_inject) && !ldpath_us) { int idx_ldpath; if (ldpath >= 0) { sz = strlen(envp[ldpath]) + strlen(inject_library_path) + 2; var = heap_alloc(dcontext, sizeof(char)*sz HEAPACCT(ACCT_OTHER)); old = envp[ldpath] + strlen("LD_LIBRARY_PATH="); snprintf(var, sz, "LD_LIBRARY_PATH=%s:%s", inject_library_path, old); idx_ldpath = ldpath; } else { sz = strlen("LD_LIBRARY_PATH=") + strlen(inject_library_path) + 1; var = heap_alloc(dcontext, sizeof(char)*sz HEAPACCT(ACCT_OTHER)); snprintf(var, sz, "LD_LIBRARY_PATH=%s", inject_library_path); idx_ldpath = idx++; } *(var+sz-1) = '\0'; /* null terminate */ new_envp[idx_ldpath] = var; LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n", idx_ldpath, new_envp[idx_ldpath]); } /* propagating DR env vars */ for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) { const char *val = ""; if (!need_var[j]) continue; if (!DYNAMO_OPTION(follow_children) && j != ENV_PROP_EXE_PATH) continue; switch (j) { case ENV_PROP_RUNUNDER: ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_RUNUNDER) == 0); /* Must pass RUNUNDER_ALL to get child injected if has no app config. * If rununder var is already set we assume it's set to 1. 
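             * The "3" passed below is the ascii form of RUNUNDER_ON|RUNUNDER_ALL
             * (asserted to equal 0x3), turning on injection for this child even
             * when it has no app-specific config.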
*/ ASSERT((RUNUNDER_ON | RUNUNDER_ALL) == 0x3); /* else, update "3" */ val = "3"; break; case ENV_PROP_OPTIONS: ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_OPTIONS) == 0); val = option_string; break; case ENV_PROP_EXECVE_LOGDIR: /* we use PROCESS_DIR for DYNAMORIO_VAR_EXECVE_LOGDIR */ ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_EXECVE_LOGDIR) == 0); ASSERT(get_log_dir(PROCESS_DIR, NULL, NULL)); break; case ENV_PROP_EXE_PATH: ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_EXE_PATH) == 0); val = app_path; break; default: val = getenv(env_to_propagate[j]); if (val == NULL) val = ""; break; } if (j == ENV_PROP_EXECVE_LOGDIR) { uint logdir_length; get_log_dir(PROCESS_DIR, NULL, &logdir_length); /* logdir_length includes the terminating NULL */ sz = strlen(DYNAMORIO_VAR_EXECVE_LOGDIR) + logdir_length + 1/* '=' */; var = heap_alloc(dcontext, sizeof(char)*sz HEAPACCT(ACCT_OTHER)); snprintf(var, sz, "%s=", DYNAMORIO_VAR_EXECVE_LOGDIR); get_log_dir(PROCESS_DIR, var+strlen(var), &logdir_length); } else { sz = strlen(env_to_propagate[j]) + strlen(val) + 2 /* '=' + null */; var = heap_alloc(dcontext, sizeof(char)*sz HEAPACCT(ACCT_OTHER)); snprintf(var, sz, "%s=%s", env_to_propagate[j], val); } *(var+sz-1) = '\0'; /* null terminate */ prop_idx[j] = (prop_idx[j] >= 0) ? prop_idx[j] : idx++; new_envp[prop_idx[j]] = var; LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n", prop_idx[j], new_envp[prop_idx[j]]); } if (!DYNAMO_OPTION(follow_children) && !DYNAMO_OPTION(early_inject)) { if (prop_idx[ENV_PROP_RUNUNDER] >= 0) { /* disable auto-following of this execve, yet still allow preload * on other side to inject if config file exists. * kind of hacky mangle here: */ ASSERT(!need_var[ENV_PROP_RUNUNDER]); ASSERT(new_envp[prop_idx[ENV_PROP_RUNUNDER]][0] == 'D'); new_envp[prop_idx[ENV_PROP_RUNUNDER]][0] = 'X'; } } sz = strlen(DYNAMORIO_VAR_EXECVE) + 4; /* we always pass this var to indicate "post-execve" */ var = heap_alloc(dcontext, sizeof(char)*sz HEAPACCT(ACCT_OTHER)); /* PR 458917: we overload this to also pass our gdt index */ ASSERT(os_tls_get_gdt_index(dcontext) < 100 && os_tls_get_gdt_index(dcontext) >= -1); /* only 2 chars allocated */ snprintf(var, sz, "%s=%02d", DYNAMORIO_VAR_EXECVE, os_tls_get_gdt_index(dcontext)); *(var+sz-1) = '\0'; /* null terminate */ new_envp[idx++] = var; LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n", idx-1, new_envp[idx-1]); /* must end with NULL */ new_envp[idx++] = NULL; ASSERT((num_new + num_old) == idx); /* update syscall param */ *sys_param_addr(dcontext, 2) = (reg_t) new_envp; /* OUT */ /* store for reset in case execve fails, and for cleanup if * this is a vfork thread */ dcontext->sys_param0 = (reg_t) envp; dcontext->sys_param1 = (reg_t) new_envp; } static ssize_t script_file_reader(const char *pathname, void *buf, size_t count) { /* FIXME i#2090: Check file is executable. */ file_t file = os_open(pathname, OS_OPEN_READ); size_t len; if (file == INVALID_FILE) return -1; len = os_read(file, buf, count); os_close(file); return len; } /* For early injection, recognise when the executable is a script ("#!") and * modify the syscall parameters to invoke a script interpreter instead. In * this case we will have allocated memory here but we expect the caller to * do a non-failing execve of libdynamorio.so and therefore not to have to * free the memory. That is one reason for checking that the (final) script * interpreter really is an executable binary. 
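 * For example (illustrative): a script "foo.sh" whose first line is
 * "#!/bin/sh -e", exec'd with argv {"foo.sh", "arg"}, is turned below into an
 * exec of "/bin/sh" with argv {"/bin/sh", "-e", "foo.sh", "arg"}, the script
 * path replacing the original argv[0].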
* We recognise one error case here and return the non-zero error code (ELOOP) * but in other cases we leave it up to the caller to detect the error, which * it may do by attempting to exec the path natively, expecting this to fail, * though there is the obvious danger that the file might have been modified * just before the exec. * We do not, and cannot easily, handle a file that is executable but not * readable. Currently such files will be executed without DynamoRIO though * in some situations it would be more helpful to stop with an error. * * XXX: There is a minor transparency bug with misformed binaries. For example, * execve can return EINVAL if the ELF executable has more than one PT_INTERP * segment but we do not check this and so under DynamoRIO the error would be * detected only after the exec, if we are following the child. * * FIXME i#2091: There is a memory leak if a script is recognised, and it is * later decided not to inject (see where should_inject is set), and the exec * fails, because in this case there is no mechanism for freeing the memory * allocated in this function. This function should return sufficient information * for the caller to free the memory, which it can do so before the exec if it * reverts to the original syscall arguments and execs the script. */ static int handle_execve_script(dcontext_t *dcontext) { char *fname = (char *)sys_param(dcontext, 0); char **orig_argv = (char **)sys_param(dcontext, 1); script_interpreter_t *script; int ret = 0; script = global_heap_alloc(sizeof(*script) HEAPACCT(ACCT_OTHER)); if (!find_script_interpreter(script, fname, script_file_reader)) goto free_and_return; if (script->argc == 0) { ret = ELOOP; goto free_and_return; } /* Check that the final interpreter is an executable binary. */ { file_t file = os_open(script->argv[0], OS_OPEN_READ); bool is64; if (file == INVALID_FILE) goto free_and_return; if (!module_file_is_module64(file, &is64, NULL)) { os_close(file); goto free_and_return; } } { size_t i, orig_argc = 0; char **new_argv; /* Concatenate new arguments and original arguments. */ while (orig_argv[orig_argc] != NULL) ++orig_argc; if (orig_argc == 0) orig_argc = 1; new_argv = global_heap_alloc((script->argc + orig_argc + 1) * sizeof(char *) HEAPACCT(ACCT_OTHER)); for (i = 0; i < script->argc; i++) new_argv[i] = script->argv[i]; new_argv[script->argc] = fname; /* replaces orig_argv[0] */ for (i = 1; i < orig_argc; i++) new_argv[script->argc + i] = orig_argv[i]; new_argv[script->argc + orig_argc] = NULL; /* Modify syscall parameters. */ *sys_param_addr(dcontext, 0) = (reg_t)new_argv[0]; *sys_param_addr(dcontext, 1) = (reg_t)new_argv; } return 0; free_and_return: global_heap_free(script, sizeof(*script) HEAPACCT(ACCT_OTHER)); return ret; } static int handle_execve(dcontext_t *dcontext) { /* in /usr/src/linux/arch/i386/kernel/process.c: * asmlinkage int sys_execve(struct pt_regs regs) { ... * error = do_execve(filename, (char **) regs.xcx, (char **) regs.xdx, &regs); * in fs/exec.c: * int do_execve(char * filename, char ** argv, char ** envp, struct pt_regs * regs) */ /* We need to make sure we get injected into the new image: * we simply make sure LD_PRELOAD contains us, and that our directory * is on LD_LIBRARY_PATH (seems not to work to put absolute paths in * LD_PRELOAD). 
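 * (ld.so maps every library named in LD_PRELOAD before the app's entry point
 * runs, which is how libdrpreload.so regains control in the child.)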
* FIXME: this doesn't work for setuid programs * * For -follow_children we also pass the current DYNAMORIO_RUNUNDER and * DYNAMORIO_OPTIONS and logdir to the new image to support a simple * run-all-children model without bothering w/ setting up config files for * children, and to support injecting across execve that does not * preserve $HOME. * FIXME i#287/PR 546544: we'll need to propagate DYNAMORIO_AUTOINJECT too * once we use it in preload */ /* FIXME i#191: supposed to preserve things like pending signal * set across execve: going to ignore for now */ char *fname; bool x64 = IF_X64_ELSE(true, false); bool expect_to_fail = false; bool should_inject; file_t file; char *inject_library_path; char rununder_buf[16]; /* just an integer printed in ascii */ bool app_specific, from_env, rununder_on; #if defined(LINUX) || defined(DEBUG) const char **argv; #endif if (DYNAMO_OPTION(follow_children) && DYNAMO_OPTION(early_inject)) { int ret = handle_execve_script(dcontext); if (ret != 0) return ret; } fname = (char *)sys_param(dcontext, 0); #if defined(LINUX) || defined(DEBUG) argv = (const char **)sys_param(dcontext, 1); #endif #ifdef LINUX if (DYNAMO_OPTION(early_inject) && symlink_is_self_exe(fname)) { /* i#907: /proc/self/exe points at libdynamorio.so. Make sure we run * the right thing here. */ fname = get_application_name(); } #endif LOG(GLOBAL, LOG_ALL, 1, "\n---------------------------------------------------------------------------\n"); LOG(THREAD, LOG_ALL, 1, "\n---------------------------------------------------------------------------\n"); DODEBUG({ int i; SYSLOG_INTERNAL_INFO("-- execve %s --", fname); LOG(THREAD, LOG_SYSCALLS, 1, "syscall: execve %s\n", fname); LOG(GLOBAL, LOG_TOP|LOG_SYSCALLS, 1, "execve %s\n", fname); if (stats->loglevel >= 3) { if (argv == NULL) { LOG(THREAD, LOG_SYSCALLS, 3, "\targs are NULL\n"); } else { for (i = 0; argv[i] != NULL; i++) { LOG(THREAD, LOG_SYSCALLS, 2, "\targ %d: len=%d\n", i, strlen(argv[i])); LOG(THREAD, LOG_SYSCALLS, 3, "\targ %d: %s\n", i, argv[i]); } } } }); /* i#237/PR 498284: if we're a vfork "thread" we're really in a different * process and if we exec then the parent process will still be alive. We * can't easily clean our own state (dcontext, dstack, etc.) up in our * parent process: we need it to invoke the syscall and the syscall might * fail. We could expand cleanup_and_terminate to also be able to invoke * SYS_execve: but execve seems more likely to fail than termination * syscalls. Our solution is to mark this thread as "execve" and hide it * from regular thread queries; we clean it up in the process-exiting * synch_with_thread(), or if the same parent thread performs another vfork * (to prevent heap accumulation from repeated vfork+execve). Since vfork * on linux suspends the parent, there cannot be any races with the execve * syscall completing: there can't even be peer vfork threads, so we could * set a flag and clean up in dispatch, but that seems overkill. (If vfork * didn't suspend the parent we'd need to touch a marker file or something * to know the execve was finished.) 
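     * In short: the vfork child's thread_record_t is flagged via
     * mark_thread_execve() below and reaped later by cleanup_after_vfork_execve()
     * or at process exit.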
*/ mark_thread_execve(dcontext->thread_record, true); #ifdef STATIC_LIBRARY /* no way we can inject, we just lose control */ SYSLOG_INTERNAL_WARNING("WARNING: static DynamoRIO library, losing control on execve"); return 0; #endif /* Issue 20: handle cross-architecture execve */ /* Xref alternate solution i#145: use dual paths on * LD_LIBRARY_PATH to solve cross-arch execve */ file = os_open(fname, OS_OPEN_READ); if (file != INVALID_FILE) { if (!module_file_is_module64(file, &x64, NULL/*only care about primary==execve*/)) expect_to_fail = true; os_close(file); } else expect_to_fail = true; inject_library_path = IF_X64_ELSE(x64, !x64) ? dynamorio_library_path : dynamorio_alt_arch_path; should_inject = DYNAMO_OPTION(follow_children); if (get_config_val_other_app(get_short_name(fname), get_process_id(), x64 ? DR_PLATFORM_64BIT : DR_PLATFORM_32BIT, DYNAMORIO_VAR_RUNUNDER, rununder_buf, BUFFER_SIZE_ELEMENTS(rununder_buf), &app_specific, &from_env, NULL /* 1config is ok */)) { if (should_inject_from_rununder(rununder_buf, app_specific, from_env, &rununder_on)) should_inject = rununder_on; } if (should_inject) add_dr_env_vars(dcontext, inject_library_path, fname); else { dcontext->sys_param0 = 0; dcontext->sys_param1 = 0; } #ifdef LINUX /* We have to be accurate with expect_to_fail as we cannot come back * and fail the syscall once the kernel execs DR! */ if (should_inject && DYNAMO_OPTION(early_inject) && !expect_to_fail) { /* i#909: change the target image to libdynamorio.so */ const char *drpath = IF_X64_ELSE(x64, !x64) ? dynamorio_library_filepath : dynamorio_alt_arch_filepath; TRY_EXCEPT(dcontext, /* try */ { if (symlink_is_self_exe(argv[0])) { /* we're out of sys_param entries so we assume argv[0] == fname */ dcontext->sys_param3 = (reg_t) argv; argv[0] = fname; /* XXX: handle readable but not writable! */ } else dcontext->sys_param3 = 0; /* no restore in post */ dcontext->sys_param4 = (reg_t) fname; /* store for restore in post */ *sys_param_addr(dcontext, 0) = (reg_t) drpath; LOG(THREAD, LOG_SYSCALLS, 2, "actual execve on: %s\n", (char *)sys_param(dcontext, 0)); }, /* except */ { dcontext->sys_param3 = 0; /* no restore in post */ dcontext->sys_param4 = 0; /* no restore in post */ LOG(THREAD, LOG_SYSCALLS, 2, "argv is unreadable, expect execve to fail\n"); }); } else { dcontext->sys_param3 = 0; /* no restore in post */ dcontext->sys_param4 = 0; /* no restore in post */ } #endif /* we need to clean up the .1config file here. if the execve fails, * we'll just live w/o dynamic option re-read. */ config_exit(); return 0; } static void handle_execve_post(dcontext_t *dcontext) { /* if we get here it means execve failed (doesn't return on success), * or we did an execve from a vfork and its memory changes are visible * in the parent process. * we have to restore env to how it was and free the allocated heap. */ char **old_envp = (char **) dcontext->sys_param0; char **new_envp = (char **) dcontext->sys_param1; #ifdef STATIC_LIBRARY /* nothing to clean up */ return; #endif #ifdef LINUX if (dcontext->sys_param4 != 0) { /* restore original /proc/.../exe */ *sys_param_addr(dcontext, 0) = dcontext->sys_param4; if (dcontext->sys_param3 != 0) { /* restore original argv[0] */ const char **argv = (const char **) dcontext->sys_param3; argv[0] = (const char *) dcontext->sys_param4; } } #endif if (new_envp != NULL) { int i; LOG(THREAD, LOG_SYSCALLS, 2, "\tcleaning up our env vars\n"); /* we replaced existing ones and/or added new ones. * we can't compare to old_envp b/c it may have changed by now. 
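         * Instead we rely on is_dynamo_address() below: only the strings we
         * allocated ourselves in add_dr_env_vars() live on DR's heap.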
*/ for (i=0; new_envp[i] != NULL; i++) { if (is_dynamo_address((byte *)new_envp[i])) { heap_free(dcontext, new_envp[i], sizeof(char)*(strlen(new_envp[i])+1) HEAPACCT(ACCT_OTHER)); } } i++; /* need to de-allocate final null slot too */ heap_free(dcontext, new_envp, sizeof(char*)*i HEAPACCT(ACCT_OTHER)); /* restore prev envp if we're post-syscall */ if (!dcontext->thread_record->execve) *sys_param_addr(dcontext, 2) = (reg_t) old_envp; } } /* i#237/PR 498284: to avoid accumulation of thread state we clean up a vfork * child who invoked execve here so we have at most one outstanding thread. we * also clean up at process exit and before thread creation. we could do this * in dispatch but too rare to be worth a flag check there. */ static void cleanup_after_vfork_execve(dcontext_t *dcontext) { thread_record_t **threads; int num_threads, i; if (num_execve_threads == 0) return; mutex_lock(&thread_initexit_lock); get_list_of_threads_ex(&threads, &num_threads, true/*include execve*/); for (i=0; i<num_threads; i++) { if (threads[i]->execve) { LOG(THREAD, LOG_SYSCALLS, 2, "cleaning up earlier vfork thread "TIDFMT"\n", threads[i]->id); dynamo_other_thread_exit(threads[i]); } } mutex_unlock(&thread_initexit_lock); global_heap_free(threads, num_threads*sizeof(thread_record_t*) HEAPACCT(ACCT_THREAD_MGT)); } /* returns whether to execute syscall */ static bool handle_close_pre(dcontext_t *dcontext) { /* in fs/open.c: asmlinkage long sys_close(unsigned int fd) */ uint fd = (uint) sys_param(dcontext, 0); LOG(THREAD, LOG_SYSCALLS, 3, "syscall: close fd %d\n", fd); /* prevent app from closing our files */ if (fd_is_dr_owned(fd)) { SYSLOG_INTERNAL_WARNING_ONCE("app trying to close DR file(s)"); LOG(THREAD, LOG_TOP|LOG_SYSCALLS, 1, "WARNING: app trying to close DR file %d! Not allowing it.\n", fd); if (DYNAMO_OPTION(fail_on_stolen_fds)) { set_failure_return_val(dcontext, EBADF); DODEBUG({ dcontext->expect_last_syscall_to_fail = true; }); } else set_success_return_val(dcontext, 0); return false; /* do not execute syscall */ } /* Xref PR 258731 - duplicate STDOUT/STDERR when app closes them so we (or * a client) can continue to use them for logging. */ if (DYNAMO_OPTION(dup_stdout_on_close) && fd == STDOUT) { our_stdout = fd_priv_dup(fd); if (our_stdout < 0) /* no private fd available */ our_stdout = dup_syscall(fd); if (our_stdout >= 0) fd_mark_close_on_exec(our_stdout); fd_table_add(our_stdout, 0); LOG(THREAD, LOG_TOP|LOG_SYSCALLS, 1, "WARNING: app is closing stdout=%d - duplicating descriptor for " "DynamoRIO usage got %d.\n", fd, our_stdout); if (privmod_stdout != NULL && IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) { /* update the privately loaded libc's stdout _fileno. */ (*privmod_stdout)->STDFILE_FILENO = our_stdout; } } if (DYNAMO_OPTION(dup_stderr_on_close) && fd == STDERR) { our_stderr = fd_priv_dup(fd); if (our_stderr < 0) /* no private fd available */ our_stderr = dup_syscall(fd); if (our_stderr >= 0) fd_mark_close_on_exec(our_stderr); fd_table_add(our_stderr, 0); LOG(THREAD, LOG_TOP|LOG_SYSCALLS, 1, "WARNING: app is closing stderr=%d - duplicating descriptor for " "DynamoRIO usage got %d.\n", fd, our_stderr); if (privmod_stderr != NULL && IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) { /* update the privately loaded libc's stderr _fileno. 
             */
            (*privmod_stderr)->STDFILE_FILENO = our_stderr;
        }
    }
    if (DYNAMO_OPTION(dup_stdin_on_close) && fd == STDIN) {
        our_stdin = fd_priv_dup(fd);
        if (our_stdin < 0) /* no private fd available */
            our_stdin = dup_syscall(fd);
        if (our_stdin >= 0)
            fd_mark_close_on_exec(our_stdin);
        fd_table_add(our_stdin, 0);
        LOG(THREAD, LOG_TOP|LOG_SYSCALLS, 1,
            "WARNING: app is closing stdin=%d - duplicating descriptor for "
            "DynamoRIO usage got %d.\n", fd, our_stdin);
        if (privmod_stdin != NULL &&
            IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
            /* update the privately loaded libc's stdin _fileno. */
            (*privmod_stdin)->STDFILE_FILENO = our_stdin;
        }
    }
    return true;
}

/***************************************************************************/

/* Used to obtain the pc of the syscall instr itself when the dcontext dc
 * is currently in a syscall handler.
 * Alternatively for sysenter we could set app_sysenter_instr_addr for Linux.
 */
#define SYSCALL_PC(dc) \
 ((get_syscall_method() == SYSCALL_METHOD_INT ||     \
   get_syscall_method() == SYSCALL_METHOD_SYSCALL) ? \
  (ASSERT(SYSCALL_LENGTH == INT_LENGTH),             \
   POST_SYSCALL_PC(dc) - INT_LENGTH) :               \
  (vsyscall_syscall_end_pc - SYSENTER_LENGTH))

static void
handle_exit(dcontext_t *dcontext)
{
    priv_mcontext_t *mc = get_mcontext(dcontext);
    bool exit_process = false;

    if (dcontext->sys_num == SYSNUM_EXIT_PROCESS) {
        /* We can have multiple thread groups within the same address space.
         * We need to know whether this is the only group left.
         * FIXME: we can have races where new threads are created after our
         * check: we'll live with that for now, but the right approach is to
         * suspend all threads via synch_with_all_threads(), do the check,
         * and if exit_process then exit w/o resuming: though have to
         * coordinate lock access w/ cleanup_and_terminate.
         * Xref i#94.  Xref PR 541760.
         */
        process_id_t mypid = get_process_id();
        thread_record_t **threads;
        int num_threads, i;
        exit_process = true;
        mutex_lock(&thread_initexit_lock);
        get_list_of_threads(&threads, &num_threads);
        for (i=0; i<num_threads; i++) {
            if (threads[i]->pid != mypid && !IS_CLIENT_THREAD(threads[i]->dcontext)) {
                exit_process = false;
                break;
            }
        }
        if (!exit_process) {
            /* We need to clean up the other threads in our group here. */
            thread_id_t myid = get_thread_id();
            priv_mcontext_t mcontext;
            DEBUG_DECLARE(thread_synch_result_t synch_res;)
            LOG(THREAD, LOG_TOP|LOG_SYSCALLS, 1,
                "SYS_exit_group %d not final group: %d cleaning up just "
                "threads in group\n", get_process_id(), get_thread_id());
            /* Set where we are to handle reciprocal syncs */
            copy_mcontext(mc, &mcontext);
            mc->pc = SYSCALL_PC(dcontext);
            for (i=0; i<num_threads; i++) {
                if (threads[i]->id != myid && threads[i]->pid == mypid) {
                    /* See comments in dynamo_process_exit_cleanup(): we terminate
                     * to make cleanup easier, but may want to switch to shifting
                     * the target thread to a stack-free loop.
                     */
                    DEBUG_DECLARE(synch_res =)
                        synch_with_thread(threads[i]->id, true/*block*/,
                                          true/*have initexit lock*/,
                                          THREAD_SYNCH_VALID_MCONTEXT,
                                          THREAD_SYNCH_TERMINATED_AND_CLEANED,
                                          THREAD_SYNCH_SUSPEND_FAILURE_IGNORE);
                    /* initexit lock may be released and re-acquired in course of
                     * doing the synch so we may have races where the thread
                     * exits on its own (or new threads appear): we'll live
                     * with those for now.
*/ ASSERT(synch_res == THREAD_SYNCH_RESULT_SUCCESS); } } copy_mcontext(&mcontext, mc); } mutex_unlock(&thread_initexit_lock); global_heap_free(threads, num_threads*sizeof(thread_record_t*) HEAPACCT(ACCT_THREAD_MGT)); } if (is_last_app_thread() && !dynamo_exited) { LOG(THREAD, LOG_TOP|LOG_SYSCALLS, 1, "SYS_exit%s(%d) in final thread "TIDFMT" of "PIDFMT" => exiting DynamoRIO\n", (dcontext->sys_num == SYSNUM_EXIT_PROCESS) ? "_group" : "", MCXT_SYSNUM_REG(mc), get_thread_id(), get_process_id()); /* we want to clean up even if not automatic startup! */ automatic_startup = true; exit_process = true; } else { LOG(THREAD, LOG_TOP|LOG_THREADS|LOG_SYSCALLS, 1, "SYS_exit%s(%d) in thread "TIDFMT" of "PIDFMT" => cleaning up %s\n", (dcontext->sys_num == SYSNUM_EXIT_PROCESS) ? "_group" : "", MCXT_SYSNUM_REG(mc), get_thread_id(), get_process_id(), exit_process ? "process" : "thread"); } KSTOP(num_exits_dir_syscall); cleanup_and_terminate(dcontext, MCXT_SYSNUM_REG(mc), sys_param(dcontext, 0), sys_param(dcontext, 1), exit_process, /* SYS_bsdthread_terminate has 2 more args */ sys_param(dcontext, 2), sys_param(dcontext, 3)); } #if defined(LINUX) && defined(X86) /* XXX i#58: just until we have Mac support */ static bool os_set_app_thread_area(dcontext_t *dcontext, our_modify_ldt_t *user_desc) { #ifdef X86 int i; os_thread_data_t *ostd = dcontext->os_field; our_modify_ldt_t *desc = (our_modify_ldt_t *)ostd->app_thread_areas; if (user_desc->seg_not_present == 1) { /* find an empty one to update */ for (i = 0; i < GDT_NUM_TLS_SLOTS; i++) { if (desc[i].seg_not_present == 1) break; } if (i < GDT_NUM_TLS_SLOTS) { user_desc->entry_number = GDT_SELECTOR(i + tls_min_index()); memcpy(&desc[i], user_desc, sizeof(*user_desc)); } else return false; } else { /* If we used early injection, this might be ld.so trying to set up TLS. We * direct the app to use the GDT entry we already set up for our private * libraries, but only the first time it requests TLS. 
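         * The handoff is one-shot: return_stolen_lib_tls_gdt is re-checked and
         * cleared under set_thread_area_lock below, so only a single
         * set_thread_area call receives our private-lib selector.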
         */
        if (user_desc->entry_number == -1 && return_stolen_lib_tls_gdt) {
            mutex_lock(&set_thread_area_lock);
            if (return_stolen_lib_tls_gdt) {
                uint selector = read_thread_register(LIB_SEG_TLS);
                uint index = SELECTOR_INDEX(selector);
                SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
                return_stolen_lib_tls_gdt = false;
                SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
                user_desc->entry_number = index;
                LOG(GLOBAL, LOG_THREADS, 2, "%s: directing app to use "
                    "selector 0x%x for first call to set_thread_area\n",
                    __FUNCTION__, selector);
            }
            mutex_unlock(&set_thread_area_lock);
        }
        /* update the specific one */
        i = user_desc->entry_number - tls_min_index();
        if (i < 0 || i >= GDT_NUM_TLS_SLOTS)
            return false;
        LOG(GLOBAL, LOG_THREADS, 2,
            "%s: change selector 0x%x base from "PFX" to "PFX"\n",
            __FUNCTION__, GDT_SELECTOR(user_desc->entry_number),
            desc[i].base_addr, user_desc->base_addr);
        memcpy(&desc[i], user_desc, sizeof(*user_desc));
    }
    /* if not conflict with dr's tls, perform the syscall */
    if (IF_CLIENT_INTERFACE_ELSE(!INTERNAL_OPTION(private_loader), true) &&
        GDT_SELECTOR(user_desc->entry_number) != read_thread_register(SEG_TLS) &&
        GDT_SELECTOR(user_desc->entry_number) != read_thread_register(LIB_SEG_TLS))
        return false;
#elif defined(ARM)
    /* FIXME i#1551: NYI on ARM */
    ASSERT_NOT_IMPLEMENTED(false);
#endif /* X86/ARM */
    return true;
}

static bool
os_get_app_thread_area(dcontext_t *dcontext, our_modify_ldt_t *user_desc)
{
#ifdef X86
    os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
    our_modify_ldt_t *desc = (our_modify_ldt_t *)ostd->app_thread_areas;
    int i = user_desc->entry_number - tls_min_index();
    if (i < 0 || i >= GDT_NUM_TLS_SLOTS)
        return false;
    if (desc[i].seg_not_present == 1)
        return false;
#elif defined(ARM)
    /* FIXME i#1551: NYI on ARM */
    ASSERT_NOT_IMPLEMENTED(false);
#endif /* X86/ARM */
    return true;
}
#endif

/* This function is used to switch the lib tls segment when creating a thread.
 * We switch to app's lib tls seg before thread creation system call, i.e.
 * clone and vfork, and switch back to dr's lib tls seg after the system call.
 * It is only called on the parent thread, not the child thread.
 * The child thread's tls is setup in os_tls_app_seg_init.
 */
/* XXX: It looks like the Linux kernel has some dependency on the segment
 * descriptor.  If using dr's segment descriptor, the created thread will have
 * access violation for tls not being setup.  However, it works fine if we switch
 * the descriptor to app's segment descriptor before creating the thread.
 * We should be able to remove this function later if we find the problem.
 */
static bool
os_switch_lib_tls(dcontext_t *dcontext, bool to_app)
{
    return os_switch_seg_to_context(dcontext, LIB_SEG_TLS, to_app);
}

#ifdef X86
/* dcontext can be NULL if !to_app */
static bool
os_switch_seg_to_base(dcontext_t *dcontext, os_local_state_t *os_tls,
                      reg_id_t seg, bool to_app, app_pc base)
{
    bool res = false;
    ASSERT(dcontext != NULL);
    ASSERT(IF_X86_ELSE((seg == SEG_FS || seg == SEG_GS),
                       (seg == DR_REG_TPIDRURW || seg == DR_REG_TPIDRURO)));
    switch (os_tls->tls_type) {
# ifdef X64
    case TLS_TYPE_ARCH_PRCTL: {
        res = tls_set_fs_gs_segment_base(os_tls->tls_type, seg, base, NULL);
        ASSERT(res);
        LOG(GLOBAL, LOG_THREADS, 2,
            "%s %s: arch_prctl successful for thread "TIDFMT" base "PFX"\n",
            __FUNCTION__, to_app ? "to app" : "to DR", get_thread_id(), base);
        if (seg == SEG_TLS && base == NULL) {
            /* Set the selector to 0 so we don't think TLS is available. */
            /* FIXME i#107: Still assumes app isn't using SEG_TLS.
*/ reg_t zero = 0; WRITE_DR_SEG(zero); } break; } # endif case TLS_TYPE_GDT: { our_modify_ldt_t desc; uint index; uint selector; if (to_app) { selector = os_tls->app_lib_tls_reg; index = SELECTOR_INDEX(selector); } else { index = (seg == LIB_SEG_TLS ? tls_priv_lib_index() : tls_dr_index()); ASSERT(index != -1 && "TLS indices not initialized"); selector = GDT_SELECTOR(index); } if (selector != 0) { if (to_app) { our_modify_ldt_t *areas = ((os_thread_data_t *)dcontext->os_field)->app_thread_areas; ASSERT((index >= tls_min_index()) && ((index - tls_min_index()) <= GDT_NUM_TLS_SLOTS)); desc = areas[index - tls_min_index()]; } else { tls_init_descriptor(&desc, base, GDT_NO_SIZE_LIMIT, index); } res = tls_set_fs_gs_segment_base(os_tls->tls_type, seg, NULL, &desc); ASSERT(res); } else { /* For a selector of zero, we just reset the segment to zero. We * don't need to call set_thread_area. */ res = true; /* Indicate success. */ } /* XXX i#2098: it is unsafe to call LOG here in between GDT and register changes */ /* i558 update lib seg reg to enforce the segment changes */ if (seg == SEG_TLS) WRITE_DR_SEG(selector); else WRITE_LIB_SEG(selector); LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting %s to 0x%x\n", __FUNCTION__, (to_app ? "app" : "dr"), reg_names[seg], selector); LOG(THREAD, LOG_LOADER, 2, "%s %s: set_thread_area successful for thread "TIDFMT" base "PFX"\n", __FUNCTION__, to_app ? "to app" : "to DR", get_thread_id(), base); break; } case TLS_TYPE_LDT: { uint index; uint selector; /* XXX i#1285: added for MacOS private loader, but we don't * have enough other code to test this yet. */ ASSERT_NOT_TESTED(); if (to_app) { selector = os_tls->app_lib_tls_reg; index = SELECTOR_INDEX(selector); } else { index = (seg == LIB_SEG_TLS ? tls_priv_lib_index() : tls_dr_index()); ASSERT(index != -1 && "TLS indices not initialized"); selector = LDT_SELECTOR(index); } LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting %s to 0x%x\n", __FUNCTION__, (to_app ? "app" : "dr"), reg_names[seg], selector); if (seg == SEG_TLS) WRITE_DR_SEG(selector); else WRITE_LIB_SEG(selector); LOG(THREAD, LOG_LOADER, 2, "%s %s: ldt selector swap successful for thread "TIDFMT"\n", __FUNCTION__, to_app ? "to app" : "to DR", get_thread_id()); break; } default: ASSERT_NOT_REACHED(); return false; } ASSERT((!to_app && seg == SEG_TLS) || BOOLS_MATCH(to_app, os_using_app_state(dcontext))); return res; } static bool os_set_dr_tls_base(dcontext_t *dcontext, os_local_state_t *tls, byte *base) { if (tls == NULL) { ASSERT(dcontext != NULL); tls = get_os_tls_from_dc(dcontext); } return os_switch_seg_to_base(dcontext, tls, SEG_TLS, false, base); } #endif /* X86 */ static bool os_switch_seg_to_context(dcontext_t *dcontext, reg_id_t seg, bool to_app) { os_local_state_t *os_tls = get_os_tls_from_dc(dcontext); #ifdef X86 app_pc base; /* we can only update the executing thread's segment (i#920) */ ASSERT_MESSAGE(CHKLVL_ASSERTS+1/*expensive*/, "can only act on executing thread", /* i#2089: a clone syscall, or when native, temporarily puts in * invalid TLS, so we don't check get_thread_private_dcontext(). 
*/ is_thread_tls_allocated() && dcontext->owning_thread == get_sys_thread_id()); if (to_app) { base = os_get_app_tls_base(dcontext, seg); } else { base = os_get_priv_tls_base(dcontext, seg); } return os_switch_seg_to_base(dcontext, os_tls, seg, to_app, base); #elif defined(AARCHXX) bool res = false; os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field; ASSERT(INTERNAL_OPTION(private_loader)); if (to_app) { /* On switching to app's TLS, we need put DR's TLS base into app's TLS * at the same offset so it can be loaded on entering code cache. * Otherwise, the context switch code on entering fcache will fault on * accessing DR's TLS. * The app's TLS slot value is stored into privlib's TLS slot for * later restore on switching back to privlib's TLS. */ byte **priv_lib_tls_swap_slot = (byte **) (ostd->priv_lib_tls_base + DR_TLS_BASE_OFFSET); byte **app_lib_tls_swap_slot = (byte **) (os_tls->app_lib_tls_base + DR_TLS_BASE_OFFSET); LOG(THREAD, LOG_LOADER, 3, "%s: switching to app: app slot=&"PFX" *"PFX", priv slot=&"PFX" *"PFX"\n", __FUNCTION__, app_lib_tls_swap_slot, *app_lib_tls_swap_slot, priv_lib_tls_swap_slot, *priv_lib_tls_swap_slot); byte *dr_tls_base = *priv_lib_tls_swap_slot; *priv_lib_tls_swap_slot = *app_lib_tls_swap_slot; *app_lib_tls_swap_slot = dr_tls_base; LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting coproc reg to 0x%x\n", __FUNCTION__, (to_app ? "app" : "dr"), os_tls->app_lib_tls_base); res = write_thread_register(os_tls->app_lib_tls_base); } else { /* Restore the app's TLS slot that we used for storing DR's TLS base, * and put DR's TLS base back to privlib's TLS slot. */ byte **priv_lib_tls_swap_slot = (byte **) (ostd->priv_lib_tls_base + DR_TLS_BASE_OFFSET); byte **app_lib_tls_swap_slot = (byte **) (os_tls->app_lib_tls_base + DR_TLS_BASE_OFFSET); byte *dr_tls_base = *app_lib_tls_swap_slot; LOG(THREAD, LOG_LOADER, 3, "%s: switching to DR: app slot=&"PFX" *"PFX", priv slot=&"PFX" *"PFX"\n", __FUNCTION__, app_lib_tls_swap_slot, *app_lib_tls_swap_slot, priv_lib_tls_swap_slot, *priv_lib_tls_swap_slot); *app_lib_tls_swap_slot = *priv_lib_tls_swap_slot; *priv_lib_tls_swap_slot = dr_tls_base; LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting coproc reg to 0x%x\n", __FUNCTION__, (to_app ? "app" : "dr"), ostd->priv_lib_tls_base); res = write_thread_register(ostd->priv_lib_tls_base); } LOG(THREAD, LOG_LOADER, 2, "%s %s: set_tls swap success=%d for thread "TIDFMT"\n", __FUNCTION__, to_app ? "to app" : "to DR", res, get_thread_id()); return res; #elif defined(AARCH64) (void)os_tls; ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */ return false; #endif /* X86/ARM/AARCH64 */ } /* System call interception: put any special handling here * Arguments come from the pusha right before the call */ /* WARNING: flush_fragments_and_remove_region assumes that pre and post system * call handlers do not examine or modify fcache or its fragments in any * way except for calling flush_fragments_and_remove_region! */ /* WARNING: All registers are IN values, but NOT OUT values -- * must set mcontext's register for that. */ /* Returns false if system call should NOT be executed (in which case, * post_system_call() will *not* be called!). 
 * Returns true if system call should go ahead
 */
/* XXX: split out specific handlers into separate routines
 */
bool
pre_system_call(dcontext_t *dcontext)
{
    priv_mcontext_t *mc = get_mcontext(dcontext);
    bool execute_syscall = true;
    where_am_i_t old_whereami = dcontext->whereami;
    dcontext->whereami = WHERE_SYSCALL_HANDLER;
    /* FIXME We haven't yet done the work to detect which syscalls we
     * can determine a priori will fail.  Once we do, we will set the
     * expect_last_syscall_to_fail to true for those cases, and can
     * confirm in post_system_call() that the syscall failed as
     * expected.
     */
    DODEBUG(dcontext->expect_last_syscall_to_fail = false;);

    /* save key register values for post_system_call (they get clobbered
     * in syscall itself)
     */
    dcontext->sys_num = os_normalized_sysnum((int)MCXT_SYSNUM_REG(mc), NULL, dcontext);

    RSTATS_INC(pre_syscall);
    DOSTATS({
        if (ignorable_system_call_normalized(dcontext->sys_num))
            STATS_INC(pre_syscall_ignorable);
    });
    LOG(THREAD, LOG_SYSCALLS, 2, "system call %d\n", dcontext->sys_num);

#if defined(LINUX) && defined(X86)
    /* PR 313715: If we fail to hook the vsyscall page (xref PR 212570, PR 288330)
     * we fall back on int, but we have to tweak syscall param #5 (ebp).
     * Once we have PR 288330 we can remove this.
     */
    if (should_syscall_method_be_sysenter() && !dcontext->sys_was_int) {
        dcontext->sys_xbp = mc->xbp;
        /* not using SAFE_READ due to performance concerns (we do this for
         * every single system call on systems where we can't hook vsyscall!)
         */
        TRY_EXCEPT(dcontext, /* try */ {
            mc->xbp = *(reg_t*)mc->xsp;
        }, /* except */ {
            ASSERT_NOT_REACHED();
            mc->xbp = 0;
        });
    }
#endif

    switch (dcontext->sys_num) {

    case SYSNUM_EXIT_PROCESS:
# if defined(LINUX) && VMX86_SERVER
        if (os_in_vmkernel_32bit()) {
            /* on esx 3.5 => ENOSYS, so wait for SYS_exit */
            LOG(THREAD, LOG_SYSCALLS, 2, "on esx35 => ignoring exitgroup\n");
            DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
            break;
        }
#endif
        /* fall-through */
    case SYSNUM_EXIT_THREAD: {
        handle_exit(dcontext);
        break;
    }

    /****************************************************************************/
    /* MEMORY REGIONS */

#if defined(LINUX) && !defined(X64) && !defined(ARM)
    case SYS_mmap: {
        /* in /usr/src/linux/arch/i386/kernel/sys_i386.c:
           asmlinkage int old_mmap(struct mmap_arg_struct_t *arg)
         */
        mmap_arg_struct_t *arg = (mmap_arg_struct_t *) sys_param(dcontext, 0);
        mmap_arg_struct_t arg_buf;
        if (safe_read(arg, sizeof(mmap_arg_struct_t), &arg_buf)) {
            void *addr = (void *) arg->addr;
            size_t len = (size_t) arg->len;
            uint prot = (uint) arg->prot;
            LOG(THREAD, LOG_SYSCALLS, 2,
                "syscall: mmap addr="PFX" size="PIFX" prot=0x%x"
                " flags="PIFX" offset="PIFX" fd=%d\n",
                addr, len, prot, arg->flags, arg->offset, arg->fd);
            /* Check for overlap with existing code or patch-proof regions */
            if (addr != NULL &&
                !app_memory_pre_alloc(dcontext, addr, len, osprot_to_memprot(prot),
                                      !TEST(MAP_FIXED, arg->flags))) {
                /* Rather than failing or skipping the syscall we'd like to just
                 * remove the hint -- but we don't want to write to app memory, so
                 * we do fail.  We could set up our own mmap_arg_struct_t but
                 * we'd need dedicated per-thread storage, and SYS_mmap is obsolete.
*/ execute_syscall = false; set_failure_return_val(dcontext, ENOMEM); DODEBUG({ dcontext->expect_last_syscall_to_fail = true; }); break; } } /* post_system_call does the work */ dcontext->sys_param0 = (reg_t) arg; break; } #endif case IF_MACOS_ELSE(SYS_mmap,IF_X64_ELSE(SYS_mmap,SYS_mmap2)): { /* in /usr/src/linux/arch/i386/kernel/sys_i386.c: asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) */ void *addr = (void *) sys_param(dcontext, 0); size_t len = (size_t) sys_param(dcontext, 1); uint prot = (uint) sys_param(dcontext, 2); uint flags = (uint) sys_param(dcontext, 3); LOG(THREAD, LOG_SYSCALLS, 2, "syscall: mmap2 addr="PFX" size="PIFX" prot=0x%x" " flags="PIFX" offset="PIFX" fd=%d\n", addr, len, prot, flags, sys_param(dcontext, 5), sys_param(dcontext, 4)); /* Check for overlap with existing code or patch-proof regions */ if (addr != NULL && !app_memory_pre_alloc(dcontext, addr, len, osprot_to_memprot(prot), !TEST(MAP_FIXED, flags))) { if (!TEST(MAP_FIXED, flags)) { /* Rather than failing or skipping the syscall we just remove * the hint which should eliminate any overlap. */ *sys_param_addr(dcontext, 0) = 0; } else { execute_syscall = false; set_failure_return_val(dcontext, ENOMEM); DODEBUG({ dcontext->expect_last_syscall_to_fail = true; }); break; } } /* post_system_call does the work */ dcontext->sys_param0 = (reg_t) addr; dcontext->sys_param1 = len; dcontext->sys_param2 = prot; dcontext->sys_param3 = flags; break; } /* must flush stale fragments when we see munmap/mremap */ case SYS_munmap: { /* in /usr/src/linux/mm/mmap.c: asmlinkage long sys_munmap(unsigned long addr, uint len) */ app_pc addr = (void *) sys_param(dcontext, 0); size_t len = (size_t) sys_param(dcontext, 1); LOG(THREAD, LOG_SYSCALLS, 2, "syscall: munmap addr="PFX" size="PFX"\n", addr, len); RSTATS_INC(num_app_munmaps); /* FIXME addr is supposed to be on a page boundary so we * could detect that condition here and set * expect_last_syscall_to_fail. */ /* save params in case an undo is needed in post_system_call */ dcontext->sys_param0 = (reg_t) addr; dcontext->sys_param1 = len; /* We assume that the unmap will succeed and so are conservative * and remove the region from exec areas and flush all fragments * prior to issuing the syscall. If the unmap fails, we try to * recover in post_system_call() by re-adding the region. This * approach has its shortcomings -- see comments below in * post_system_call(). */ /* Check for unmapping a module. */ os_get_module_info_lock(); if (module_overlaps(addr, len)) { /* FIXME - handle unmapping more than one module at once, or only unmapping * part of a module (for which case should adjust view size? or treat as full * unmap?). Theoretical for now as we haven't seen this. */ module_area_t *ma = module_pc_lookup(addr); ASSERT_CURIOSITY(ma != NULL); ASSERT_CURIOSITY(addr == ma->start); /* XREF 307599 on rounding module end to the next PAGE boundary */ ASSERT_CURIOSITY((app_pc)ALIGN_FORWARD(addr+len, PAGE_SIZE) == ma->end); os_get_module_info_unlock(); /* i#210: * we only think a module is removed if its first memory region * is unloaded (unmapped). * XREF i#160 to fix the real problem of handling module splitting. 
*/ if (ma != NULL && ma->start == addr) module_list_remove(addr, ALIGN_FORWARD(len, PAGE_SIZE)); } else os_get_module_info_unlock(); app_memory_deallocation(dcontext, (app_pc)addr, len, false /* don't own thread_initexit_lock */, true /* image, FIXME: though not necessarily */); /* FIXME: case 4983 use is_elf_so_header() */ #ifndef HAVE_MEMINFO_QUERY memcache_lock(); memcache_remove(addr, addr + len); memcache_unlock(); #endif break; } #ifdef LINUX case SYS_mremap: { /* in /usr/src/linux/mm/mmap.c: asmlinkage unsigned long sys_mremap(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr) */ dr_mem_info_t info; app_pc addr = (void *) sys_param(dcontext, 0); size_t old_len = (size_t) sys_param(dcontext, 1); size_t new_len = (size_t) sys_param(dcontext, 2); DEBUG_DECLARE(bool ok;) LOG(THREAD, LOG_SYSCALLS, 2, "syscall: mremap addr="PFX" size="PFX"\n", addr, old_len); /* post_system_call does the work */ dcontext->sys_param0 = (reg_t) addr; dcontext->sys_param1 = old_len; dcontext->sys_param2 = new_len; /* i#173 * we need memory type and prot to set the * new memory region in the post_system_call */ DEBUG_DECLARE(ok =) query_memory_ex(addr, &info); ASSERT(ok); dcontext->sys_param3 = info.prot; dcontext->sys_param4 = info.type; DOCHECK(1, { /* we don't expect to see remappings of modules */ os_get_module_info_lock(); ASSERT_CURIOSITY(!module_overlaps(addr, old_len)); os_get_module_info_unlock(); }); break; } #endif case SYS_mprotect: { /* in /usr/src/linux/mm/mprotect.c: asmlinkage long sys_mprotect(unsigned long start, uint len, unsigned long prot) */ uint res; DEBUG_DECLARE(size_t size;) app_pc addr = (void *) sys_param(dcontext, 0); size_t len = (size_t) sys_param(dcontext, 1); uint prot = (uint) sys_param(dcontext, 2); uint old_memprot = MEMPROT_NONE, new_memprot; bool exists = true; /* save params in case an undo is needed in post_system_call */ dcontext->sys_param0 = (reg_t) addr; dcontext->sys_param1 = len; dcontext->sys_param2 = prot; LOG(THREAD, LOG_SYSCALLS, 2, "syscall: mprotect addr="PFX" size="PFX" prot=%s\n", addr, len, memprot_string(osprot_to_memprot(prot))); if (!get_memory_info(addr, NULL, IF_DEBUG_ELSE(&size, NULL), &old_memprot)) { exists = false; /* Xref PR 413109, PR 410921: if the start, or any page, is not mapped, * this should fail with ENOMEM. We used to force-fail it to avoid * asserts in our own allmem update code, but there are cases where a * seemingly unmapped page succeeds (i#1912: next page of grows-down * initial stack). Thus we let it go through. */ LOG(THREAD, LOG_SYSCALLS, 2, "\t"PFX" isn't mapped: probably mprotect will fail\n", addr); } else { /* If mprotect region spans beyond the end of the vmarea then it * spans 2 or more vmareas with dissimilar protection (xref * PR 410921) or has unallocated regions in between (PR 413109). */ DOCHECK(1, dcontext->mprot_multi_areas = len > size ? true : false;); } new_memprot = osprot_to_memprot(prot) | /* mprotect won't change meta flags */ (old_memprot & MEMPROT_META_FLAGS); res = app_memory_protection_change(dcontext, addr, len, new_memprot, &new_memprot, NULL); if (res != DO_APP_MEM_PROT_CHANGE) { if (res == FAIL_APP_MEM_PROT_CHANGE) { ASSERT_NOT_IMPLEMENTED(false); /* return code? */ } else { ASSERT_NOT_IMPLEMENTED(res != SUBSET_APP_MEM_PROT_CHANGE); ASSERT_NOT_REACHED(); } execute_syscall = false; } else { /* FIXME Store state for undo if the syscall fails. 
             */
            IF_NO_MEMQUERY(memcache_update_locked(addr, addr + len, new_memprot,
                                                  -1/*type unchanged*/, exists));
        }
        break;
    }
#ifdef ANDROID
    case SYS_prctl:
        dcontext->sys_param0 = sys_param(dcontext, 0);
        dcontext->sys_param1 = sys_param(dcontext, 1);
        dcontext->sys_param2 = sys_param(dcontext, 2);
        dcontext->sys_param3 = sys_param(dcontext, 3);
        dcontext->sys_param4 = sys_param(dcontext, 4);
        break;
#endif
#ifdef LINUX
    case SYS_brk: {
        if (DYNAMO_OPTION(emulate_brk)) {
            /* i#1004: emulate brk via a separate mmap */
            byte *new_val = (byte *) sys_param(dcontext, 0);
            byte *res = emulate_app_brk(dcontext, new_val);
            execute_syscall = false;
            /* SYS_brk returns old brk on failure */
            set_success_return_val(dcontext, (reg_t)res);
        } else {
            /* i#91/PR 396352: need to watch SYS_brk to maintain all_memory_areas.
             * We store the old break in the param1 slot.
             */
            DODEBUG(dcontext->sys_param0 = (reg_t) sys_param(dcontext, 0););
            dcontext->sys_param1 = dynamorio_syscall(SYS_brk, 1, 0);
        }
        break;
    }
# ifdef SYS_uselib
    case SYS_uselib: {
        /* Used to get the kernel to load a shared library (legacy system call).
         * Was primarily used when statically linking to dynamically loaded shared
         * libraries that were loaded at known locations.  Shouldn't be used by
         * applications using the dynamic loader (ld), which is currently the only
         * way we can inject, so we don't expect to see this.  PR 307621.
         */
        ASSERT_NOT_IMPLEMENTED(false);
        break;
    }
# endif
#endif

    /****************************************************************************/
    /* SPAWNING */

#ifdef LINUX
    case SYS_clone: {
        /* in /usr/src/linux/arch/i386/kernel/process.c
         * 32-bit params: flags, newsp, ptid, tls, ctid
         * 64-bit params: should be the same yet tls (for ARCH_SET_FS) is in r8?!?
         *   I don't see how sys_clone gets its special args: shouldn't it
         *   just get pt_regs as a "special system call"?
         *   sys_clone(unsigned long clone_flags, unsigned long newsp,
         *             void __user *parent_tid, void __user *child_tid,
         *             struct pt_regs *regs)
         */
        uint flags = (uint) sys_param(dcontext, 0);
        LOG(THREAD, LOG_SYSCALLS, 2, "syscall: clone with flags = "PFX"\n", flags);
        LOG(THREAD, LOG_SYSCALLS, 2, "args: "PFX", "PFX", "PFX", "PFX", "PFX"\n",
            sys_param(dcontext, 0), sys_param(dcontext, 1), sys_param(dcontext, 2),
            sys_param(dcontext, 3), sys_param(dcontext, 4));
        handle_clone(dcontext, flags);
        if ((flags & CLONE_VM) == 0) {
            LOG(THREAD, LOG_SYSCALLS, 1, "\tWARNING: CLONE_VM not set!\n");
        }
        /* save for post_system_call */
        dcontext->sys_param0 = (reg_t) flags;

        /* i#1010: If we have private fds open (usually logfiles), we should
         * clean those up before they get reused by a new thread.
         * XXX: Ideally we'd do this in fd_table_add(), but we can't acquire
         * thread_initexit_lock there.
         */
        cleanup_after_vfork_execve(dcontext);

        /* For thread creation clone syscalls a clone_record_t structure
         * containing the pc after the app's syscall instr and other data
         * (see i#27) is placed at the bottom of the dstack (which is allocated
         * by create_clone_record() - it also saves app stack and switches
         * to dstack).  xref i#149/PR 403015.
         * Note: This must be done after sys_param0 is set.
         */
        if (is_thread_create_syscall(dcontext)) {
            create_clone_record(dcontext, sys_param_addr(dcontext, 1) /*newsp*/);
            os_clone_pre(dcontext);
        } else  /* This is really a fork. */
            os_fork_pre(dcontext);
        break;
    }
#elif defined(MACOS)
    case SYS_bsdthread_create: {
        /* XXX i#1403: we need earlier injection to intercept
         * bsdthread_register in order to capture workqueue threads.
         * For now we settle for intercepting bsd threads at the user thread func.
* We miss a little user-mode code but this is enough to get started. */ app_pc func = (app_pc) sys_param(dcontext, 0); void *func_arg = (void *) sys_param(dcontext, 1); void *clone_rec; LOG(THREAD, LOG_SYSCALLS, 1, "bsdthread_create: thread func "PFX", arg "PFX"\n", func, func_arg); handle_clone(dcontext, CLONE_THREAD | CLONE_VM | CLONE_SIGHAND | SIGCHLD); clone_rec = create_clone_record(dcontext, NULL, func, func_arg); dcontext->sys_param0 = (reg_t) func; dcontext->sys_param1 = (reg_t) func_arg; *sys_param_addr(dcontext, 0) = (reg_t) new_bsdthread_intercept; *sys_param_addr(dcontext, 1) = (reg_t) clone_rec; break; } case SYS_posix_spawn: { /* FIXME i#1644: monitor this call which can be fork or exec */ ASSERT_NOT_IMPLEMENTED(false); break; } #endif #ifdef SYS_vfork case SYS_vfork: { /* treat as if sys_clone with flags just as sys_vfork does */ /* in /usr/src/linux/arch/i386/kernel/process.c */ uint flags = CLONE_VFORK | CLONE_VM | SIGCHLD; LOG(THREAD, LOG_SYSCALLS, 2, "syscall: vfork\n"); handle_clone(dcontext, flags); cleanup_after_vfork_execve(dcontext); /* save for post_system_call, treated as if SYS_clone */ dcontext->sys_param0 = (reg_t) flags; /* vfork has the same needs as clone. Pass info via a clone_record_t * structure to child. See SYS_clone for info about i#149/PR 403015. */ IF_LINUX(ASSERT(is_thread_create_syscall(dcontext))); dcontext->sys_param1 = mc->xsp; /* for restoring in parent */ # ifdef MACOS create_clone_record(dcontext, (reg_t *)&mc->xsp, NULL, NULL); # else create_clone_record(dcontext, (reg_t *)&mc->xsp /*child uses parent sp*/); # endif os_clone_pre(dcontext); break; } #endif #ifdef SYS_fork case SYS_fork: { LOG(THREAD, LOG_SYSCALLS, 2, "syscall: fork\n"); os_fork_pre(dcontext); break; } #endif case SYS_execve: { int ret = handle_execve(dcontext); if (ret != 0) { execute_syscall = false; set_failure_return_val(dcontext, ret); } break; } /****************************************************************************/ /* SIGNALS */ case IF_MACOS_ELSE(SYS_sigaction,SYS_rt_sigaction): { /* 174 */ /* in /usr/src/linux/kernel/signal.c: asmlinkage long sys_rt_sigaction(int sig, const struct sigaction *act, struct sigaction *oact, size_t sigsetsize) */ int sig = (int) sys_param(dcontext, 0); const kernel_sigaction_t *act = (const kernel_sigaction_t *) sys_param(dcontext, 1); prev_sigaction_t *oact = (prev_sigaction_t *) sys_param(dcontext, 2); size_t sigsetsize = (size_t) /* On Mac there is no size arg (but it doesn't use old sigaction, so * closer to rt_ than non-rt_ below). 
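             * The rt_ variants take the sigset size as an explicit argument;
             * Mac has no such argument, hence the constant
             * sizeof(kernel_sigset_t) here.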
*/ IF_MACOS_ELSE(sizeof(kernel_sigset_t), sys_param(dcontext, 3)); uint res; LOG(THREAD, LOG_SYSCALLS, 2, "syscall: %ssigaction %d "PFX" "PFX" %d\n", IF_MACOS_ELSE("","rt_"), sig, act, oact, sigsetsize); /* post_syscall does some work as well */ dcontext->sys_param0 = (reg_t) sig; dcontext->sys_param1 = (reg_t) act; dcontext->sys_param2 = (reg_t) oact; dcontext->sys_param3 = (reg_t) sigsetsize; execute_syscall = handle_sigaction(dcontext, sig, act, oact, sigsetsize, &res); if (!execute_syscall) { LOG(THREAD, LOG_SYSCALLS, 2, "sigaction emulation => %d\n", -res); if (res == 0) set_success_return_val(dcontext, 0); else set_failure_return_val(dcontext, res); } break; } #if defined(LINUX) && !defined(X64) case SYS_sigaction: { /* 67 */ /* sys_sigaction(int sig, const struct old_sigaction *act, * struct old_sigaction *oact) */ int sig = (int) sys_param(dcontext, 0); const old_sigaction_t *act = (const old_sigaction_t *) sys_param(dcontext, 1); old_sigaction_t *oact = (old_sigaction_t *) sys_param(dcontext, 2); uint res; LOG(THREAD, LOG_SYSCALLS, 2, "syscall: sigaction %d "PFX" "PFX"\n", sig, act, oact); dcontext->sys_param0 = (reg_t) sig; dcontext->sys_param1 = (reg_t) act; dcontext->sys_param2 = (reg_t) oact; execute_syscall = handle_old_sigaction(dcontext, sig, act, oact, &res); if (!execute_syscall) { LOG(THREAD, LOG_SYSCALLS, 2, "sigaction emulation => %d\n", -res); if (res == 0) set_success_return_val(dcontext, 0); else set_failure_return_val(dcontext, res); } break; } #endif #if defined(LINUX) && !defined(X64) case SYS_sigreturn: { /* 119 */ /* in /usr/src/linux/arch/i386/kernel/signal.c: asmlinkage int sys_sigreturn(unsigned long __unused) */ execute_syscall = handle_sigreturn(dcontext, false); /* app will not expect syscall to return, so when handle_sigreturn * returns false it always redirects the context, and thus no * need to set return val here. 
*/ break; } #endif #ifdef LINUX case SYS_rt_sigreturn: { /* 173 */ /* in /usr/src/linux/arch/i386/kernel/signal.c: asmlinkage int sys_rt_sigreturn(unsigned long __unused) */ execute_syscall = handle_sigreturn(dcontext, true); /* see comment for SYS_sigreturn on return val */ break; } #endif #ifdef MACOS case SYS_sigreturn: { /* int sigreturn(struct ucontext *uctx, int infostyle) */ execute_syscall = handle_sigreturn(dcontext, (void *) sys_param(dcontext, 0), (int) sys_param(dcontext, 1)); /* see comment for SYS_sigreturn on return val */ break; } #endif case SYS_sigaltstack: { /* 186 */ /* in /usr/src/linux/arch/i386/kernel/signal.c: asmlinkage int sys_sigaltstack(const stack_t *uss, stack_t *uoss) */ const stack_t *uss = (const stack_t *) sys_param(dcontext, 0); stack_t *uoss = (stack_t *) sys_param(dcontext, 1); execute_syscall = handle_sigaltstack(dcontext, uss, uoss); if (!execute_syscall) { set_success_return_val(dcontext, 0); } break; } case IF_MACOS_ELSE(SYS_sigprocmask,SYS_rt_sigprocmask): { /* 175 */ /* in /usr/src/linux/kernel/signal.c: asmlinkage long sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize) */ /* we also need access to the params in post_system_call */ dcontext->sys_param0 = sys_param(dcontext, 0); dcontext->sys_param1 = sys_param(dcontext, 1); dcontext->sys_param2 = sys_param(dcontext, 2); dcontext->sys_param3 = sys_param(dcontext, 3); execute_syscall = handle_sigprocmask(dcontext, (int) sys_param(dcontext, 0), (kernel_sigset_t *) sys_param(dcontext, 1), (kernel_sigset_t *) sys_param(dcontext, 2), (size_t) sys_param(dcontext, 3)); if (!execute_syscall) set_success_return_val(dcontext, 0); break; } #ifdef MACOS case SYS_sigsuspend_nocancel: #endif case IF_MACOS_ELSE(SYS_sigsuspend,SYS_rt_sigsuspend): { /* 179 */ /* in /usr/src/linux/kernel/signal.c: asmlinkage int sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize) */ handle_sigsuspend(dcontext, (kernel_sigset_t *) sys_param(dcontext, 0), (size_t) sys_param(dcontext, 1)); break; } #ifdef LINUX # ifdef SYS_signalfd case SYS_signalfd: /* 282/321 */ # endif case SYS_signalfd4: { /* 289 */ /* int signalfd (int fd, const sigset_t *mask, size_t sizemask) */ /* int signalfd4(int fd, const sigset_t *mask, size_t sizemask, int flags) */ ptr_int_t new_result; dcontext->sys_param0 = sys_param(dcontext, 0); dcontext->sys_param1 = sys_param(dcontext, 1); dcontext->sys_param2 = sys_param(dcontext, 2); # ifdef SYS_signalfd if (dcontext->sys_num == SYS_signalfd) dcontext->sys_param3 = 0; else # endif dcontext->sys_param3 = sys_param(dcontext, 3); new_result = handle_pre_signalfd(dcontext, (int) dcontext->sys_param0, (kernel_sigset_t *) dcontext->sys_param1, (size_t) dcontext->sys_param2, (int) dcontext->sys_param3); execute_syscall = false; /* since non-Mac, we can use this even if the call failed */ set_success_return_val(dcontext, new_result); break; } #endif case SYS_kill: { /* 37 */ /* in /usr/src/linux/kernel/signal.c: * asmlinkage long sys_kill(int pid, int sig) */ pid_t pid = (pid_t) sys_param(dcontext, 0); uint sig = (uint) sys_param(dcontext, 1); LOG(GLOBAL, LOG_TOP|LOG_SYSCALLS, 2, "thread "TIDFMT" sending signal %d to pid "PIDFMT"\n", get_thread_id(), sig, pid); /* We check whether targeting this process or this process group */ if (pid == get_process_id() || pid == 0 || pid == -get_process_group_id()) { handle_self_signal(dcontext, sig); } break; } #if defined(SYS_tkill) case SYS_tkill: { /* 238 */ /* in /usr/src/linux/kernel/signal.c: * asmlinkage long sys_tkill(int pid, int sig) */ pid_t 
        tid = (pid_t) sys_param(dcontext, 0);
        uint sig = (uint) sys_param(dcontext, 1);
        LOG(GLOBAL, LOG_TOP|LOG_SYSCALLS, 2,
            "thread "TIDFMT" sending signal %d to tid %d\n",
            get_thread_id(), sig, tid);
        if (tid == get_thread_id()) {
            handle_self_signal(dcontext, sig);
        }
        break;
    }
#endif
#if defined(SYS_tgkill)
    case SYS_tgkill: { /* 270 */
        /* in /usr/src/linux/kernel/signal.c:
         * asmlinkage long sys_tgkill(int tgid, int pid, int sig)
         */
        pid_t tgid = (pid_t) sys_param(dcontext, 0);
        pid_t tid = (pid_t) sys_param(dcontext, 1);
        uint sig = (uint) sys_param(dcontext, 2);
        LOG(GLOBAL, LOG_TOP|LOG_SYSCALLS, 2,
            "thread "TIDFMT" sending signal %d to tid %d tgid %d\n",
            get_thread_id(), sig, tid, tgid);
        /* some kernels support -1 values:
         *   tgkill(-1, tid, sig) == tkill(tid, sig)
         *   tgkill(tgid, -1, sig) == kill(tgid, sig)
         * the 2nd was proposed but is not in 2.6.20 so I'm ignoring it, since
         * I don't want to kill the thread when the signal is never sent!
         * FIXME: the 1st is in my tkill manpage, but not my 2.6.20 kernel sources!
         */
        if ((tgid == -1 || tgid == get_process_id()) &&
            tid == get_thread_id()) {
            handle_self_signal(dcontext, sig);
        }
        break;
    }
#endif
    case SYS_setitimer:       /* 104 */
        dcontext->sys_param0 = sys_param(dcontext, 0);
        dcontext->sys_param1 = sys_param(dcontext, 1);
        dcontext->sys_param2 = sys_param(dcontext, 2);
        handle_pre_setitimer(dcontext, (int) sys_param(dcontext, 0),
                             (const struct itimerval *) sys_param(dcontext, 1),
                             (struct itimerval *) sys_param(dcontext, 2));
        break;
    case SYS_getitimer:      /* 105 */
        dcontext->sys_param0 = sys_param(dcontext, 0);
        dcontext->sys_param1 = sys_param(dcontext, 1);
        break;
#if defined(LINUX) && defined(X86)
    case SYS_alarm: /* 27 on x86 and 37 on x64 */
        dcontext->sys_param0 = sys_param(dcontext, 0);
        handle_pre_alarm(dcontext, (unsigned int) dcontext->sys_param0);
        break;
#endif
#if 0
# ifndef X64
    case SYS_signal: {         /* 48 */
        /* in /usr/src/linux/kernel/signal.c:
           asmlinkage unsigned long
           sys_signal(int sig, __sighandler_t handler)
         */
        break;
    }
    case SYS_sigsuspend: {     /* 72 */
        /* in /usr/src/linux/arch/i386/kernel/signal.c:
           asmlinkage int
           sys_sigsuspend(int history0, int history1, old_sigset_t mask)
         */
        break;
    }
    case SYS_sigprocmask: {    /* 126 */
        /* in /usr/src/linux/kernel/signal.c:
           asmlinkage long
           sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
         */
        break;
    }
# endif
#else
    /* until we've implemented them, keep down here to get warning: */
# if defined(LINUX) && !defined(X64)
# ifndef ARM
    case SYS_signal:
# endif
    case SYS_sigsuspend:
    case SYS_sigprocmask:
# endif
#endif

#if defined(LINUX) && !defined(X64)
    case SYS_sigpending:      /* 73 */
# ifndef ARM
    case SYS_sgetmask:        /* 68 */
    case SYS_ssetmask:        /* 69 */
# endif
#endif
#ifdef LINUX
    case SYS_rt_sigtimedwait: /* 177 */
    case SYS_rt_sigqueueinfo: /* 178 */
#endif
    case IF_MACOS_ELSE(SYS_sigpending,SYS_rt_sigpending): { /* 176 */
        /* FIXME i#92: handle all of these syscalls! */
        LOG(THREAD, LOG_ASYNCH|LOG_SYSCALLS, 1,
            "WARNING: unhandled signal system call %d\n", dcontext->sys_num);
        SYSLOG_INTERNAL_WARNING_ONCE("unhandled signal system call %d",
                                     dcontext->sys_num);
        break;
    }

    /****************************************************************************/
    /* FILES */
    /* prevent app from closing our files or opening a new file in our fd space.
     * it's not worth monitoring all syscalls that take in fds from affecting ours.
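     * Instead DR keeps its own fds in a private range (fd_is_in_private_range())
     * and polices only close, dup2/dup3, fcntl(F_DUPFD*), and the rlimit calls
     * below that could hand that range back to the app.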
*/ #ifdef MACOS case SYS_close_nocancel: #endif case SYS_close: { execute_syscall = handle_close_pre(dcontext); #ifdef LINUX if (execute_syscall) signal_handle_close(dcontext, (file_t) sys_param(dcontext, 0)); #endif break; } #ifdef SYS_dup2 case SYS_dup2: IF_LINUX(case SYS_dup3:) { file_t newfd = (file_t) sys_param(dcontext, 1); if (fd_is_dr_owned(newfd) || fd_is_in_private_range(newfd)) { SYSLOG_INTERNAL_WARNING_ONCE("app trying to dup-close DR file(s)"); LOG(THREAD, LOG_TOP|LOG_SYSCALLS, 1, "WARNING: app trying to dup2/dup3 to %d. Disallowing.\n", newfd); if (DYNAMO_OPTION(fail_on_stolen_fds)) { set_failure_return_val(dcontext, EBADF); DODEBUG({ dcontext->expect_last_syscall_to_fail = true; }); } else set_success_return_val(dcontext, 0); execute_syscall = false; } break; } #endif #ifdef MACOS case SYS_fcntl_nocancel: #endif case SYS_fcntl: { int cmd = (int) sys_param(dcontext, 1); long arg = (long) sys_param(dcontext, 2); /* we only check for asking for min in private space: not min below * but actual will be above (see notes in os_file_init()) */ if ((cmd == F_DUPFD || cmd == F_DUPFD_CLOEXEC) && fd_is_in_private_range(arg)) { SYSLOG_INTERNAL_WARNING_ONCE("app trying to open private fd(s)"); LOG(THREAD, LOG_TOP|LOG_SYSCALLS, 1, "WARNING: app trying to dup to >= %d. Disallowing.\n", arg); set_failure_return_val(dcontext, EINVAL); DODEBUG({ dcontext->expect_last_syscall_to_fail = true; }); execute_syscall = false; } else { dcontext->sys_param0 = sys_param(dcontext, 0); dcontext->sys_param1 = cmd; } break; } #if defined(X64) || !defined(ARM) || defined(MACOS) case SYS_getrlimit: #endif #if defined(LINUX) && !defined(X64) case SYS_ugetrlimit: #endif /* save for post */ dcontext->sys_param0 = sys_param(dcontext, 0); /* resource */ dcontext->sys_param1 = sys_param(dcontext, 1); /* rlimit */ break; case SYS_setrlimit: { int resource = (int) sys_param(dcontext, 0); if (resource == RLIMIT_NOFILE && DYNAMO_OPTION(steal_fds) > 0) { # if !defined(ARM) && !defined(X64) && !defined(MACOS) struct compat_rlimit rlim; # else struct rlimit rlim; # endif if (safe_read((void *)sys_param(dcontext, 1), sizeof(rlim), &rlim) && rlim.rlim_max <= min_dr_fd && rlim.rlim_cur <= rlim.rlim_max) { /* if the new rlimit is lower, pretend succeed */ app_rlimit_nofile.rlim_cur = rlim.rlim_cur; app_rlimit_nofile.rlim_max = rlim.rlim_max; set_success_return_val(dcontext, 0); } else { /* don't let app raise limits as that would mess up our fd space */ set_failure_return_val(dcontext, EPERM); DODEBUG({ dcontext->expect_last_syscall_to_fail = true; }); } execute_syscall = false; } break; } #ifdef LINUX case SYS_prlimit64: /* save for post */ dcontext->sys_param0 = sys_param(dcontext, 0); /* pid */ dcontext->sys_param1 = sys_param(dcontext, 1); /* resource */ dcontext->sys_param2 = sys_param(dcontext, 2); /* new rlimit */ dcontext->sys_param3 = sys_param(dcontext, 3); /* old rlimit */ if (/* XXX: how do we handle the case of setting rlimit.nofile on another * process that is running with DynamoRIO? */ /* XXX: CLONE_FILES allows different processes to share the same file * descriptor table, and different threads of the same process have * separate file descriptor tables. POSIX specifies that rlimits are * per-process, not per-thread, and Linux follows suit, so the threads * with different descriptors will not matter, and the pids sharing * descriptors turns into the hard-to-solve IPC problem. 
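 * (Hence the check below only intervenes when the target pid is 0 or our
 * own pid.)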
*/ (dcontext->sys_param0 == 0 || dcontext->sys_param0 == get_process_id()) && dcontext->sys_param1 == RLIMIT_NOFILE && dcontext->sys_param2 != (reg_t)NULL && DYNAMO_OPTION(steal_fds) > 0) { struct rlimit rlim; if (safe_read((void *)(dcontext->sys_param2), sizeof(rlim), &rlim) && rlim.rlim_max <= min_dr_fd && rlim.rlim_cur <= rlim.rlim_max) { /* if the new rlimit is lower, pretend succeed */ app_rlimit_nofile.rlim_cur = rlim.rlim_cur; app_rlimit_nofile.rlim_max = rlim.rlim_max; set_success_return_val(dcontext, 0); /* set old rlimit if necessary */ if (dcontext->sys_param3 != (reg_t)NULL) { safe_write_ex((void *)(dcontext->sys_param3), sizeof(rlim), &app_rlimit_nofile, NULL); } } else { /* don't let app raise limits as that would mess up our fd space */ set_failure_return_val(dcontext, EPERM); DODEBUG({ dcontext->expect_last_syscall_to_fail = true; }); } execute_syscall = false; } break; #endif #ifdef LINUX # ifdef SYS_readlink case SYS_readlink: # endif case SYS_readlinkat: if (DYNAMO_OPTION(early_inject)) { dcontext->sys_param0 = sys_param(dcontext, 0); dcontext->sys_param1 = sys_param(dcontext, 1); dcontext->sys_param2 = sys_param(dcontext, 2); if (dcontext->sys_num == SYS_readlinkat) dcontext->sys_param3 = sys_param(dcontext, 3); } break; /* i#107 syscalls that might change/query app's segment */ # if defined(X86) && defined(X64) case SYS_arch_prctl: { /* we handle arch_prctl in post_syscall */ dcontext->sys_param0 = sys_param(dcontext, 0); dcontext->sys_param1 = sys_param(dcontext, 1); break; } # endif # ifdef X86 case SYS_set_thread_area: { our_modify_ldt_t desc; if (INTERNAL_OPTION(mangle_app_seg) && safe_read((void *)sys_param(dcontext, 0), sizeof(desc), &desc)) { if (os_set_app_thread_area(dcontext, &desc) && safe_write_ex((void *)sys_param(dcontext, 0), sizeof(desc), &desc, NULL)) { /* check if the range is unlimited */ ASSERT_CURIOSITY(desc.limit == 0xfffff); execute_syscall = false; set_success_return_val(dcontext, 0); } } break; } case SYS_get_thread_area: { our_modify_ldt_t desc; if (INTERNAL_OPTION(mangle_app_seg) && safe_read((const void *)sys_param(dcontext, 0), sizeof(desc), &desc)) { if (os_get_app_thread_area(dcontext, &desc) && safe_write_ex((void *)sys_param(dcontext, 0), sizeof(desc), &desc, NULL)) { execute_syscall = false; set_success_return_val(dcontext, 0); } } break; } # endif /* X86 */ # ifdef ARM case SYS_set_tls: { LOG(THREAD, LOG_VMAREAS|LOG_SYSCALLS, 2, "syscall: set_tls "PFX"\n", sys_param(dcontext, 0)); if (os_set_app_tls_base(dcontext, TLS_REG_LIB, (void *)sys_param(dcontext, 0))) { execute_syscall = false; set_success_return_val(dcontext, 0); } else { ASSERT_NOT_REACHED(); } break; } case SYS_cacheflush: { /* We assume we don't want to change the executable_areas list or change * the selfmod status of this region: else we should call something * that invokes handle_modified_code() in a way that handles a bigger * region than a single write. */ app_pc start = (app_pc) sys_param(dcontext, 0); app_pc end = (app_pc) sys_param(dcontext, 1); LOG(THREAD, LOG_VMAREAS|LOG_SYSCALLS, 2, "syscall: cacheflush "PFX"-"PFX"\n", start, end); flush_fragments_from_region(dcontext, start, end - start, /* An unlink flush should be fine: the app must * use synch to ensure other threads see the * new code. 
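 * (ARM's instruction cache is not coherent with data writes, which is why the
 * app must flush at all; flushing our own fragments for the same range keeps
 * DR's code cache consistent with the app's new code.)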
*/
false/*don't force synchall*/); break; }
# endif /* ARM */
#elif defined(MACOS)
/* FIXME i#58: handle i386_{get,set}_ldt and thread_fast_set_cthread_self64 */
#endif
#ifdef DEBUG
# ifdef MACOS
case SYS_open_nocancel:
# endif
# ifdef SYS_open
case SYS_open: { dcontext->sys_param0 = sys_param(dcontext, 0); break; }
# endif
#endif
default: {
#ifdef VMX86_SERVER
if (is_vmkuw_sysnum(dcontext->sys_num)) { execute_syscall = vmkuw_pre_system_call(dcontext); break; }
#endif
} } /* end switch */
dcontext->whereami = old_whereami;
return execute_syscall; }
void all_memory_areas_lock(void) { IF_NO_MEMQUERY(memcache_lock()); }
void all_memory_areas_unlock(void) { IF_NO_MEMQUERY(memcache_unlock()); }
void update_all_memory_areas(app_pc start, app_pc end, uint prot, int type) { IF_NO_MEMQUERY(memcache_update(start, end, prot, type)); }
bool remove_from_all_memory_areas(app_pc start, app_pc end) { IF_NO_MEMQUERY(return memcache_remove(start, end)); return true; }
/* We consider a module load to happen at the first mmap, so we check on later
 * overmaps to ensure things look consistent. */
static bool mmap_check_for_module_overlap(app_pc base, size_t size, bool readable, uint64 inode, bool at_map)
{ module_area_t *ma; os_get_module_info_lock(); ma = module_pc_lookup(base);
if (ma != NULL) {
/* FIXME - how can we distinguish between the loader mapping the segments
 * over the initial map from someone just mapping over part of a module? If
 * it is the latter case we need to adjust the view size or remove it from
 * the module list. */
LOG(GLOBAL, LOG_VMAREAS, 2, "%s mmap overlapping module area : \n" "\tmap : base="PFX" base+size="PFX" inode="UINT64_FORMAT_STRING"\n" "\tmod : start="PFX" end="PFX" inode="UINT64_FORMAT_STRING"\n", at_map ? "new" : "existing", base, base+size, inode, ma->start, ma->end, ma->names.inode);
ASSERT_CURIOSITY(base >= ma->start);
if (at_map) { ASSERT_CURIOSITY(base+size <= ma->end); } else {
/* FIXME - I'm having problems with this check for existing maps. I
 * haven't been able to get gdb to break in early enough to really get a good
 * look at the early loader behavior. Two issues: One case is with our .so
 * for which the anonymous .bss mapping is one page larger than expected
 * (which might be some loader bug in the size calculation? or something? if
 * so we should see it trigger the at_map curiosity on some dll and can
 * address it then) and the other is that for a few executables the .bss
 * mapping is much larger (~0x20000 larger) than expected when running under
 * DR (but not running natively where it is instead the expected size). Both
 * could just be the loader merging adjacent identically protected regions
 * though I can't explain the discrepancy between DR and native given that
 * our vmmheap is elsewhere in the address space (so who allocated that
 * adjacent memory, and how?). I've yet to see any issue with dynamically
 * loaded modules so it's probably the loader merging regions. Still worth
 * investigating. */
ASSERT_CURIOSITY(inode == 0 /*see above comment*/|| module_contains_addr(ma, base+size-1)); }
ASSERT_CURIOSITY(ma->names.inode == inode || inode == 0 /* for .bss */);
DOCHECK(1, { if (readable && module_is_header(base, size)) {
/* Case 8879: For really small modules, to save disk space, the same
 * disk page could hold both RO and .data, occupying just 1 page of
 * disk space, e.g. /usr/lib/httpd/modules/mod_auth_anon.so. When
 * such a module is mapped in, the os maps the same disk page twice,
 * one readonly and one copy-on-write (see pg. 96, Sec 4.4 from
 * Linkers and Loaders by John R.
Levine). This makes the data
 * section also satisfy the elf_header check above. So, if the new
 * mmap overlaps an elf_area and it is also a header, then make sure
 * the previous page (correcting for alignment) is also an elf_header.
 * Note, if it is a header of a different module, then we'll not have
 * an overlap, so we will not hit this case. */
ASSERT_CURIOSITY(ma->start + ma->os_data.alignment == base); } }); }
os_get_module_info_unlock();
#ifdef ANDROID
/* i#1860: we need to keep looking for the segment with .dynamic as Android's
 * loader does not map the whole file up front. */
if (ma != NULL && at_map && readable) os_module_update_dynamic_info(base, size, at_map);
#endif
return ma != NULL; }
static void os_add_new_app_module(dcontext_t *dcontext, bool at_map, app_pc base, size_t size, uint memprot)
{ memquery_iter_t iter; bool found_map = false; uint64 inode = 0; const char *filename = ""; size_t mod_size = size;
if (!at_map) {
/* the size is the first seg size, get the whole module size instead */
app_pc first_seg_base = NULL; app_pc first_seg_end = NULL; app_pc last_seg_end = NULL;
if (module_walk_program_headers(base, size, at_map, false, &first_seg_base, &first_seg_end, &last_seg_end, NULL, NULL)) {
ASSERT_CURIOSITY(size == (ALIGN_FORWARD(first_seg_end, PAGE_SIZE) - (ptr_uint_t)first_seg_base) || base == vdso_page_start || base == vsyscall_page_start);
mod_size = ALIGN_FORWARD(last_seg_end, PAGE_SIZE) - (ptr_uint_t)first_seg_base; } }
LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 2, "dlopen "PFX"-"PFX"%s\n", base, base+mod_size, TEST(MEMPROT_EXEC, memprot) ? " +x": "");
/* Mapping in a new module. From what we've observed of the loader's
 * behavior, it first maps the file in with size equal to the final
 * memory image size (I'm not sure how it gets that size without reading
 * in the elf header and then walking through all the program headers to
 * get the largest virtual offset). This is necessary to reserve all the
 * space that will be needed. It then walks through the program headers
 * mapping over the previously mapped space with the appropriate
 * permissions and offsets. Note that the .bss portion is mapped over
 * as anonymous. It may also, depending on the program headers, make some
 * areas read-only after fixing up their relocations etc. NOTE - at
 * no point are the section headers guaranteed to be mapped in so we can't
 * reliably walk sections (only segments) without looking to disk. */
/* FIXME - when should we add the module to our list? At the first map
 * seems to be the best choice as we know the bounds and it's difficult to
 * tell when the loader is finished. The downside is that at the initial map
 * the memory layout isn't finalized (memory beyond the first segment will
 * be shifted for page alignment reasons), so we have to be careful and
 * make adjustments to read anything beyond the first segment until the
 * loader finishes. This goes for the client too as it gets notified when we
 * add to the list. FIXME we could try to track the expected segment overmaps
 * and only notify the client after the last one (though that's still before
 * linking and relocation, but that's true on Windows too). */
/* Get filename & inode for the list.
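 * (Both come from the memquery iteration below: we take the map entry whose
 * start address equals the module base.)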
*/ memquery_iterator_start(&iter, base, true /* plan to alloc a module_area_t */); while (memquery_iterator_next(&iter)) { if (iter.vm_start == base) { if (iter.vm_start == vsyscall_page_start) { ASSERT_CURIOSITY(!at_map); } else { ASSERT_CURIOSITY(iter.inode != 0 || base == vdso_page_start); ASSERT_CURIOSITY(iter.offset == 0); /* first map shouldn't have offset */ /* XREF 307599 on rounding module end to the next PAGE boundary */ ASSERT_CURIOSITY((iter.vm_end - iter.vm_start == ALIGN_FORWARD(size, PAGE_SIZE))); inode = iter.inode; filename = dr_strdup(iter.comment HEAPACCT(ACCT_OTHER)); found_map = true; } break; } } memquery_iterator_stop(&iter); #ifdef HAVE_MEMINFO /* barring weird races we should find this map except [vdso] */ ASSERT_CURIOSITY(found_map || base == vsyscall_page_start || base == vdso_page_start); #else /* HAVE_MEMINFO */ /* Without /proc/maps or other memory querying interface available at * library map time, there is no way to find out the name of the file * that was mapped, thus its inode isn't available either. * * Just module_list_add with no filename will still result in * library name being extracted from the .dynamic section and added * to the module list. However, this name may not always exist, thus * we might have a library with no file name available at all! * * Note: visor implements vsi mem maps that give file info, but, no * path, should be ok. xref PR 401580. * * Once PR 235433 is implemented in visor then fix memquery_iterator*() to * use vsi to find out page protection info, file name & inode. */ #endif /* HAVE_MEMINFO */ /* XREF 307599 on rounding module end to the next PAGE boundary */ if (found_map) { module_list_add(base, ALIGN_FORWARD(mod_size, PAGE_SIZE), at_map, filename, inode); dr_strfree(filename HEAPACCT(ACCT_OTHER)); } } void os_check_new_app_module(dcontext_t *dcontext, app_pc pc) { module_area_t *ma; os_get_module_info_lock(); ma = module_pc_lookup(pc); /* ma might be NULL due to dynamic generated code or custom loaded modules */ if (ma == NULL) { dr_mem_info_t info; /* i#1760: an app module loaded by custom loader (e.g., bionic libc) * might not be detected by DynamoRIO in process_mmap. */ if (query_memory_ex_from_os(pc, &info) && info.type == DR_MEMTYPE_IMAGE) { /* add the missing module */ os_get_module_info_unlock(); os_add_new_app_module(get_thread_private_dcontext(), false/*!at_map*/, info.base_pc, info.size, info.prot); os_get_module_info_lock(); } } os_get_module_info_unlock(); } /* All processing for mmap and mmap2. */ static void process_mmap(dcontext_t *dcontext, app_pc base, size_t size, uint prot, uint flags _IF_DEBUG(const char *map_type)) { bool image = false; uint memprot = osprot_to_memprot(prot); #ifdef ANDROID /* i#1861: avoid merging file-backed w/ anon regions */ if (!TEST(MAP_ANONYMOUS, flags)) memprot |= MEMPROT_HAS_COMMENT; #endif LOG(THREAD, LOG_SYSCALLS, 4, "process_mmap("PFX","PFX",0x%x,%s,%s)\n", base, size, flags, memprot_string(memprot), map_type); /* Notes on how ELF SOs are mapped in. * * o The initial mmap for an ELF file specifies enough space for * all segments (and their constituent sections) in the file. * The protection bits for that section are used for the entire * region, and subsequent mmaps for subsequent segments within * the region modify their portion's protection bits as needed. * So if the prot bits for the first segment are +x, the entire * region is +x. ** Note that our primary concern is adjusting * exec areas to reflect the prot bits of subsequent * segments. 
** The region is added to the all-memory areas
 * and also to exec areas (as determined by app_memory_allocation()).
 *
 * o Any subsequent segment sub-mappings specify their own protection
 * bits and therefore are added to the exec areas via normal
 * processing. They are also "naturally" added to the all-mems list.
 * We do a little extra processing when mapping into a previously
 * mapped region and the prot bits mismatch; if the new mapping is
 * not +x, flushing needs to occur. */
/* process_mmap can be called with PROT_NONE, so we need to check if we
 * can read the memory to see if it is an elf_header */
/* XXX: get inode for check */
if (TEST(MAP_ANONYMOUS, flags)) {
/* not an ELF mmap */
LOG(THREAD, LOG_SYSCALLS, 4, "mmap "PFX": anon\n", base);
} else if (mmap_check_for_module_overlap(base, size, TEST(MEMPROT_READ, memprot), 0, true)) {
/* FIXME - how can we distinguish between the loader mapping the segments
 * over the initial map from someone just mapping over part of a module? If
 * it is the latter case we need to adjust the view size or remove it from
 * the module list. */
image = true; DODEBUG({ map_type = "ELF SO"; });
LOG(THREAD, LOG_SYSCALLS, 4, "mmap "PFX": overlaps image\n", base);
} else if (TEST(MEMPROT_READ, memprot) &&
/* i#727: We can still get SIGBUS on mmap'ed files that can't be
 * read, so pass size=0 to use a safe_read. */
module_is_header(base, 0)) {
#ifdef ANDROID
/* The Android loader's initial all-segment-covering mmap is anonymous */
dr_mem_info_t info;
if (query_memory_ex_from_os((byte *)ALIGN_FORWARD(base+size, PAGE_SIZE), &info) && info.prot == MEMPROT_NONE && info.type == DR_MEMTYPE_DATA) {
LOG(THREAD, LOG_SYSCALLS, 4, "mmap "PFX": Android elf\n", base);
image = true; DODEBUG({ map_type = "ELF SO"; });
os_add_new_app_module(dcontext, true/*at_map*/, base, /* pass segment size, not whole module size */ size, memprot);
} else
#endif
if (module_is_partial_map(base, size, memprot)) {
/* i#1240: App might read first page of ELF header using mmap, which
 * might accidentally be treated as a module load. Heuristically
 * distinguish this by saying that if this is the first mmap for an ELF
 * (i.e., it doesn't overlap with a previous map), and if it's small,
 * then don't treat it as a module load. */
LOG(THREAD, LOG_SYSCALLS, 4, "mmap "PFX": partial\n", base);
} else {
LOG(THREAD, LOG_SYSCALLS, 4, "mmap "PFX": elf header\n", base);
image = true; DODEBUG({ map_type = "ELF SO"; });
os_add_new_app_module(dcontext, true/*at_map*/, base, size, memprot); } }
IF_NO_MEMQUERY(memcache_handle_mmap(dcontext, base, size, memprot, image));
/* app_memory_allocation() expects to not see an overlap -- exec areas
 * doesn't expect one. We have yet to see a +x mmap into a previously
 * mapped +x region, but we do check and handle in pre-syscall (i#1175). */
LOG(THREAD, LOG_SYSCALLS, 4, "\t try app_mem_alloc\n");
if (app_memory_allocation(dcontext, base, size, memprot, image _IF_DEBUG(map_type))) STATS_INC(num_app_code_modules);
LOG(THREAD, LOG_SYSCALLS, 4, "\t app_mem_alloc -- DONE\n"); }
#ifdef LINUX
/* Call right after the system call.
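 * mremap may move the region (MREMAP_MAYMOVE), so both the old and the new
 * bounds must be processed.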
 * i#173: old_prot and old_type should be from before the system call */
static bool handle_app_mremap(dcontext_t *dcontext, byte *base, size_t size, byte *old_base, size_t old_size, uint old_prot, uint old_type)
{ if (!mmap_syscall_succeeded(base)) return false;
if (base != old_base || size < old_size) { /* take action only if
 * there was a change */
DEBUG_DECLARE(bool ok;)
/* fragments were shifted...don't try to fix them, just flush */
app_memory_deallocation(dcontext, (app_pc)old_base, old_size, false /* don't own thread_initexit_lock */, false /* not image, FIXME: somewhat arbitrary */);
DOCHECK(1, { /* we don't expect to see remappings of modules */
os_get_module_info_lock(); ASSERT_CURIOSITY(!module_overlaps(base, size)); os_get_module_info_unlock(); });
/* Verify that the current prot on the new region (according to
 * the os) is the same as what the prot used to be for the old
 * region. */
DOCHECK(1, { uint memprot; ok = get_memory_info_from_os(base, NULL, NULL, &memprot);
/* allow maps to have +x,
 * +x may be caused by READ_IMPLIES_EXEC set in personality flag (i#262) */
ASSERT(ok && (memprot == old_prot || (memprot & (~MEMPROT_EXEC)) == old_prot)); });
app_memory_allocation(dcontext, base, size, old_prot, old_type == DR_MEMTYPE_IMAGE _IF_DEBUG("mremap"));
IF_NO_MEMQUERY(memcache_handle_mremap(dcontext, base, size, old_base, old_size, old_prot, old_type)); }
return true; }
static void handle_app_brk(dcontext_t *dcontext, byte *lowest_brk/*if known*/, byte *old_brk, byte *new_brk)
{ /* i#851: the brk might not be page aligned */
old_brk = (app_pc) ALIGN_FORWARD(old_brk, PAGE_SIZE);
new_brk = (app_pc) ALIGN_FORWARD(new_brk, PAGE_SIZE);
if (new_brk < old_brk) {
/* Usually the heap is writable, so we don't really need to call
 * this here: but seems safest to do so, esp if someone made part of
 * the heap read-only and then put code there. */
app_memory_deallocation(dcontext, new_brk, old_brk - new_brk, false /* don't own thread_initexit_lock */, false /* not image */);
} else if (new_brk > old_brk) {
/* No need to call app_memory_allocation() as doesn't interact
 * w/ security policies. */ }
IF_NO_MEMQUERY(memcache_handle_app_brk(lowest_brk, old_brk, new_brk)); }
#endif
/* This routine is *not* called if pre_system_call() returns false to skip
 * the syscall. */
/* XXX: split out specific handlers into separate routines */
void post_system_call(dcontext_t *dcontext)
{ priv_mcontext_t *mc = get_mcontext(dcontext);
/* registers have been clobbered, so sysnum is kept in dcontext */
int sysnum = dcontext->sys_num;
/* We expect most syscall failures to return < 0, so >= 0 is success.
 * Some syscalls return addresses that have the sign bit set and so
 * appear to be failures but are not. They are handled on a
 * case-by-case basis in the switch statement below.
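 * (mmap is the canonical example: a valid mapping in the upper half of the
 * address space looks negative as a signed value, so it is special-cased
 * via mmap_syscall_succeeded().)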
*/ ptr_int_t result = (ptr_int_t) MCXT_SYSCALL_RES(mc); /* signed */ bool success = syscall_successful(mc, sysnum); app_pc base; size_t size; uint prot; where_am_i_t old_whereami; DEBUG_DECLARE(bool ok;) RSTATS_INC(post_syscall); old_whereami = dcontext->whereami; dcontext->whereami = WHERE_SYSCALL_HANDLER; #if defined(LINUX) && defined(X86) /* PR 313715: restore xbp since for some vsyscall sequences that use * the syscall instruction its value is needed: * 0xffffe400 <__kernel_vsyscall+0>: push %ebp * 0xffffe401 <__kernel_vsyscall+1>: mov %ecx,%ebp * 0xffffe403 <__kernel_vsyscall+3>: syscall * 0xffffe405 <__kernel_vsyscall+5>: mov $0x2b,%ecx * 0xffffe40a <__kernel_vsyscall+10>: movl %ecx,%ss * 0xffffe40c <__kernel_vsyscall+12>: mov %ebp,%ecx * 0xffffe40e <__kernel_vsyscall+14>: pop %ebp * 0xffffe40f <__kernel_vsyscall+15>: ret */ if (should_syscall_method_be_sysenter() && !dcontext->sys_was_int) { mc->xbp = dcontext->sys_xbp; } #endif /* handle fork, try to do it early before too much logging occurs */ if (false # ifdef SYS_fork || sysnum == SYS_fork # endif IF_LINUX(|| (sysnum == SYS_clone && !TEST(CLONE_VM, dcontext->sys_param0)))) { if (result == 0) { /* we're the child */ thread_id_t child = get_sys_thread_id(); # ifdef DEBUG thread_id_t parent = get_parent_id(); SYSLOG_INTERNAL_INFO("-- parent %d forked child %d --", parent, child); # endif /* first, fix TLS of dcontext */ ASSERT(parent != 0); /* change parent pid to our pid */ replace_thread_id(dcontext->owning_thread, child); dcontext->owning_thread = child; dcontext->owning_process = get_process_id(); /* now let dynamo initialize new shared memory, logfiles, etc. * need access to static vars in dynamo.c, that's why we don't do it. */ /* FIXME - xref PR 246902 - dispatch runs a lot of code before * getting to post_system_call() is any of that going to be messed up * by waiting till here to fixup the child logfolder/file and tid? */ dynamorio_fork_init(dcontext); LOG(THREAD, LOG_SYSCALLS, 1, "after fork-like syscall: parent is %d, child is %d\n", parent, child); } else { /* we're the parent */ os_fork_post(dcontext, true/*parent*/); } } LOG(THREAD, LOG_SYSCALLS, 2, "post syscall: sysnum="PFX", result="PFX" (%d)\n", sysnum, MCXT_SYSCALL_RES(mc), (int)MCXT_SYSCALL_RES(mc)); switch (sysnum) { /****************************************************************************/ /* MEMORY REGIONS */ #ifdef DEBUG # ifdef MACOS case SYS_open_nocancel: # endif # ifdef SYS_open case SYS_open: { if (success) { /* useful for figuring out what module was loaded that then triggers * module.c elf curiosities */ LOG(THREAD, LOG_SYSCALLS, 2, "SYS_open %s => %d\n", dcontext->sys_param0, (int)result); } break; } # endif #endif #if defined(LINUX) && !defined(X64) && !defined(ARM) case SYS_mmap: #endif case IF_MACOS_ELSE(SYS_mmap, IF_X64_ELSE(SYS_mmap, SYS_mmap2)): { uint flags; DEBUG_DECLARE(const char *map_type;) RSTATS_INC(num_app_mmaps); base = (app_pc) MCXT_SYSCALL_RES(mc); /* For mmap, it's NOT arg->addr! */ /* mmap isn't simply a user-space wrapper for mmap2. It's called * directly when dynamically loading an SO, i.e., dlopen(). */ #ifdef LINUX /* MacOS success is in CF */ success = mmap_syscall_succeeded((app_pc)result); /* The syscall either failed OR the retcode is less than the * largest uint value of any errno and the addr returned is * page-aligned. 
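 * (A failing return value falls in the [-0x1000, -1] errno range, hence the
 * comparison against (app_pc)-0x1000 in the check below.)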
*/
ASSERT_CURIOSITY(!success || ((app_pc)result < (app_pc)(ptr_int_t)-0x1000 && ALIGNED(base, PAGE_SIZE)));
#else
ASSERT_CURIOSITY(!success || ALIGNED(base, PAGE_SIZE));
#endif
if (!success) goto exit_post_system_call;
#if defined(LINUX) && !defined(X64) && !defined(ARM)
if (sysnum == SYS_mmap) {
/* The syscall succeeded so the read of 'arg' should be
 * safe. */
mmap_arg_struct_t *arg = (mmap_arg_struct_t *) dcontext->sys_param0;
size = (size_t) arg->len; prot = (uint) arg->prot; flags = (uint) arg->flags;
DEBUG_DECLARE(map_type = "mmap";)
} else {
#endif
size = (size_t) dcontext->sys_param1; prot = (uint) dcontext->sys_param2; flags = (uint) dcontext->sys_param3;
DEBUG_DECLARE(map_type = IF_X64_ELSE("mmap2","mmap");)
#if defined(LINUX) && !defined(X64) && !defined(ARM)
}
#endif
process_mmap(dcontext, base, size, prot, flags _IF_DEBUG(map_type)); break; }
case SYS_munmap: {
app_pc addr = (app_pc) dcontext->sys_param0; size_t len = (size_t) dcontext->sys_param1;
/* We assumed in pre_system_call() that the unmap would succeed
 * and flushed fragments and removed the region from exec areas.
 * If the unmap failed, we re-add the region to exec areas.
 *
 * The same logic can be used on Windows (but isn't yet). */
/* FIXME There are shortcomings to the approach. If another thread
 * executes in the region after our pre_system_call processing
 * but before the re-add below, it will get a security violation.
 * That's less than ideal but at least isn't a security hole.
 * The overall shortcoming is that we lose the state from our
 * stateful security policies -- future exec list, tables used
 * for RCT (.C/.E/.F) -- which can't be easily restored. Also,
 * the re-add could add a region that wasn't on the exec list
 * previously.
 *
 * See case 7559 for a better approach. */
if (!success) {
dr_mem_info_t info;
/* must go to os to get real memory since we already removed */
DEBUG_DECLARE(ok =) query_memory_ex_from_os(addr, &info); ASSERT(ok);
app_memory_allocation(dcontext, addr, len, info.prot, info.type == DR_MEMTYPE_IMAGE _IF_DEBUG("failed munmap"));
IF_NO_MEMQUERY(memcache_update_locked((app_pc)ALIGN_BACKWARD(addr, PAGE_SIZE), (app_pc)ALIGN_FORWARD(addr + len, PAGE_SIZE), info.prot, info.type, false/*add back*/)); }
break; }
#ifdef LINUX
case SYS_mremap: {
app_pc old_base = (app_pc) dcontext->sys_param0; size_t old_size = (size_t) dcontext->sys_param1;
base = (app_pc) MCXT_SYSCALL_RES(mc); size = (size_t) dcontext->sys_param2;
/* even if no shift, count as munmap plus mmap */
RSTATS_INC(num_app_munmaps); RSTATS_INC(num_app_mmaps);
success = handle_app_mremap(dcontext, base, size, old_base, old_size, /* i#173: use memory prot and type
 * obtained from pre_system_call */ (uint) dcontext->sys_param3, (uint) dcontext->sys_param4);
/* The syscall either failed OR the retcode is less than the
 * largest uint value of any errno and the addr returned is
 * page-aligned.
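 * (Same check as for mmap above, since mremap likewise returns the new
 * address on success.)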
*/ ASSERT_CURIOSITY(!success || ((app_pc)result < (app_pc)(ptr_int_t)-0x1000 && ALIGNED(base, PAGE_SIZE))); if (!success) goto exit_post_system_call; break; } #endif case SYS_mprotect: { base = (app_pc) dcontext->sys_param0; size = dcontext->sys_param1; prot = dcontext->sys_param2; #ifdef VMX86_SERVER /* PR 475111: workaround for PR 107872 */ if (os_in_vmkernel_userworld() && result == -EBUSY && prot == PROT_NONE) { result = mprotect_syscall(base, size, PROT_READ); /* since non-Mac, we can use this even if the call failed */ set_success_return_val(dcontext, result); success = (result >= 0); LOG(THREAD, LOG_VMAREAS, 1, "re-doing mprotect -EBUSY for "PFX"-"PFX" => %d\n", base, base + size, (int)result); SYSLOG_INTERNAL_WARNING_ONCE("re-doing mprotect for PR 475111, PR 107872"); } #endif /* FIXME i#143: we need to tweak the returned oldprot for * writable areas we've made read-only */ if (!success) { uint memprot = 0; /* Revert the prot bits if needed. */ if (!get_memory_info_from_os(base, NULL, NULL, &memprot)) memprot = PROT_NONE; LOG(THREAD, LOG_SYSCALLS, 3, "syscall: mprotect failed: "PFX"-"PFX" prot->%d\n", base, base+size, osprot_to_memprot(prot)); LOG(THREAD, LOG_SYSCALLS, 3, "\told prot->%d\n", memprot); if (prot != memprot_to_osprot(memprot)) { /* We're trying to reverse the prot change, assuming that * this action doesn't have any unexpected side effects * when doing so (such as not reversing some bit of internal * state). */ uint new_memprot; DEBUG_DECLARE(uint res =) app_memory_protection_change(dcontext, base, size, osprot_to_memprot(prot), &new_memprot, NULL); ASSERT_NOT_IMPLEMENTED(res != SUBSET_APP_MEM_PROT_CHANGE); ASSERT(res == DO_APP_MEM_PROT_CHANGE || res == PRETEND_APP_MEM_PROT_CHANGE); /* PR 410921 - Revert the changes to all-mems list. * FIXME: This fix assumes the whole region had the prot & * type, which is true in the cases we have seen so far, but * theoretically may not be true. If it isn't true, multiple * memory areas with different types/protections might have * been changed in pre_system_call(), so will have to keep a * list of all vmareas changed. This might be expensive for * each mprotect syscall to guard against a rare theoretical bug. */ ASSERT_CURIOSITY(!dcontext->mprot_multi_areas); IF_NO_MEMQUERY(memcache_update_locked(base, base + size, memprot, -1/*type unchanged*/, true/*exists*/)); } } break; } #ifdef ANDROID case SYS_prctl: { int code = (int) dcontext->sys_param0; int subcode = (ulong) dcontext->sys_param1; if (success && code == PR_SET_VMA && subcode == PR_SET_VMA_ANON_NAME) { byte *addr = (byte *) dcontext->sys_param2; size_t len = (size_t) dcontext->sys_param3; IF_DEBUG(const char *comment = (const char *) dcontext->sys_param4;) uint memprot = 0; if (!get_memory_info_from_os(addr, NULL, NULL, &memprot)) memprot = MEMPROT_NONE; /* We're post-syscall so from_os should match the prctl */ ASSERT((comment == NULL && !TEST(MEMPROT_HAS_COMMENT, memprot)) || (comment != NULL && TEST(MEMPROT_HAS_COMMENT, memprot))); LOG(THREAD, LOG_SYSCALLS, 2, "syscall: prctl PR_SET_VMA_ANON_NAME base="PFX" size="PFX" comment=%s\n", addr, len, comment == NULL ? "<null>" : comment); IF_NO_MEMQUERY(memcache_update_locked(addr, addr + len, memprot, -1/*type unchanged*/, true/*exists*/)); } break; } #endif #ifdef LINUX case SYS_brk: { /* i#91/PR 396352: need to watch SYS_brk to maintain all_memory_areas. * This code should work regardless of whether syscall failed * (if it failed, the old break will be returned). We stored * the old break in sys_param1 in pre-syscall. 
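 * (On failure the kernel returns the current break unchanged, so old_brk ==
 * new_brk and the handle_app_brk() call below effectively becomes a no-op.)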
*/ app_pc old_brk = (app_pc) dcontext->sys_param1; app_pc new_brk = (app_pc) result; DEBUG_DECLARE(app_pc req_brk = (app_pc) dcontext->sys_param0;); ASSERT(!DYNAMO_OPTION(emulate_brk)); /* shouldn't get here */ # ifdef DEBUG if (DYNAMO_OPTION(early_inject) && req_brk != NULL /* Ignore calls that don't increase brk. */) { DO_ONCE({ ASSERT_CURIOSITY(new_brk > old_brk && "i#1004: first brk() " "allocation failed with -early_inject"); }); } # endif handle_app_brk(dcontext, NULL, old_brk, new_brk); break; } #endif /****************************************************************************/ /* SPAWNING -- fork mostly handled above */ #ifdef LINUX case SYS_clone: { /* in /usr/src/linux/arch/i386/kernel/process.c */ LOG(THREAD, LOG_SYSCALLS, 2, "syscall: clone returned "PFX"\n", MCXT_SYSCALL_RES(mc)); /* We switch the lib tls segment back to dr's privlib segment. * Please refer to comment on os_switch_lib_tls. * It is only called in parent thread. * The child thread's tls setup is done in os_tls_app_seg_init. */ if (was_thread_create_syscall(dcontext)) { if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) os_switch_lib_tls(dcontext, false/*to dr*/); /* i#2089: we already restored the DR tls in os_clone_post() */ } break; } #elif defined(MACOS) && !defined(X64) case SYS_bsdthread_create: { /* restore stack values we clobbered */ ASSERT(*sys_param_addr(dcontext, 0) == (reg_t) new_bsdthread_intercept); *sys_param_addr(dcontext, 0) = dcontext->sys_param0; *sys_param_addr(dcontext, 1) = dcontext->sys_param1; break; } #endif #ifdef SYS_fork case SYS_fork: { LOG(THREAD, LOG_SYSCALLS, 2, "syscall: fork returned "PFX"\n", MCXT_SYSCALL_RES(mc)); break; } #endif #ifdef SYS_vfork case SYS_vfork: { LOG(THREAD, LOG_SYSCALLS, 2, "syscall: vfork returned "PFX"\n", MCXT_SYSCALL_RES(mc)); IF_LINUX(ASSERT(was_thread_create_syscall(dcontext))); /* restore xsp in parent */ LOG(THREAD, LOG_SYSCALLS, 2, "vfork: restoring xsp from "PFX" to "PFX"\n", mc->xsp, dcontext->sys_param1); mc->xsp = dcontext->sys_param1; if (MCXT_SYSCALL_RES(mc) != 0) { /* We switch the lib tls segment back to dr's segment. * Please refer to comment on os_switch_lib_tls. * It is only called in parent thread. * The child thread's tls setup is done in os_tls_app_seg_init. */ if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) { os_switch_lib_tls(dcontext, false/*to dr*/); } /* i#2089: we already restored the DR tls in os_clone_post() */ } break; } #endif case SYS_execve: { /* if we get here it means execve failed (doesn't return on success) */ success = false; mark_thread_execve(dcontext->thread_record, false); ASSERT(result < 0); LOG(THREAD, LOG_SYSCALLS, 2, "syscall: execve failed\n"); handle_execve_post(dcontext); /* Don't 'break' as we have an ASSERT(success) just below * the switch(). */ goto exit_post_system_call; break; /* unnecessary but good form so keep it */ } /****************************************************************************/ /* SIGNALS */ case IF_MACOS_ELSE(SYS_sigaction,SYS_rt_sigaction): { /* 174 */ /* in /usr/src/linux/kernel/signal.c: asmlinkage long sys_rt_sigaction(int sig, const struct sigaction *act, struct sigaction *oact, size_t sigsetsize) */ /* FIXME i#148: Handle syscall failure. 
*/ int sig = (int) dcontext->sys_param0; const kernel_sigaction_t *act = (const kernel_sigaction_t *) dcontext->sys_param1; prev_sigaction_t *oact = (prev_sigaction_t *) dcontext->sys_param2; size_t sigsetsize = (size_t) dcontext->sys_param3; uint res; res = handle_post_sigaction(dcontext, success, sig, act, oact, sigsetsize); LOG(THREAD, LOG_SYSCALLS, 2, "syscall: %ssigaction => %d\n", IF_MACOS_ELSE("","rt_"), -res); if (res != 0) set_failure_return_val(dcontext, res); if (!success || res != 0) goto exit_post_system_call; break; } #if defined(LINUX) && !defined(X64) case SYS_sigaction: { /* 67 */ int sig = (int) dcontext->sys_param0; const old_sigaction_t *act = (const old_sigaction_t *) dcontext->sys_param1; old_sigaction_t *oact = (old_sigaction_t *) dcontext->sys_param2; uint res = handle_post_old_sigaction(dcontext, success, sig, act, oact); LOG(THREAD, LOG_SYSCALLS, 2, "syscall: sigaction => %d\n", -res); if (res != 0) set_failure_return_val(dcontext, res); if (!success || res != 0) goto exit_post_system_call; break; } #endif case IF_MACOS_ELSE(SYS_sigprocmask,SYS_rt_sigprocmask): { /* 175 */ /* in /usr/src/linux/kernel/signal.c: asmlinkage long sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize) */ /* FIXME i#148: Handle syscall failure. */ handle_post_sigprocmask(dcontext, (int) dcontext->sys_param0, (kernel_sigset_t *) dcontext->sys_param1, (kernel_sigset_t *) dcontext->sys_param2, (size_t) dcontext->sys_param3); break; } #if defined(LINUX) && !defined(X64) case SYS_sigreturn: /* 119 */ #endif case IF_MACOS_ELSE(SYS_sigreturn,SYS_rt_sigreturn): /* 173 */ /* there is no return value: it's just the value of eax, so avoid * assert below */ success = true; break; case SYS_setitimer: /* 104 */ handle_post_setitimer(dcontext, success, (int) dcontext->sys_param0, (const struct itimerval *) dcontext->sys_param1, (struct itimerval *) dcontext->sys_param2); break; case SYS_getitimer: /* 105 */ handle_post_getitimer(dcontext, success, (int) dcontext->sys_param0, (struct itimerval *) dcontext->sys_param1); break; #if defined(LINUX) && defined(X86) case SYS_alarm: /* 27 on x86 and 37 on x64 */ handle_post_alarm(dcontext, success, (unsigned int) dcontext->sys_param0); break; #endif #if defined(LINUX) && defined(X86) && defined(X64) case SYS_arch_prctl: { if (success && INTERNAL_OPTION(mangle_app_seg)) { tls_handle_post_arch_prctl(dcontext, dcontext->sys_param0, dcontext->sys_param1); } break; } #endif /****************************************************************************/ /* FILES */ #ifdef SYS_dup2 case SYS_dup2: IF_LINUX(case SYS_dup3:) { # ifdef LINUX if (success) signal_handle_dup(dcontext, (file_t) sys_param(dcontext, 1), (file_t) result); # endif break; } #endif #ifdef MACOS case SYS_fcntl_nocancel: #endif case SYS_fcntl: { #ifdef LINUX /* Linux-only since only for signalfd */ if (success) { file_t fd = (long) dcontext->sys_param0; int cmd = (int) dcontext->sys_param1; if ((cmd == F_DUPFD || cmd == F_DUPFD_CLOEXEC)) signal_handle_dup(dcontext, fd, (file_t) result); } break; #endif } case IF_MACOS_ELSE(SYS_getrlimit, IF_X64_ELSE(SYS_getrlimit, SYS_ugetrlimit)): { int resource = dcontext->sys_param0; if (success && resource == RLIMIT_NOFILE) { /* we stole some space: hide it from app */ struct rlimit *rlim = (struct rlimit *) dcontext->sys_param1; safe_write_ex(&rlim->rlim_cur, sizeof(rlim->rlim_cur), &app_rlimit_nofile.rlim_cur, NULL); safe_write_ex(&rlim->rlim_max, sizeof(rlim->rlim_max), &app_rlimit_nofile.rlim_max, NULL); } break; } #if !defined(ARM) && 
!defined(X64) && !defined(MACOS) /* Old struct w/ smaller fields */ case SYS_getrlimit: { int resource = dcontext->sys_param0; if (success && resource == RLIMIT_NOFILE) { struct compat_rlimit *rlim = (struct compat_rlimit *) dcontext->sys_param1; safe_write_ex(&rlim->rlim_cur, sizeof(rlim->rlim_cur), &app_rlimit_nofile.rlim_cur, NULL); safe_write_ex(&rlim->rlim_max, sizeof(rlim->rlim_max), &app_rlimit_nofile.rlim_max, NULL); } break; } #endif #ifdef LINUX case SYS_prlimit64: { int resource = dcontext->sys_param1; struct rlimit *rlim = (struct rlimit *) dcontext->sys_param3; if (success && resource == RLIMIT_NOFILE && rlim != NULL && /* XXX: xref pid discussion in pre_system_call SYS_prlimit64 */ (dcontext->sys_param0 == 0 || dcontext->sys_param0 == get_process_id())) { safe_write_ex(rlim, sizeof(*rlim), &app_rlimit_nofile, NULL); } break; } #endif #ifdef LINUX # ifdef SYS_readlink case SYS_readlink: # endif case SYS_readlinkat: if (success && DYNAMO_OPTION(early_inject)) { bool is_at = (sysnum == SYS_readlinkat); /* i#907: /proc/self/exe is a symlink to libdynamorio.so. We need * to fix it up if the app queries. Any thread id can be passed to * /proc/%d/exe, so we have to check. We could instead look for * libdynamorio.so in the result but we've tweaked our injector * in the past to exec different binaries so this seems more robust. */ if (symlink_is_self_exe((const char *)(is_at ? dcontext->sys_param1 : dcontext->sys_param0))) { char *tgt = (char *) (is_at ? dcontext->sys_param2 : dcontext->sys_param1); size_t tgt_sz = (size_t) (is_at ? dcontext->sys_param3 : dcontext->sys_param2); int len = snprintf(tgt, tgt_sz, "%s", get_application_name()); if (len > 0) set_success_return_val(dcontext, len); else { set_failure_return_val(dcontext, EINVAL); DODEBUG({ dcontext->expect_last_syscall_to_fail = true; }); } } } break; #endif #ifdef VMX86_SERVER default: if (is_vmkuw_sysnum(sysnum)) { vmkuw_post_system_call(dcontext); break; } #endif } /* switch */ DODEBUG({ if (ignorable_system_call_normalized(sysnum)) { STATS_INC(post_syscall_ignorable); } else { /* Many syscalls can fail though they aren't ignored. However, they * shouldn't happen without us knowing about them. See PR 402769 * for SYS_close case. */ if (!(success || sysnum == SYS_close || IF_MACOS(sysnum == SYS_close_nocancel ||) dcontext->expect_last_syscall_to_fail)) { LOG(THREAD, LOG_SYSCALLS, 1, "Unexpected failure of non-ignorable syscall %d\n", sysnum); } } }); exit_post_system_call: #ifdef CLIENT_INTERFACE /* The instrument_post_syscall should be called after DR finishes all * its operations, since DR needs to know the real syscall results, * and any changes made by the client are simply to fool the app. * Also, dr_syscall_invoke_another() needs to set eax, which shouldn't * affect the result of the 1st syscall. Xref i#1. */ /* after restore of xbp so client sees it as though was sysenter */ instrument_post_syscall(dcontext, sysnum); #endif dcontext->whereami = old_whereami; } /* initializes dynamorio library bounds. * does not use any heap. * assumed to be called prior to find_executable_vm_areas. */ static int get_dynamo_library_bounds(void) { /* Note that we're not counting DYNAMORIO_PRELOAD_NAME as a DR area, to match * Windows, so we should unload it like we do there. 
The other reason not to * count it is so is_in_dynamo_dll() can be the only exception to the * never-execute-from-DR-areas list rule */ int res; app_pc check_start, check_end; char *libdir; const char *dynamorio_libname; #ifdef STATIC_LIBRARY /* We don't know our image name, so look up our bounds with an internal * address. */ dynamorio_libname = NULL; check_start = (app_pc)&get_dynamo_library_bounds; #else /* !STATIC_LIBRARY */ # ifdef LINUX /* PR 361594: we get our bounds from linker-provided symbols. * Note that referencing the value of these symbols will crash: * always use the address only. */ extern int dynamorio_so_start, dynamorio_so_end; dynamo_dll_start = (app_pc) &dynamorio_so_start; dynamo_dll_end = (app_pc) ALIGN_FORWARD(&dynamorio_so_end, PAGE_SIZE); # elif defined(MACOS) dynamo_dll_start = module_dynamorio_lib_base(); # endif check_start = dynamo_dll_start; dynamorio_libname = IF_UNIT_TEST_ELSE(UNIT_TEST_EXE_NAME,DYNAMORIO_LIBRARY_NAME); #endif /* STATIC_LIBRARY */ res = memquery_library_bounds(dynamorio_libname, &check_start, &check_end, dynamorio_library_path, BUFFER_SIZE_ELEMENTS(dynamorio_library_path)); LOG(GLOBAL, LOG_VMAREAS, 1, PRODUCT_NAME" library path: %s\n", dynamorio_library_path); snprintf(dynamorio_library_filepath, BUFFER_SIZE_ELEMENTS(dynamorio_library_filepath), "%s%s", dynamorio_library_path, dynamorio_libname); NULL_TERMINATE_BUFFER(dynamorio_library_filepath); #if !defined(STATIC_LIBRARY) && defined(LINUX) ASSERT(check_start == dynamo_dll_start && check_end == dynamo_dll_end); #elif defined(MACOS) ASSERT(check_start == dynamo_dll_start); dynamo_dll_end = check_end; #else dynamo_dll_start = check_start; dynamo_dll_end = check_end; #endif LOG(GLOBAL, LOG_VMAREAS, 1, "DR library bounds: "PFX" to "PFX"\n", dynamo_dll_start, dynamo_dll_end); ASSERT(res > 0); /* Issue 20: we need the path to the alt arch */ strncpy(dynamorio_alt_arch_path, dynamorio_library_path, BUFFER_SIZE_ELEMENTS(dynamorio_alt_arch_path)); /* Assumption: libdir name is not repeated elsewhere in path */ libdir = strstr(dynamorio_alt_arch_path, IF_X64_ELSE(DR_LIBDIR_X64, DR_LIBDIR_X86)); if (libdir != NULL) { const char *newdir = IF_X64_ELSE(DR_LIBDIR_X86, DR_LIBDIR_X64); /* do NOT place the NULL */ strncpy(libdir, newdir, strlen(newdir)); } else { SYSLOG_INTERNAL_WARNING("unable to determine lib path for cross-arch execve"); } NULL_TERMINATE_BUFFER(dynamorio_alt_arch_path); LOG(GLOBAL, LOG_VMAREAS, 1, PRODUCT_NAME" alt arch path: %s\n", dynamorio_alt_arch_path); snprintf(dynamorio_alt_arch_filepath, BUFFER_SIZE_ELEMENTS(dynamorio_alt_arch_filepath), "%s%s", dynamorio_alt_arch_path, dynamorio_libname); NULL_TERMINATE_BUFFER(dynamorio_alt_arch_filepath); return res; } /* get full path to our own library, (cached), used for forking and message file name */ char* get_dynamorio_library_path(void) { if (!dynamorio_library_filepath[0]) { /* not cached */ get_dynamo_library_bounds(); } return dynamorio_library_filepath; } #ifdef LINUX /* Get full path+name of executable file from /proc/self/exe. Returns an empty * string on error. * FIXME i#47: This will return DR's path when using early injection. 
*/ static char * read_proc_self_exe(bool ignore_cache) { static char exepath[MAXIMUM_PATH]; static bool tried = false; # ifdef MACOS ASSERT_NOT_IMPLEMENTED(false); # endif if (!tried || ignore_cache) { tried = true; /* assume we have /proc/self/exe symlink: could add HAVE_PROC_EXE * but we have no alternative solution except assuming the first * /proc/self/maps entry is the executable */ ssize_t res; DEBUG_DECLARE(int len = ) snprintf(exepath, BUFFER_SIZE_ELEMENTS(exepath), "/proc/%d/exe", get_process_id()); ASSERT(len > 0); NULL_TERMINATE_BUFFER(exepath); /* i#960: readlink does not null terminate, so we do it. */ # ifdef SYS_readlink res = dynamorio_syscall(SYS_readlink, 3, exepath, exepath, BUFFER_SIZE_ELEMENTS(exepath)-1); # else res = dynamorio_syscall(SYS_readlinkat, 4, AT_FDCWD, exepath, exepath, BUFFER_SIZE_ELEMENTS(exepath)-1); # endif ASSERT(res < BUFFER_SIZE_ELEMENTS(exepath)); exepath[MAX(res, 0)] = '\0'; NULL_TERMINATE_BUFFER(exepath); } return exepath; } #endif /* LINUX */ app_pc get_application_base(void) { if (executable_start == NULL) { #ifdef HAVE_MEMINFO /* Haven't done find_executable_vm_areas() yet so walk maps ourselves */ const char *name = get_application_name(); if (name != NULL && name[0] != '\0') { memquery_iter_t iter; memquery_iterator_start(&iter, NULL, false/*won't alloc*/); while (memquery_iterator_next(&iter)) { if (strcmp(iter.comment, name) == 0) { executable_start = iter.vm_start; executable_end = iter.vm_end; break; } } memquery_iterator_stop(&iter); } #else /* We have to fail. Should we dl_iterate this early? */ #endif } return executable_start; } app_pc get_application_end(void) { if (executable_end == NULL) get_application_base(); return executable_end; } app_pc get_image_entry() { static app_pc image_entry_point = NULL; if (image_entry_point == NULL && executable_start != NULL) { module_area_t *ma; os_get_module_info_lock(); ma = module_pc_lookup(executable_start); ASSERT(ma != NULL); if (ma != NULL) { ASSERT(executable_start == ma->start); SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); image_entry_point = ma->entry_point; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); } os_get_module_info_unlock(); } return image_entry_point; } #ifdef DEBUG void mem_stats_snapshot() { /* FIXME: NYI */ } #endif bool is_in_dynamo_dll(app_pc pc) { ASSERT(dynamo_dll_start != NULL); #ifdef VMX86_SERVER /* We want to consider vmklib as part of the DR lib for allowing * execution (_init calls os_in_vmkernel_classic()) and for * reporting crashes as our fault */ if (vmk_in_vmklib(pc)) return true; #endif return (pc >= dynamo_dll_start && pc < dynamo_dll_end); } app_pc get_dynamorio_dll_start() { if (dynamo_dll_start == NULL) get_dynamo_library_bounds(); ASSERT(dynamo_dll_start != NULL); return dynamo_dll_start; } app_pc get_dynamorio_dll_end() { if (dynamo_dll_end == NULL) get_dynamo_library_bounds(); ASSERT(dynamo_dll_end != NULL); return dynamo_dll_end; } app_pc get_dynamorio_dll_preferred_base() { /* on Linux there is no preferred base if we're PIC, * therefore is always equal to dynamo_dll_start */ return get_dynamorio_dll_start(); } /* assumed to be called after find_dynamo_library_vm_areas() */ int find_executable_vm_areas(void) { int count = 0; #ifdef MACOS app_pc shared_start, shared_end; bool have_shared = module_dyld_shared_region(&shared_start, &shared_end); #endif #ifdef RETURN_AFTER_CALL dcontext_t *dcontext = get_thread_private_dcontext(); os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field; #endif #ifndef HAVE_MEMINFO_QUERY /* We avoid tracking the 
innards of vmheap for all_memory_areas by
 * adding a single no-access region for the whole vmheap.
 * Queries from heap routines use _from_os.
 * Queries in check_thread_vm_area are fine getting "noaccess": wants
 * any DR memory not on exec areas list to be noaccess.
 * Queries from clients: should be ok to hide innards. Marking noaccess
 * should be safer than marking free, as unruly client might try to mmap
 * something in the free space: better to have it think it's reserved but
 * not yet used memory. FIXME: we're not marking beyond-vmheap DR regions
 * as noaccess! */
byte *our_heap_start, *our_heap_end;
get_vmm_heap_bounds(&our_heap_start, &our_heap_end);
if (our_heap_end - our_heap_start > 0) {
memcache_update_locked(our_heap_start, our_heap_end, MEMPROT_NONE, DR_MEMTYPE_DATA, false/*!exists*/); }
#endif
#ifndef HAVE_MEMINFO
count = find_vm_areas_via_probe();
#else
memquery_iter_t iter;
memquery_iterator_start(&iter, NULL, true/*may alloc*/);
while (memquery_iterator_next(&iter)) {
bool image = false;
size_t size = iter.vm_end - iter.vm_start;
/* i#479, hide private module and match Windows's behavior */
bool skip = dynamo_vm_area_overlap(iter.vm_start, iter.vm_end) && !is_in_dynamo_dll(iter.vm_start) /* our own text section is ok */
/* client lib text section is ok (xref i#487) */ IF_CLIENT_INTERFACE(&& !is_in_client_lib(iter.vm_start));
DEBUG_DECLARE(const char *map_type = "Private");
/* we can't really tell what's a stack and what's not, but we rely on
 * our passing NULL preventing rwx regions from being added to executable
 * or future list, even w/ -executable_if_alloc */
LOG(GLOBAL, LOG_VMAREAS, 2, "start="PFX" end="PFX" prot=%x comment=%s\n", iter.vm_start, iter.vm_end, iter.prot, iter.comment);
/* Issue 89: the vdso might be loaded inside ld.so as below,
 * which causes the ASSERT_CURIOSITY in mmap_check_for_module_overlap to fail.
 * b7fa3000-b7fbd000 r-xp 00000000 08:01 108679 /lib/ld-2.8.90.so
 * b7fbd000-b7fbe000 r-xp b7fbd000 00:00 0 [vdso]
 * b7fbe000-b7fbf000 r--p 0001a000 08:01 108679 /lib/ld-2.8.90.so
 * b7fbf000-b7fc0000 rw-p 0001b000 08:01 108679 /lib/ld-2.8.90.so
 * So we always first check if it is a vdso page before calling
 * mmap_check_for_module_overlap.
 * Update: with i#160/PR 562667 handling non-contiguous modules like
 * ld.so we now gracefully handle other objects like vdso in gaps in a
 * module, but it's simpler to leave this ordering here. */
if (skip) {
/* i#479, hide private module and match Windows's behavior */
LOG(GLOBAL, LOG_VMAREAS, 2, PFX"-"PFX" skipping: internal DR region\n", iter.vm_start, iter.vm_end);
#ifdef MACOS
} else if (have_shared && iter.vm_start >= shared_start && iter.vm_start < shared_end) {
/* Skip modules we happen to find inside the dyld shared cache,
 * as we'll fail to identify the library. We add them
 * in module_walk_dyld_list instead. */
image = true;
#endif
} else if (strncmp(iter.comment, VSYSCALL_PAGE_MAPS_NAME, strlen(VSYSCALL_PAGE_MAPS_NAME)) == 0 ||
IF_X64_ELSE(strncmp(iter.comment, VSYSCALL_REGION_MAPS_NAME, strlen(VSYSCALL_REGION_MAPS_NAME)) == 0,
/* Older kernels do not label it as "[vdso]", but it is hardcoded there */
/* 32-bit */ iter.vm_start == VSYSCALL_PAGE_START_HARDCODED)) {
# ifndef X64
/* We assume no vsyscall page for x64; thus, checking the
 * hardcoded address shouldn't have any false positives.
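 * (VSYSCALL_PAGE_START_HARDCODED is the fixed 0xffffe000 page used by such
 * old 32-bit kernels.)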
*/ ASSERT(iter.vm_end - iter.vm_start == PAGE_SIZE || /* i#1583: recent kernels have 2-page vdso */ iter.vm_end - iter.vm_start == 2*PAGE_SIZE); ASSERT(!dynamo_initialized); /* .data should be +w */ /* we're not considering as "image" even if part of ld.so (xref i#89) and * thus we aren't adjusting our code origins policies to remove the * vsyscall page exemption. */ DODEBUG({ map_type = "VDSO"; }); /* On re-attach, the vdso can be split into two entries (from DR's hook), * so take just the first one as the start (xref i#2157). */ if (vsyscall_page_start == NULL) vsyscall_page_start = iter.vm_start; if (vdso_page_start == NULL) vdso_page_start = vsyscall_page_start; /* assume identical for now */ LOG(GLOBAL, LOG_VMAREAS, 1, "found vsyscall page @ "PFX" %s\n", vsyscall_page_start, iter.comment); # else /* i#172 * fix bugs for OS where vdso page is set unreadable as below * ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0 [vdso] * but it is readable indeed. */ /* i#430 * fix bugs for OS where vdso page is set unreadable as below * ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0 [vsyscall] * but it is readable indeed. */ if (!TESTALL((PROT_READ|PROT_EXEC), iter.prot)) iter.prot |= (PROT_READ|PROT_EXEC); /* i#1908: vdso and vsyscall pages are now split */ if (strncmp(iter.comment, VSYSCALL_PAGE_MAPS_NAME, strlen(VSYSCALL_PAGE_MAPS_NAME)) == 0) vdso_page_start = iter.vm_start; else if (strncmp(iter.comment, VSYSCALL_REGION_MAPS_NAME, strlen(VSYSCALL_REGION_MAPS_NAME)) == 0) vsyscall_page_start = iter.vm_start; # endif } else if (mmap_check_for_module_overlap(iter.vm_start, size, TEST(MEMPROT_READ, iter.prot), iter.inode, false)) { /* we already added the whole image region when we hit the first map for it */ image = true; DODEBUG({ map_type = "ELF SO"; }); } else if (TEST(MEMPROT_READ, iter.prot) && module_is_header(iter.vm_start, size)) { size_t image_size = size; app_pc mod_base, mod_first_end, mod_max_end; char *exec_match; bool found_exec = false; image = true; DODEBUG({ map_type = "ELF SO"; }); LOG(GLOBAL, LOG_VMAREAS, 2, "Found already mapped module first segment :\n" "\t"PFX"-"PFX"%s inode="UINT64_FORMAT_STRING" name=%s\n", iter.vm_start, iter.vm_end, TEST(MEMPROT_EXEC, iter.prot) ? " +x": "", iter.inode, iter.comment); #ifdef LINUX ASSERT_CURIOSITY(iter.inode != 0); /* mapped images should have inodes */ #endif ASSERT_CURIOSITY(iter.offset == 0); /* first map shouldn't have offset */ /* Get size by walking the program headers. This includes .bss. */ if (module_walk_program_headers(iter.vm_start, size, false, true, /* i#1589: ld.so relocated .dynamic */ &mod_base, &mod_first_end, &mod_max_end, NULL, NULL)) { image_size = mod_max_end - mod_base; } else { ASSERT_NOT_REACHED(); } LOG(GLOBAL, LOG_VMAREAS, 2, "Found already mapped module total module :\n" "\t"PFX"-"PFX" inode="UINT64_FORMAT_STRING" name=%s\n", iter.vm_start, iter.vm_start+image_size, iter.inode, iter.comment); /* look for executable */ #ifdef LINUX exec_match = get_application_name(); if (exec_match != NULL && exec_match[0] != '\0') found_exec = (strcmp(iter.comment, exec_match) == 0); #else /* We don't have a nice normalized name: it can have ./ or ../ inside * it. But, we can distinguish an exe from a lib here, even for PIE, * so we go with that plus a basename comparison. 
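 * (The Mach-O header's filetype field distinguishes MH_EXECUTE from MH_DYLIB,
 * which is what lets module_is_executable() tell even a PIE apart from a
 * library here.)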
*/ exec_match = (char *) get_application_short_name(); if (module_is_executable(iter.vm_start) && exec_match != NULL && exec_match[0] != '\0') { const char *iter_basename = strrchr(iter.comment, '/'); if (iter_basename == NULL) iter_basename = iter.comment; else iter_basename++; found_exec = (strcmp(iter_basename, exec_match) == 0); } #endif if (found_exec) { if (executable_start == NULL) executable_start = iter.vm_start; else ASSERT(iter.vm_start == executable_start); LOG(GLOBAL, LOG_VMAREAS, 2, "Found executable %s @"PFX"-"PFX" %s\n", get_application_name(), iter.vm_start, iter.vm_start+image_size, iter.comment); } /* We don't yet know whether contiguous so we have to settle for the * first segment's size. We'll update it in module_list_add(). */ module_list_add(iter.vm_start, mod_first_end - mod_base, false, iter.comment, iter.inode); #ifdef MACOS /* look for dyld */ if (strcmp(iter.comment, "/usr/lib/dyld") == 0) module_walk_dyld_list(iter.vm_start); #endif } else if (iter.inode != 0) { DODEBUG({ map_type = "Mapped File"; }); } /* add all regions (incl. dynamo_areas and stack) to all_memory_areas */ LOG(GLOBAL, LOG_VMAREAS, 4, "find_executable_vm_areas: adding: "PFX"-"PFX" prot=%d\n", iter.vm_start, iter.vm_end, iter.prot); IF_NO_MEMQUERY(memcache_update_locked(iter.vm_start, iter.vm_end, iter.prot, image ? DR_MEMTYPE_IMAGE : DR_MEMTYPE_DATA, false/*!exists*/)); /* FIXME: best if we could pass every region to vmareas, but * it has no way of determining if this is a stack b/c we don't have * a dcontext at this point -- so we just don't pass the stack */ if (!skip /* i#479, hide private module and match Windows's behavior */ && app_memory_allocation(NULL, iter.vm_start, (iter.vm_end - iter.vm_start), iter.prot, image _IF_DEBUG(map_type))) { count++; } } memquery_iterator_stop(&iter); #endif /* !HAVE_MEMINFO */ #ifndef HAVE_MEMINFO_QUERY DOLOG(4, LOG_VMAREAS, memcache_print(GLOBAL,"init: all memory areas:\n");); #endif #ifdef RETURN_AFTER_CALL /* Find the bottom of the stack of the initial (native) entry */ ostd->stack_bottom_pc = find_stack_bottom(); LOG(THREAD, LOG_ALL, 1, "Stack bottom pc = "PFX"\n", ostd->stack_bottom_pc); #endif /* now that we've walked memory print all modules */ LOG(GLOBAL, LOG_VMAREAS, 2, "Module list after memory walk\n"); DOLOG(1, LOG_VMAREAS, { print_modules(GLOBAL, DUMP_NOT_XML); }); STATS_ADD(num_app_code_modules, count); /* now that we have the modules set up, query libc */ get_libc_errno_location(true/*force init*/); return count; } /* initializes dynamorio library bounds. * does not use any heap. * assumed to be called prior to find_executable_vm_areas. */ int find_dynamo_library_vm_areas(void) { #ifndef STATIC_LIBRARY /* We didn't add inside get_dynamo_library_bounds b/c it was called pre-alloc. * We don't bother to break down the sub-regions. * Assumption: we don't need to have the protection flags for DR sub-regions. * For static library builds, DR's code is in the exe and isn't considered * to be a DR area. 
*/ add_dynamo_vm_area(get_dynamorio_dll_start(), get_dynamorio_dll_end(), MEMPROT_READ|MEMPROT_WRITE|MEMPROT_EXEC, true /* from image */ _IF_DEBUG(dynamorio_library_filepath)); #endif #ifdef VMX86_SERVER if (os_in_vmkernel_userworld()) vmk_add_vmklib_to_dynamo_areas(); #endif return 1; } bool get_stack_bounds(dcontext_t *dcontext, byte **base, byte **top) { os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field; if (ostd->stack_base == NULL) { /* initialize on-demand since don't have app esp handy in os_thread_init() * FIXME: the comment here -- ignoring it for now, if hit cases confirming * it the right thing will be to merge adjacent rwx regions and assume * their union is the stack -- otherwise have to have special stack init * routine called from x86.asm new_thread_dynamo_start and internal_dynamo_start, * and the latter is not a do-once... */ size_t size = 0; bool ok; /* store stack info at thread startup, since stack can get fragmented in * /proc/self/maps w/ later mprotects and it can be hard to piece together later */ if (IF_MEMQUERY_ELSE(false, DYNAMO_OPTION(use_all_memory_areas))) { ok = get_memory_info((app_pc)get_mcontext(dcontext)->xsp, &ostd->stack_base, &size, NULL); } else { ok = get_memory_info_from_os((app_pc)get_mcontext(dcontext)->xsp, &ostd->stack_base, &size, NULL); } ASSERT(ok); ostd->stack_top = ostd->stack_base + size; LOG(THREAD, LOG_THREADS, 1, "App stack is "PFX"-"PFX"\n", ostd->stack_base, ostd->stack_top); } if (base != NULL) *base = ostd->stack_base; if (top != NULL) *top = ostd->stack_top; return true; } #ifdef RETURN_AFTER_CALL initial_call_stack_status_t at_initial_stack_bottom(dcontext_t *dcontext, app_pc target_pc) { /* We can't rely exclusively on finding the true stack bottom * b/c we can't always walk the call stack (PR 608990) so we * use the image entry as our primary trigger */ if (executable_start != NULL/*defensive*/ && reached_image_entry_yet()) { return INITIAL_STACK_EMPTY; } else { /* If our stack walk ends early we could have false positives, but * that's better than false negatives if we miss the image entry * or we were unable to find the executable_start */ os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field; if (target_pc == ostd->stack_bottom_pc) { return INITIAL_STACK_BOTTOM_REACHED; } else { return INITIAL_STACK_BOTTOM_NOT_REACHED; } } } #endif /* RETURN_AFTER_CALL */ /* Uses our cached data structures (if in use, else raw query) to retrieve memory info */ bool query_memory_ex(const byte *pc, OUT dr_mem_info_t *out_info) { #ifdef HAVE_MEMINFO_QUERY return query_memory_ex_from_os(pc, out_info); #else return memcache_query_memory(pc, out_info); #endif } bool query_memory_cur_base(const byte *pc, OUT dr_mem_info_t *info) { return query_memory_ex(pc, info); } /* Use our cached data structures (if in use, else raw query) to retrieve memory info */ bool get_memory_info(const byte *pc, byte **base_pc, size_t *size, uint *prot /* OUT optional, returns MEMPROT_* value */) { dr_mem_info_t info; if (is_vmm_reserved_address((byte*)pc, 1)) { if (!query_memory_ex_from_os(pc, &info) || info.type == DR_MEMTYPE_FREE) return false; } else { if (!query_memory_ex(pc, &info) || info.type == DR_MEMTYPE_FREE) return false; } if (base_pc != NULL) *base_pc = info.base_pc; if (size != NULL) *size = info.size; if (prot != NULL) *prot = info.prot; return true; } /* We assume that this routine might be called instead of query_memory_ex() * b/c the caller is in a fragile location and cannot acquire locks, so * we try to do the same here. 
*/ bool query_memory_ex_from_os(const byte *pc, OUT dr_mem_info_t *info) { bool have_type = false; bool res = memquery_from_os(pc, info, &have_type); if (!res) { /* No other failure types for now */ info->type = DR_MEMTYPE_ERROR; } else if (res && !have_type) { /* We pass 0 instead of info->size b/c even if marked as +r we can still * get SIGBUS if beyond end of mmapped file: not uncommon if querying * in middle of library load before .bss fully set up (PR 528744). * However, if there is no fault handler, is_elf_so_header's safe_read will * recurse to here, so in that case we use info->size but we assume * it's only at init or exit and so not in the middle of a load * and less likely to be querying a random mmapped file. * The cleaner fix is to allow safe_read to work w/o a dcontext or * fault handling: i#350/PR 529066. */ if (TEST(MEMPROT_READ, info->prot) && module_is_header(info->base_pc, fault_handling_initialized ? 0 : info->size)) info->type = DR_MEMTYPE_IMAGE; else { /* FIXME: won't quite match find_executable_vm_areas marking as * image: can be doubly-mapped so; don't want to count vdso; etc. */ info->type = DR_MEMTYPE_DATA; } } return res; } bool get_memory_info_from_os(const byte *pc, byte **base_pc, size_t *size, uint *prot /* OUT optional, returns MEMPROT_* value */) { dr_mem_info_t info; if (!query_memory_ex_from_os(pc, &info) || info.type == DR_MEMTYPE_FREE) return false; if (base_pc != NULL) *base_pc = info.base_pc; if (size != NULL) *size = info.size; if (prot != NULL) *prot = info.prot; return true; } /* in utils.c, exported only for our hack! */ extern void deadlock_avoidance_unlock(mutex_t *lock, bool ownable); void mutex_wait_contended_lock(mutex_t *lock) { #ifdef CLIENT_INTERFACE dcontext_t *dcontext = get_thread_private_dcontext(); bool set_client_safe_for_synch = ((dcontext != NULL) && IS_CLIENT_THREAD(dcontext) && ((mutex_t *)dcontext->client_data->client_grab_mutex == lock)); #endif /* i#96/PR 295561: use futex(2) if available */ if (ksynch_kernel_support()) { /* Try to get the lock. If already held, it's fine to store any value * > LOCK_SET_STATE (we don't rely on paired incs/decs) so that * the next unlocker will call mutex_notify_released_lock(). */ ptr_int_t res; #ifndef LINUX /* we actually don't use this for Linux: see below */ KSYNCH_TYPE *event = mutex_get_contended_event(lock); ASSERT(event != NULL && ksynch_var_initialized(event)); #endif while (atomic_exchange_int(&lock->lock_requests, LOCK_CONTENDED_STATE) != LOCK_FREE_STATE) { #ifdef CLIENT_INTERFACE if (set_client_safe_for_synch) dcontext->client_data->client_thread_safe_for_synch = true; #endif /* Unfortunately the synch semantics are different for Linux vs Mac. * We have to use lock_requests as the futex to avoid waiting if * lock_requests changes, while on Mac the underlying synch prevents * a wait there. */ #ifdef LINUX /* We'll abort the wait if lock_requests has changed at all. * We can't have a series of changes that result in no apparent * change w/o someone acquiring the lock, b/c * mutex_notify_released_lock() sets lock_requests to LOCK_FREE_STATE. 
*/ res = ksynch_wait(&lock->lock_requests, LOCK_CONTENDED_STATE); #else res = ksynch_wait(event, 0); #endif if (res != 0 && res != -EWOULDBLOCK) os_thread_yield(); #ifdef CLIENT_INTERFACE if (set_client_safe_for_synch) dcontext->client_data->client_thread_safe_for_synch = false; #endif /* we don't care whether properly woken (res==0), var mismatch * (res==-EWOULDBLOCK), or error: regardless, someone else * could have acquired the lock, so we try again */ } } else { /* we now have to undo our earlier request */ atomic_dec_and_test(&lock->lock_requests); while (!mutex_trylock(lock)) { #ifdef CLIENT_INTERFACE if (set_client_safe_for_synch) dcontext->client_data->client_thread_safe_for_synch = true; #endif os_thread_yield(); #ifdef CLIENT_INTERFACE if (set_client_safe_for_synch) dcontext->client_data->client_thread_safe_for_synch = false; #endif } #ifdef DEADLOCK_AVOIDANCE /* HACK: trylock's success causes it to do DEADLOCK_AVOIDANCE_LOCK, so to * avoid two in a row (causes assertion on owner) we unlock here * In the future we will remove the trylock here and this will go away. */ deadlock_avoidance_unlock(lock, true); #endif } return; } void mutex_notify_released_lock(mutex_t *lock) { /* i#96/PR 295561: use futex(2) if available. */ if (ksynch_kernel_support()) { /* Set to LOCK_FREE_STATE to avoid concurrent lock attempts from * resulting in a futex_wait value match w/o anyone owning the lock */ lock->lock_requests = LOCK_FREE_STATE; /* No reason to wake multiple threads: just one */ #ifdef LINUX ksynch_wake(&lock->lock_requests); #else ksynch_wake(&lock->contended_event); #endif } /* else nothing to do */ } /* read_write_lock_t implementation doesn't expect the contention path helpers to guarantee the lock is held (unlike mutexes) so simple yields are still acceptable. */ void rwlock_wait_contended_writer(read_write_lock_t *rwlock) { os_thread_yield(); } void rwlock_notify_writer(read_write_lock_t *rwlock) { /* nothing to do here */ } void rwlock_wait_contended_reader(read_write_lock_t *rwlock) { os_thread_yield(); } void rwlock_notify_readers(read_write_lock_t *rwlock) { /* nothing to do here */ } /***************************************************************************/ /* events are un-signaled when successfully waited upon. */ typedef struct linux_event_t { /* Any function that sets this flag must also notify possibly waiting * thread(s). See i#96/PR 295561. */ KSYNCH_TYPE signaled; mutex_t lock; } linux_event_t; /* FIXME: this routine will need to have a macro wrapper to let us assign different ranks to * all events for DEADLOCK_AVOIDANCE. Currently a single rank seems to work. 
*/ event_t create_event() { event_t e = (event_t) global_heap_alloc(sizeof(linux_event_t) HEAPACCT(ACCT_OTHER)); ksynch_init_var(&e->signaled); ASSIGN_INIT_LOCK_FREE(e->lock, event_lock); /* FIXME: we'll need to pass the event name here */ return e; } void destroy_event(event_t e) { DELETE_LOCK(e->lock); ksynch_free_var(&e->signaled); global_heap_free(e, sizeof(linux_event_t) HEAPACCT(ACCT_OTHER)); } void signal_event(event_t e) { mutex_lock(&e->lock); ksynch_set_value(&e->signaled, 1); ksynch_wake(&e->signaled); LOG(THREAD_GET, LOG_THREADS, 3,"thread "TIDFMT" signalling event "PFX"\n",get_thread_id(),e); mutex_unlock(&e->lock); } void reset_event(event_t e) { mutex_lock(&e->lock); ksynch_set_value(&e->signaled, 0); LOG(THREAD_GET, LOG_THREADS, 3,"thread "TIDFMT" resetting event "PFX"\n",get_thread_id(),e); mutex_unlock(&e->lock); } void wait_for_event(event_t e) { #ifdef DEBUG dcontext_t *dcontext = get_thread_private_dcontext(); #endif /* Use a user-space event on Linux, a kernel event on Windows. */ LOG(THREAD, LOG_THREADS, 3, "thread "TIDFMT" waiting for event "PFX"\n",get_thread_id(),e); while (true) { if (ksynch_get_value(&e->signaled) == 1) { mutex_lock(&e->lock); if (ksynch_get_value(&e->signaled) == 0) { /* some other thread beat us to it */ LOG(THREAD, LOG_THREADS, 3, "thread "TIDFMT" was beaten to event "PFX"\n", get_thread_id(),e); mutex_unlock(&e->lock); } else { /* reset the event */ ksynch_set_value(&e->signaled, 0); mutex_unlock(&e->lock); LOG(THREAD, LOG_THREADS, 3, "thread "TIDFMT" finished waiting for event "PFX"\n", get_thread_id(),e); return; } } else { /* Waits only if the signaled flag is not set as 1. Return value * doesn't matter because the flag will be re-checked. */ ksynch_wait(&e->signaled, 0); } if (ksynch_get_value(&e->signaled) == 0) { /* If it still has to wait, give up the cpu. */ os_thread_yield(); } } } /*************************************************************************** * DIRECTORY ITERATOR */ /* These structs are written to the buf that we pass to getdents. We can * iterate them by adding d_reclen to the current buffer offset and interpreting * that as the next entry. */ struct linux_dirent { #ifdef SYS_getdents /* Adapted from struct old_linux_dirent in linux/fs/readdir.c: */ unsigned long d_ino; unsigned long d_off; unsigned short d_reclen; char d_name[]; #else /* Adapted from struct linux_dirent64 in linux/include/linux/dirent.h: */ uint64 d_ino; int64 d_off; unsigned short d_reclen; unsigned char d_type; char d_name[]; #endif }; #define CURRENT_DIRENT(iter) \ ((struct linux_dirent *)(&iter->buf[iter->off])) static void os_dir_iterator_start(dir_iterator_t *iter, file_t fd) { iter->fd = fd; iter->off = 0; iter->end = 0; } static bool os_dir_iterator_next(dir_iterator_t *iter) { #ifdef MACOS /* We can use SYS_getdirentries, but do we even need a dir iterator? * On Linux it's only used to enumerate /proc/pid/task. */ ASSERT_NOT_IMPLEMENTED(false); return false; #else if (iter->off < iter->end) { /* Have existing dents, get the next offset. */ iter->off += CURRENT_DIRENT(iter)->d_reclen; ASSERT(iter->off <= iter->end); } if (iter->off == iter->end) { /* Do a getdents syscall. Unlike when reading a file, the kernel will * not read a partial linux_dirent struct, so we don't need to shift the * left over bytes to the buffer start. See the getdents manpage for * the example code that this is based on. 
*/ iter->off = 0; # ifdef SYS_getdents iter->end = dynamorio_syscall(SYS_getdents, 3, iter->fd, iter->buf, sizeof(iter->buf)); # else iter->end = dynamorio_syscall(SYS_getdents64, 3, iter->fd, iter->buf, sizeof(iter->buf)); # endif ASSERT(iter->end <= sizeof(iter->buf)); if (iter->end <= 0) { /* No more dents, or error. */ iter->name = NULL; if (iter->end < 0) { LOG(GLOBAL, LOG_SYSCALLS, 1, "getdents syscall failed with errno %d\n", -iter->end); } return false; } } iter->name = CURRENT_DIRENT(iter)->d_name; return true; #endif } /*************************************************************************** * THREAD TAKEOVER */ /* Record used to synchronize thread takeover. */ typedef struct _takeover_record_t { thread_id_t tid; event_t event; } takeover_record_t; /* When attempting thread takeover, we store an array of thread id and event * pairs here. Each thread we signal is supposed to enter DR control and signal * this event after it has added itself to all_threads. * * XXX: What we really want is to be able to use SYS_rt_tgsigqueueinfo (Linux >= * 2.6.31) to pass the event_t to each thread directly, rather than using this * side data structure. */ static takeover_record_t *thread_takeover_records; static uint num_thread_takeover_records; /* This is the dcontext of the thread that initiated the takeover. We read the * owning_thread and signal_field threads from it in the signaled threads to * set up siginfo sharing. */ static dcontext_t *takeover_dcontext; /* Lists active threads in the process. * XXX: The /proc man page says /proc/pid/task is only available if the main * thread is still alive, but experiments on 2.6.38 show otherwise. */ static thread_id_t * os_list_threads(dcontext_t *dcontext, uint *num_threads_out) { dir_iterator_t iter; file_t task_dir; uint tids_alloced = 10; uint num_threads = 0; thread_id_t *new_tids; thread_id_t *tids; ASSERT(num_threads_out != NULL); #ifdef MACOS /* XXX i#58: NYI. * We may want SYS_proc_info with PROC_INFO_PID_INFO and PROC_PIDLISTTHREADS, * or is that just BSD threads and instead we want process_set_tasks() * and task_info() as in 7.3.1.3 in Singh's OSX book? */ *num_threads_out = 0; return NULL; #endif tids = HEAP_ARRAY_ALLOC(dcontext, thread_id_t, tids_alloced, ACCT_THREAD_MGT, PROTECTED); task_dir = os_open_directory("/proc/self/task", OS_OPEN_READ); ASSERT(task_dir != INVALID_FILE); os_dir_iterator_start(&iter, task_dir); while (os_dir_iterator_next(&iter)) { thread_id_t tid; DEBUG_DECLARE(int r;) if (strcmp(iter.name, ".") == 0 || strcmp(iter.name, "..") == 0) continue; IF_DEBUG(r =) sscanf(iter.name, "%u", &tid); ASSERT_MESSAGE(CHKLVL_ASSERTS, "failed to parse /proc/pid/task entry", r == 1); if (tid <= 0) continue; if (num_threads == tids_alloced) { /* realloc, essentially. Less expensive than counting first. */ new_tids = HEAP_ARRAY_ALLOC(dcontext, thread_id_t, tids_alloced * 2, ACCT_THREAD_MGT, PROTECTED); memcpy(new_tids, tids, sizeof(thread_id_t) * tids_alloced); HEAP_ARRAY_FREE(dcontext, tids, thread_id_t, tids_alloced, ACCT_THREAD_MGT, PROTECTED); tids = new_tids; tids_alloced *= 2; } tids[num_threads++] = tid; } ASSERT(iter.end == 0); /* No reading errors. */ os_close(task_dir); /* realloc back down to num_threads for caller simplicity. 
*/ new_tids = HEAP_ARRAY_ALLOC(dcontext, thread_id_t, num_threads, ACCT_THREAD_MGT, PROTECTED); memcpy(new_tids, tids, sizeof(thread_id_t) * num_threads); HEAP_ARRAY_FREE(dcontext, tids, thread_id_t, tids_alloced, ACCT_THREAD_MGT, PROTECTED); tids = new_tids; *num_threads_out = num_threads; return tids; } /* List the /proc/self/task directory and add all unknown thread ids to the * all_threads hashtable in dynamo.c. Returns true if we found any unknown * threads and false otherwise. We assume that since we don't know about them * they are not under DR and have no dcontexts. */ bool os_take_over_all_unknown_threads(dcontext_t *dcontext) { uint i; uint num_threads; thread_id_t *tids; uint threads_to_signal = 0; mutex_lock(&thread_initexit_lock); CLIENT_ASSERT(thread_takeover_records == NULL, "Only one thread should attempt app take over!"); /* Find tids for which we have no thread record, meaning they are not under * our control. Shift them to the beginning of the tids array. */ tids = os_list_threads(dcontext, &num_threads); if (tids == NULL) { mutex_unlock(&thread_initexit_lock); return false; /* have to assume no unknown */ } for (i = 0; i < num_threads; i++) { thread_record_t *tr = thread_lookup(tids[i]); if (tr == NULL || /* Re-takeover known threads that are currently native as well. * XXX i#95: we need a synchall-style loop for known threads as * they can be in DR for syscall hook handling. * Update: we now remove the hook for start/stop: but native_exec * or other individual threads going native could still hit this. */ (is_thread_currently_native(tr) IF_CLIENT_INTERFACE(&& !IS_CLIENT_THREAD(tr->dcontext)))) tids[threads_to_signal++] = tids[i]; } LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: %d threads to take over\n", threads_to_signal); if (threads_to_signal > 0) { takeover_record_t *records; /* Assuming pthreads, prepare signal_field for sharing. */ handle_clone(dcontext, PTHREAD_CLONE_FLAGS); /* Create records with events for all the threads we want to signal. */ LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: publishing takeover records\n"); records = HEAP_ARRAY_ALLOC(dcontext, takeover_record_t, threads_to_signal, ACCT_THREAD_MGT, PROTECTED); for (i = 0; i < threads_to_signal; i++) { LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: will signal thread "TIDFMT"\n", tids[i]); records[i].tid = tids[i]; records[i].event = create_event(); } /* Publish the records and the initial take over dcontext. */ thread_takeover_records = records; num_thread_takeover_records = threads_to_signal; takeover_dcontext = dcontext; /* Signal the other threads. */ for (i = 0; i < threads_to_signal; i++) { thread_signal(get_process_id(), records[i].tid, SUSPEND_SIGNAL); } mutex_unlock(&thread_initexit_lock); /* Wait for all the threads we signaled. */ ASSERT_OWN_NO_LOCKS(); for (i = 0; i < threads_to_signal; i++) { wait_for_event(records[i].event); } /* Now that we've taken over the other threads, we can safely free the * records and reset the shared globals. 
*/ mutex_lock(&thread_initexit_lock); LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: takeover complete, unpublishing records\n"); thread_takeover_records = NULL; num_thread_takeover_records = 0; takeover_dcontext = NULL; for (i = 0; i < threads_to_signal; i++) { destroy_event(records[i].event); } HEAP_ARRAY_FREE(dcontext, records, takeover_record_t, threads_to_signal, ACCT_THREAD_MGT, PROTECTED); } mutex_unlock(&thread_initexit_lock); HEAP_ARRAY_FREE(dcontext, tids, thread_id_t, num_threads, ACCT_THREAD_MGT, PROTECTED); return threads_to_signal > 0; } bool os_thread_re_take_over(void) { #ifdef X86 /* i#2089: is_thread_initialized() will fail for a currently-native app. * We bypass the magic field checks here of is_thread_tls_initialized(). * XXX: should this be inside is_thread_initialized()? But that may mislead * other callers: the caller has to restore the TLs. Some old code also * used get_thread_private_dcontext() being NULL to indicate an unknown thread: * that should also call here. */ if (!is_thread_initialized() && is_thread_tls_allocated()) { /* It's safe to call thread_lookup() for ourself. */ thread_record_t *tr = thread_lookup(get_sys_thread_id()); if (tr != NULL) { ASSERT(is_thread_currently_native(tr)); LOG(GLOBAL, LOG_THREADS, 1, "\tretakeover for cur-native thread "TIDFMT"\n", get_sys_thread_id()); LOG(tr->dcontext->logfile, LOG_THREADS, 1, "\nretakeover for cur-native thread "TIDFMT"\n", get_sys_thread_id()); os_swap_dr_tls(tr->dcontext, false/*to dr*/); ASSERT(is_thread_initialized()); return true; } } #endif return false; } /* Takes over the current thread from the signal handler. We notify the thread * that signaled us by signalling our event in thread_takeover_records. */ void os_thread_take_over(priv_mcontext_t *mc, kernel_sigset_t *sigset) { uint i; thread_id_t mytid; dcontext_t *dcontext; priv_mcontext_t *dc_mc; event_t event = NULL; LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: received signal in thread "TIDFMT"\n", get_sys_thread_id()); /* Do standard DR thread initialization. Mirrors code in * create_clone_record and new_thread_setup, except we're not putting a * clone record on the dstack. */ os_thread_re_take_over(); if (!is_thread_initialized()) { IF_DEBUG(int r =) dynamo_thread_init(NULL, mc _IF_CLIENT_INTERFACE(false)); ASSERT(r == SUCCESS); dcontext = get_thread_private_dcontext(); ASSERT(dcontext != NULL); share_siginfo_after_take_over(dcontext, takeover_dcontext); } else { /* Re-takeover a thread that we let go native */ dcontext = get_thread_private_dcontext(); ASSERT(dcontext != NULL); } signal_set_mask(dcontext, sigset); dynamo_thread_under_dynamo(dcontext); dc_mc = get_mcontext(dcontext); *dc_mc = *mc; dcontext->whereami = WHERE_APP; dcontext->next_tag = mc->pc; /* Wake up the thread that initiated the take over. */ mytid = get_thread_id(); ASSERT(thread_takeover_records != NULL); for (i = 0; i < num_thread_takeover_records; i++) { if (thread_takeover_records[i].tid == mytid) { event = thread_takeover_records[i].event; break; } } ASSERT_MESSAGE(CHKLVL_ASSERTS, "mytid not present in takeover records!", event != NULL); signal_event(event); DOLOG(2, LOG_TOP, { byte *cur_esp; GET_STACK_PTR(cur_esp); LOG(THREAD, LOG_TOP, 2, "%s: next_tag="PFX", cur xsp="PFX", mc->xsp="PFX"\n", __FUNCTION__, dcontext->next_tag, cur_esp, mc->xsp); }); /* Start interpreting from the signal context. 
*/ call_switch_stack(dcontext, dcontext->dstack, (void(*)(void*))dispatch, NULL/*not on initstack*/, false/*shouldn't return*/); ASSERT_NOT_REACHED(); } bool os_thread_take_over_suspended_native(dcontext_t *dcontext) { os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field; if (!is_thread_currently_native(dcontext->thread_record) || ksynch_get_value(&ostd->suspended) < 0) return false; /* Thread is sitting in suspend signal loop so we just set a flag * for when it resumes: */ /* XXX: there's no event for a client to trigger this on so not yet * tested. i#721 may help. */ ASSERT_NOT_TESTED(); ostd->retakeover = true; return true; } /* Called for os-specific takeover of a secondary thread from the one * that called dr_app_setup(). */ void os_thread_take_over_secondary(dcontext_t *dcontext) { thread_record_t **list; int num_threads; int i; /* We want to share with the thread that called dr_app_setup. */ mutex_lock(&thread_initexit_lock); get_list_of_threads(&list, &num_threads); ASSERT(num_threads >= 1); for (i = 0; i < num_threads; i++) { /* Find a thread that's already set up */ if (is_thread_signal_info_initialized(list[i]->dcontext)) break; } ASSERT(i < num_threads); ASSERT(list[i]->dcontext != dcontext); /* Assuming pthreads, prepare signal_field for sharing. */ handle_clone(list[i]->dcontext, PTHREAD_CLONE_FLAGS); share_siginfo_after_take_over(dcontext, list[i]->dcontext); mutex_unlock(&thread_initexit_lock); global_heap_free(list, num_threads*sizeof(thread_record_t*) HEAPACCT(ACCT_THREAD_MGT)); } /***************************************************************************/ uint os_random_seed(void) { uint seed; /* reading from /dev/urandom for a non-blocking random */ int urand = os_open("/dev/urandom", OS_OPEN_READ); DEBUG_DECLARE(int read = )os_read(urand, &seed, sizeof(seed)); ASSERT(read == sizeof(seed)); os_close(urand); return seed; } #ifdef RCT_IND_BRANCH /* Analyze a range in a possibly new module * return false if not a code section in a module * otherwise returns true and adds all valid targets for rct_ind_branch_check */ bool rct_analyze_module_at_violation(dcontext_t *dcontext, app_pc target_pc) { /* FIXME: note that this will NOT find the data section corresponding to the given PC * we don't yet have a corresponding get_allocation_size or an ELF header walk routine * on linux */ app_pc code_start; size_t code_size; uint prot; if (!get_memory_info(target_pc, &code_start, &code_size, &prot)) return false; /* TODO: in almost all cases expect the region at module_base+module_size to be * the corresponding data section. * Writable yet initialized data indeed needs to be processed. 
*/ if (code_size > 0) { app_pc code_end = code_start + code_size; app_pc data_start; size_t data_size; ASSERT(TESTALL(MEMPROT_READ|MEMPROT_EXEC, prot)); /* code */ if (!get_memory_info(code_end, &data_start, &data_size, &prot)) return false; ASSERT(data_start == code_end); ASSERT(TESTALL(MEMPROT_READ|MEMPROT_WRITE, prot)); /* data */ app_pc text_start = code_start; app_pc text_end = data_start + data_size; /* TODO: performance: should do this only in case relocation info is not present */ DEBUG_DECLARE(uint found = ) find_address_references(dcontext, text_start, text_end, code_start, code_end); LOG(GLOBAL, LOG_RCT, 2, PFX"-"PFX" : %d ind targets of %d code size", text_start, text_end, found, code_size); return true; } return false; } #ifdef X64 bool rct_add_rip_rel_addr(dcontext_t *dcontext, app_pc tgt _IF_DEBUG(app_pc src)) { /* FIXME PR 276762: not implemented */ return false; } #endif #endif /* RCT_IND_BRANCH */ #ifdef HOT_PATCHING_INTERFACE void* get_drmarker_hotp_policy_status_table() { ASSERT_NOT_IMPLEMENTED(false); return NULL; } void set_drmarker_hotp_policy_status_table(void *new_table) { ASSERT_NOT_IMPLEMENTED(false); } byte * hook_text(byte *hook_code_buf, const app_pc image_addr, intercept_function_t hook_func, const void *callee_arg, const after_intercept_action_t action_after, const bool abort_if_hooked, const bool ignore_cti, byte **app_code_copy_p, byte **alt_exit_tgt_p) { ASSERT_NOT_IMPLEMENTED(false); return NULL; } void unhook_text(byte *hook_code_buf, app_pc image_addr) { ASSERT_NOT_IMPLEMENTED(false); } void insert_jmp_at_tramp_entry(dcontext_t *dcontext, byte *trampoline, byte *target) { ASSERT_NOT_IMPLEMENTED(false); } #endif /* HOT_PATCHING_INTERFACE */ bool aslr_is_possible_attack(app_pc target) { /* FIXME: ASLR not implemented */ return false; } app_pc aslr_possible_preferred_address(app_pc target_addr) { /* FIXME: ASLR not implemented */ return NULL; } void take_over_primary_thread() { /* nothing to do here */ } bool os_current_user_directory(char *directory_prefix /* INOUT */, uint directory_len, bool create) { /* XXX: could share some of this code w/ corresponding windows routine */ uid_t uid = dynamorio_syscall(SYS_getuid, 0); char *directory = directory_prefix; char *dirend = directory_prefix + strlen(directory_prefix); snprintf(dirend, directory_len - (dirend - directory_prefix), "%cdpc-%d", DIRSEP, uid); directory_prefix[directory_len - 1] = '\0'; if (!os_file_exists(directory, true/*is dir*/) && create) { /* XXX: we should ensure we do not follow symlinks */ /* XXX: should add support for CREATE_DIR_FORCE_OWNER */ if (!os_create_dir(directory, CREATE_DIR_REQUIRE_NEW)) { LOG(GLOBAL, LOG_CACHE, 2, "\terror creating per-user dir %s\n", directory); return false; } else { LOG(GLOBAL, LOG_CACHE, 2, "\tcreated per-user dir %s\n", directory); } } return true; } bool os_validate_user_owned(file_t file_or_directory_handle) { /* note on Linux this scheme should never be used */ ASSERT(false && "chown Alice evilfile"); return false; } bool os_check_option_compatibility(void) { /* no options are Linux OS version dependent */ return false; } #ifdef X86_32 /* Emulate uint64 modulo and division by uint32 on ia32. * XXX: Does *not* handle 64-bit divisors! */ static uint64 uint64_divmod(uint64 dividend, uint64 divisor64, uint32 *remainder) { /* Assumes little endian, which x86 is. */ union { uint64 v64; struct { uint32 lo; uint32 hi; }; } res; uint32 upper; uint32 divisor = (uint32) divisor64; /* Our uses don't use large divisors. 
*/ ASSERT(divisor64 <= UINT_MAX && "divisor is larger than uint32 can hold"); /* Divide out the high bits first. */ res.v64 = dividend; upper = res.hi; res.hi = upper / divisor; upper %= divisor; /* Use the unsigned div instruction, which uses EDX:EAX to form a 64-bit * dividend. We only get a 32-bit quotient out, which is why we divide out * the high bits first. The quotient will fit in EAX. * * DIV r/m32 F7 /6 Unsigned divide EDX:EAX by r/m32, with result stored * in EAX <- Quotient, EDX <- Remainder. * inputs: * EAX = res.lo * EDX = upper * rm = divisor * outputs: * res.lo = EAX * *remainder = EDX * The outputs precede the inputs in gcc inline asm syntax, and so to put * inputs in EAX and EDX we use "0" and "1". */ asm ("divl %2" : "=a" (res.lo), "=d" (*remainder) : "rm" (divisor), "0" (res.lo), "1" (upper)); return res.v64; } /* Match libgcc's prototype. */ uint64 __udivdi3(uint64 dividend, uint64 divisor) { uint32 remainder; return uint64_divmod(dividend, divisor, &remainder); } /* Match libgcc's prototype. */ uint64 __umoddi3(uint64 dividend, uint64 divisor) { uint32 remainder; uint64_divmod(dividend, divisor, &remainder); return (uint64) remainder; } #elif defined (ARM) /* i#1566: for ARM, __aeabi versions are used instead of udivdi3 and umoddi3. * We link with __aeabi routines from libgcc via third_party/libgcc. */ #endif /* X86_32 */ #endif /* !NOT_DYNAMORIO_CORE_PROPER: around most of file, to exclude preload */ /**************************************************************************** * Page size discovery and query */ /* This variable is only used by os_set_page_size and os_page_size, but those * functions may be called before libdynamorio.so has been relocated. So check * the disassembly of those functions: there should be no relocations. */ static size_t page_size = 0; /* Return true if size is a multiple of the page size. * XXX: This function may be called when DynamoRIO is in a fragile state, or not * yet relocated, so keep this self-contained and do not use global variables or * logging. */ static bool os_try_page_size(size_t size) { byte *addr = mmap_syscall(NULL, size * 2, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if ((ptr_uint_t)addr >= (ptr_uint_t)-4096) /* mmap failed: should not happen */ return false; if (munmap_syscall(addr + size, size) == 0) { /* munmap of top half succeeded: munmap bottom half and return true */ munmap_syscall(addr, size); return true; } /* munmap of top half failed: munmap whole region and return false */ munmap_syscall(addr, size * 2); return false; } /* Directly determine the granularity of memory allocation using mmap and munmap. * This is used as a last resort if the page size is required before it has been * discovered in any other way, such as from AT_PAGESZ. * XXX: This function may be called when DynamoRIO is in a fragile state, or not * yet relocated, so keep this self-contained and do not use global variables or * logging. */ static size_t os_find_page_size(void) { size_t size = 4096; if (os_try_page_size(size)) { /* Try smaller sizes. */ for (size /= 2; size > 0; size /= 2) { if (!os_try_page_size(size)) return size * 2; } } else { /* Try larger sizes. */ for (size *= 2; size * 2 > 0; size *= 2) { if (os_try_page_size(size)) return size; } } /* Something went wrong... */ return 4096; } static void os_set_page_size(size_t size) { page_size = size; /* atomic write */ } size_t os_page_size(void) { size_t size = page_size; /* atomic read */ if (size == 0) { /* XXX: On Mac OSX we should use sysctl_query on hw.pagesize. 
*/ size = os_find_page_size(); os_set_page_size(size); } return size; } void os_page_size_init(const char **env) { #if defined(LINUX) && !defined(STATIC_LIBRARY) /* On Linux we get the page size from the auxiliary vector, which is what * the C library typically does for implementing sysconf(_SC_PAGESIZE). * However, for STATIC_LIBRARY, our_environ is not guaranteed to point * at the stack as we're so late, so we do not try to read off the end of it * (i#2122). */ size_t size = page_size; /* atomic read */ if (size == 0) { ELF_AUXV_TYPE *auxv; /* Skip environment. */ while (*env != 0) ++env; /* Look for AT_PAGESZ in the auxiliary vector. */ for (auxv = (ELF_AUXV_TYPE *)(env + 1); auxv->a_type != AT_NULL; auxv++) { if (auxv->a_type == AT_PAGESZ) { os_set_page_size(auxv->a_un.a_val); break; } } } #endif /* LINUX */ } /**************************************************************************** * Tests */ #if defined(STANDALONE_UNIT_TEST) void test_uint64_divmod(void) { #ifdef X86_32 uint64 quotient; uint32 remainder; /* Simple division below 2^32. */ quotient = uint64_divmod(9, 3, &remainder); EXPECT(quotient == 3, true); EXPECT(remainder == 0, true); quotient = uint64_divmod(10, 3, &remainder); EXPECT(quotient == 3, true); EXPECT(remainder == 1, true); /* Division when upper bits are less than the divisor. */ quotient = uint64_divmod(45ULL << 31, 1U << 31, &remainder); EXPECT(quotient == 45, true); EXPECT(remainder == 0, true); /* Division when upper bits are greater than the divisor. */ quotient = uint64_divmod(45ULL << 32, 15, &remainder); EXPECT(quotient == 3ULL << 32, true); EXPECT(remainder == 0, true); quotient = uint64_divmod((45ULL << 32) + 13, 15, &remainder); EXPECT(quotient == 3ULL << 32, true); EXPECT(remainder == 13, true); /* Try calling the intrinsics. Don't divide by powers of two, gcc will * lower that to a shift. */ quotient = (45ULL << 32); quotient /= 15; EXPECT(quotient == (3ULL << 32), true); quotient = (45ULL << 32) + 13; remainder = quotient % 15; EXPECT(remainder == 13, true); #endif /* X86_32 */ } void unit_test_os(void) { test_uint64_divmod(); } #endif /* STANDALONE_UNIT_TEST */
1
11373
No need to check dynamo_exited
DynamoRIO-dynamorio
c
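The thread-takeover code in the record above follows a publish/signal/wait handshake: the initiator publishes one takeover record per target thread, signals each target, and blocks on every record's event until the target has registered itself and signaled back. A hedged, minimal pthread model of just that rendezvous (semaphores stand in for DR's event_t, and thread creation stands in for SUSPEND_SIGNAL delivery; all names are illustrative):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

#define NUM_TARGETS 3

typedef struct {
    pthread_t tid;
    sem_t done;   /* plays the role of takeover_record_t.event */
} toy_record_t;

static toy_record_t records[NUM_TARGETS];

static void *target_thread(void *arg) {
    toy_record_t *rec = arg;
    /* ... the real code adds the thread to all_threads here ... */
    printf("thread %d registered\n", (int)(rec - records));
    sem_post(&rec->done);         /* like signal_event(records[i].event) */
    return NULL;
}

int main(void) {
    int i;
    /* Publish the records, then "signal" each target (here: spawn it). */
    for (i = 0; i < NUM_TARGETS; i++) {
        sem_init(&records[i].done, 0, 0);
        pthread_create(&records[i].tid, NULL, target_thread, &records[i]);
    }
    /* Wait for every target, as os_take_over_all_unknown_threads() does. */
    for (i = 0; i < NUM_TARGETS; i++)
        sem_wait(&records[i].done);
    printf("takeover complete\n");
    for (i = 0; i < NUM_TARGETS; i++) {
        pthread_join(records[i].tid, NULL);
        sem_destroy(&records[i].done);
    }
    return 0;
}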
@@ -47,8 +47,11 @@ func (c *Config) SetDevice(deviceName string) { c.setParam("dev", deviceName) } -func (c *Config) SetTlsCertificate(caFile, certFile, certKeyFile string) { +func (c *Config) SetTlsCACertificate(caFile string) { c.AddOptions(OptionFile("ca", caFile)) +} + +func (c *Config) SetTlsPrivatePubKeys(certFile string, certKeyFile string) { c.AddOptions(OptionFile("cert", certFile)) c.AddOptions(OptionFile("key", certKeyFile)) }
1
package openvpn import ( "strconv" ) func NewConfig() *Config { return &Config{ options: make([]configOption, 0), } } type Config struct { options []configOption } type configOption interface { getName() string } func (c *Config) AddOptions(options ...configOption) { c.options = append(c.options, options...) } func (c *Config) setParam(name, value string) { c.AddOptions( OptionParam(name, value), ) } func (c *Config) setFlag(name string) { c.AddOptions( OptionFlag(name), ) } func (c *Config) SetManagementSocket(socketAddress string) { c.setParam("management", socketAddress+" unix") c.setFlag("management-client") } func (c *Config) SetPort(port int) { c.setParam("port", strconv.Itoa(port)) } func (c *Config) SetDevice(deviceName string) { c.setParam("dev", deviceName) } func (c *Config) SetTlsCertificate(caFile, certFile, certKeyFile string) { c.AddOptions(OptionFile("ca", caFile)) c.AddOptions(OptionFile("cert", certFile)) c.AddOptions(OptionFile("key", certKeyFile)) } func (c *Config) SetTlsAuth(authFile string) { c.AddOptions(OptionFile("tls-auth", authFile)) } func (c *Config) SetKeepAlive(interval, timeout int) { c.setParam("keepalive", strconv.Itoa(interval)+" "+strconv.Itoa(timeout)) } func (c *Config) SetPingTimerRemote() { c.setFlag("ping-timer-rem") } func (c *Config) SetPersistTun() { c.setFlag("persist-tun") } func (c *Config) SetPersistKey() { c.setFlag("persist-key") }
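// golint's initialisms rule spells "Tls" as "TLS", so under that convention
// the setters in the patch above would read SetTLSCACertificate /
// SetTLSPrivatePubKeys (see the review message below).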
1
10227
from linter: >method SetTlsCACertificate should be SetTLSCACertificate
mysteriumnetwork-node
go
@@ -519,11 +519,11 @@ public abstract class SpanStoreTest { Span trace2 = Span.builder().traceId(2).name("get").id(2) .timestamp((today + 2) * 1000) - .addAnnotation(Annotation.create((today + 1) * 1000, CLIENT_SEND, APP_ENDPOINT)) - .addAnnotation(Annotation.create((today + 1) * 1000, SERVER_RECV, WEB_ENDPOINT)) - .addAnnotation(Annotation.create((today + 1) * 1000, SERVER_SEND, WEB_ENDPOINT)) - .addAnnotation(Annotation.create((today + 1) * 1000, CLIENT_RECV, APP_ENDPOINT)) - .addAnnotation(Annotation.create((today + 1) * 1000, "app", APP_ENDPOINT)) + .addAnnotation(Annotation.create((today + 2) * 1000, CLIENT_SEND, APP_ENDPOINT)) + .addAnnotation(Annotation.create((today + 2) * 1000, SERVER_RECV, WEB_ENDPOINT)) + .addAnnotation(Annotation.create((today + 2) * 1000, SERVER_SEND, WEB_ENDPOINT)) + .addAnnotation(Annotation.create((today + 2) * 1000, CLIENT_RECV, APP_ENDPOINT)) + .addAnnotation(Annotation.create((today + 2) * 1000, "app", APP_ENDPOINT)) .addBinaryAnnotation(BinaryAnnotation.create("local", "app", APP_ENDPOINT)) .addBinaryAnnotation(BinaryAnnotation.create("app-b", "app", APP_ENDPOINT)) .build();
1
/** * Copyright 2015-2017 The OpenZipkin Authors * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package zipkin.storage; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.SortedSet; import java.util.TreeSet; import java.util.stream.Collectors; import java.util.stream.IntStream; import org.assertj.core.groups.Tuple; import org.junit.Before; import org.junit.Test; import zipkin.Annotation; import zipkin.BinaryAnnotation; import zipkin.Endpoint; import zipkin.Span; import zipkin.TestObjects; import zipkin.internal.CallbackCaptor; import zipkin.internal.Util; import static java.util.Arrays.asList; import static java.util.Collections.emptyList; import static java.util.stream.Collectors.toList; import static org.assertj.core.api.Assertions.assertThat; import static zipkin.Constants.CLIENT_RECV; import static zipkin.Constants.CLIENT_SEND; import static zipkin.Constants.LOCAL_COMPONENT; import static zipkin.Constants.SERVER_RECV; import static zipkin.Constants.SERVER_SEND; import static zipkin.TestObjects.APP_ENDPOINT; import static zipkin.TestObjects.TODAY; import static zipkin.TestObjects.WEB_ENDPOINT; /** * Base test for {@link SpanStore} implementations. Subtypes should create a connection to a real * backend, even if that backend is in-process. * * <p>This is a replacement for {@code com.twitter.zipkin.storage.SpanStoreSpec}. */ public abstract class SpanStoreTest { /** Should maintain state between multiple calls within a test. */ protected abstract StorageComponent storage(); protected SpanStore store() { return storage().spanStore(); } /** Blocks until the callback completes to allow read-your-writes consistency during tests. */ protected void accept(Span... spans) { CallbackCaptor<Void> captor = new CallbackCaptor<>(); storage().asyncSpanConsumer().accept(asList(spans), captor); captor.get(); // block on result } /** Clears store between tests. */ @Before public abstract void clear() throws IOException; /** Notably, the cassandra implementation has day granularity */ // Use real time, as most span-stores have TTL logic which looks back several days. 
long today = Util.midnightUTC(System.currentTimeMillis()); Endpoint ep = Endpoint.create("service", 127 << 24 | 1); long spanId = 456; Annotation ann1 = Annotation.create((today + 1) * 1000, "cs", ep); Annotation ann2 = Annotation.create((today + 2) * 1000, "sr", ep); Annotation ann3 = Annotation.create((today + 10) * 1000, "custom", ep); Annotation ann4 = Annotation.create((today + 20) * 1000, "custom", ep); Annotation ann5 = Annotation.create((today + 5) * 1000, "custom", ep); Annotation ann6 = Annotation.create((today + 6) * 1000, "custom", ep); Annotation ann7 = Annotation.create((today + 7) * 1000, "custom", ep); Annotation ann8 = Annotation.create((today + 8) * 1000, "custom", ep); Span span1 = Span.builder() .traceId(123) .name("methodcall") .id(spanId) .timestamp(ann1.timestamp).duration(9000L) .annotations(asList(ann1, ann3)) .addBinaryAnnotation(BinaryAnnotation.create("BAH", "BEH", ep)).build(); Span span2 = Span.builder() .traceId(456) .name("methodcall") .id(spanId) .timestamp(ann2.timestamp) .addAnnotation(ann2) .addBinaryAnnotation(BinaryAnnotation.create("BAH2", "BEH2", ep)).build(); Span span3 = Span.builder() .traceId(789) .name("methodcall") .id(spanId) .timestamp(ann2.timestamp).duration(18000L) .annotations(asList(ann2, ann3, ann4)) .addBinaryAnnotation(BinaryAnnotation.create("BAH2", "BEH2", ep)).build(); Span span4 = Span.builder() .traceId(999) .name("methodcall") .id(spanId) .timestamp(ann6.timestamp).duration(1000L) .annotations(asList(ann6, ann7)).build(); Span span5 = Span.builder() .traceId(999) .name("methodcall") .id(spanId) .timestamp(ann5.timestamp).duration(3000L) .annotations(asList(ann5, ann8)) .addBinaryAnnotation(BinaryAnnotation.create("BAH2", "BEH2", ep)).build(); Span spanEmptySpanName = Span.builder() .traceId(123) .name("") .id(spanId) .parentId(1L) .timestamp(ann1.timestamp).duration(1000L) .annotations(asList(ann1, ann2)).build(); Span spanEmptyServiceName = Span.builder() .traceId(123) .name("spanname") .id(spanId).build(); @Test public void getTrace_noTraceIdHighDefaultsToZero() { span1 = TestObjects.TRACE.get(0).toBuilder().traceIdHigh(0L).build(); span2 = span1.toBuilder().traceId(1111L).build(); accept(span1, span2); assertThat(store().getTrace(span1.traceId)).isEqualTo(asList(span1)); assertThat(store().getTrace(0L, span1.traceId)).isEqualTo(asList(span1)); } @Test public void getTrace_128() { span1 = span1.toBuilder().traceIdHigh(1L).build(); span2 = span1.toBuilder().traceIdHigh(2L).build(); accept(span1, span2); assertThat(store().getTrace(span1.traceIdHigh, span1.traceId)) .isEqualTo(asList(span1)); assertThat(store().getTrace(span2.traceIdHigh, span2.traceId)) .isEqualTo(asList(span2)); } @Test public void getTraces_128() { Span span1 = TestObjects.TRACE.get(0).toBuilder().traceIdHigh(1L) .binaryAnnotations(asList(BinaryAnnotation.create("key", "value1", WEB_ENDPOINT))).build(); Span span2 = span1.toBuilder().traceIdHigh(2L) .binaryAnnotations(asList(BinaryAnnotation.create("key", "value2", WEB_ENDPOINT))).build(); accept(span1, span2); assertThat( store().getTraces(QueryRequest.builder().serviceName(WEB_ENDPOINT.serviceName) .addBinaryAnnotation("key", "value2") .build())) .containsExactly(asList(span2)); } @Test public void getTrace_nullWhenNotFound() { assertThat(store().getTrace(0L, 111111L)).isNull(); assertThat(store().getTrace(222222L, 111111L)).isNull(); assertThat(store().getRawTrace(0L, 111111L)).isNull(); assertThat(store().getRawTrace(222222L, 111111L)).isNull(); } /** * Filtered traces are returned in reverse insertion 
order. This is because the primary search * interface is a timeline view, looking back from an end timestamp. */ @Test public void tracesRetrieveInOrderDesc() { accept(span2, span1.toBuilder().annotations(asList(ann3, ann1)).build()); assertThat(store().getTraces(QueryRequest.builder().serviceName("service").build())) .containsOnly(asList(span2), asList(span1)); } /** Legacy instrumentation will not set timestamp and duration explicitly */ @Test public void derivesTimestampAndDurationFromAnnotations() { accept(span1.toBuilder().timestamp(null).duration(null).build()); assertThat(store().getTrace(span1.traceIdHigh, span1.traceId)) .containsOnly(span1); } @Test public void getSpanNames() { accept(span1.toBuilder().name("yak").build(), span4); // should be in order assertThat(store().getSpanNames("service")).containsExactly("methodcall", "yak"); } @Test public void getSpanNames_allReturned() { // Assure a default spanstore limit isn't hit by assuming if 50 are returned, all are returned List<String> spanNames = new ArrayList<>(); for (int i = 0; i < 50; i++) { String suffix = i < 10 ? "0" + i : String.valueOf(i); accept(span1.toBuilder().id(i).name("yak" + suffix).build()); spanNames.add("yak" + suffix); } // should be in order assertThat(store().getSpanNames("service")).containsOnlyElementsOf(spanNames); } @Test public void getAllServiceNames_mergesAnnotation_andBinaryAnnotation() { // creates a span with mutual exclusive endpoints in binary annotations and annotations BinaryAnnotation yak = BinaryAnnotation.address("sa", Endpoint.create("yak", 127 << 24 | 1)); accept(span1.toBuilder().binaryAnnotations(asList(yak)).build()); // should be in order assertThat(store().getServiceNames()).containsExactly("service", "yak"); } @Test public void getAllServiceNames__allReturned() { // Assure a default spanstore limit isn't hit by assuming if 50 are returned, all are returned List<String> serviceNames = new ArrayList<>(); serviceNames.add("service"); for (int i = 0; i < 50; i++) { String suffix = i < 10 ? "0" + i : String.valueOf(i); BinaryAnnotation yak = BinaryAnnotation.address("sa", Endpoint.create("yak" + suffix, 127 << 24 | 1)); accept(span1.toBuilder().id(i).addBinaryAnnotation(yak).build()); serviceNames.add("yak" + suffix); } assertThat(store().getServiceNames()).containsOnlyElementsOf(serviceNames); } /** * This would only happen when the store layer is bootstrapping, or has been purged. */ @Test public void allShouldWorkWhenEmpty() { QueryRequest.Builder q = QueryRequest.builder().serviceName("service"); assertThat(store().getTraces(q.build())).isEmpty(); assertThat(store().getTraces(q.spanName("methodcall").build())).isEmpty(); assertThat(store().getTraces(q.addAnnotation("custom").build())).isEmpty(); assertThat(store().getTraces(q.addBinaryAnnotation("BAH", "BEH").build())).isEmpty(); } /** * This is unlikely and means instrumentation sends empty spans by mistake. 
*/ @Test public void allShouldWorkWhenNoAnnotationsYet() { accept(spanEmptyServiceName); QueryRequest.Builder q = QueryRequest.builder().serviceName("service"); assertThat(store().getTraces(q.build())).isEmpty(); assertThat(store().getTraces(q.spanName("methodcall").build())).isEmpty(); assertThat(store().getTraces(q.addAnnotation("custom").build())).isEmpty(); assertThat(store().getTraces(q.addBinaryAnnotation("BAH", "BEH").build())).isEmpty(); } @Test public void getTraces_spanName() { accept(span1); QueryRequest.Builder q = QueryRequest.builder().serviceName("service"); assertThat(store().getTraces(q.build())) .containsExactly(asList(span1)); assertThat(store().getTraces(q.spanName("methodcall").build())) .containsExactly(asList(span1)); assertThat(store().getTraces(q.spanName("badmethod").build())).isEmpty(); assertThat(store().getTraces(q.serviceName("badservice").build())).isEmpty(); assertThat(store().getTraces(q.spanName(null).build())).isEmpty(); } @Test public void getTraces_spanName_128() { span1 = span1.toBuilder().traceIdHigh(1L).name("foo").build(); span2 = span1.toBuilder().traceIdHigh(2L).name("bar").build(); accept(span1, span2); QueryRequest.Builder q = QueryRequest.builder().serviceName("service"); assertThat(store().getTraces(q.spanName(span1.name).build())) .containsExactly(asList(span1)); } @Test public void getTraces_serviceNameInBinaryAnnotation() { Span localTrace = Span.builder().traceId(1L).name("targz").id(1L) .timestamp(today * 1000 + 100L).duration(200L) .addBinaryAnnotation(BinaryAnnotation.create(LOCAL_COMPONENT, "archiver", ep)).build(); accept(localTrace); assertThat(store().getTraces(QueryRequest.builder().serviceName("service").build())) .containsExactly(asList(localTrace)); } /** * While large spans are discouraged, and maybe not indexed, we should be able to read them back. */ @Test public void readsBackLargeValues() { char[] kilobyteOfText = new char[1024]; Arrays.fill(kilobyteOfText, 'a'); // Make a span that's over 1KiB in size Span span = Span.builder().traceId(1L).name("big").id(1L) .timestamp(today * 1000 + 100L).duration(200L) .addBinaryAnnotation(BinaryAnnotation.create("a", new String(kilobyteOfText), ep)).build(); accept(span); // read back to ensure the data wasn't truncated assertThat(store().getTraces(QueryRequest.builder().build())) .containsExactly(asList(span)); assertThat(store().getTrace(span.traceIdHigh, span.traceId)) .isEqualTo(asList(span)); } /** * Formerly, a bug was present where cassandra didn't index more than bucket count traces per * millisecond. This stores a lot of spans to ensure indexes work under high-traffic scenarios. 
*/ @Test public void getTraces_manyTraces() { int traceCount = 1000; Span span = TestObjects.LOTS_OF_SPANS[0]; BinaryAnnotation b = span.binaryAnnotations.get(0); accept(Arrays.copyOfRange(TestObjects.LOTS_OF_SPANS, 0, traceCount)); assertThat(store().getTraces(new QueryRequest.Builder().limit(traceCount).build())) .hasSize(traceCount); QueryRequest.Builder builder = QueryRequest.builder().limit(traceCount).serviceName(b.endpoint.serviceName); assertThat(store().getTraces(builder.build())) .hasSize(traceCount); assertThat(store().getTraces(builder.spanName(span.name).build())) .hasSize(traceCount); assertThat(store().getTraces(builder.addBinaryAnnotation(b.key, new String(b.value)).build())) .hasSize(traceCount); } /** Shows that duration queries go against the root span, not the child */ @Test public void getTraces_duration() { Endpoint service1 = Endpoint.create("service1", 127 << 24 | 1); Endpoint service2 = Endpoint.create("service2", 127 << 24 | 2); Endpoint service3 = Endpoint.create("service3", 127 << 24 | 3); BinaryAnnotation.Builder component = BinaryAnnotation.builder().key(LOCAL_COMPONENT).value("archiver"); BinaryAnnotation archiver1 = component.endpoint(service1).build(); BinaryAnnotation archiver2 = component.endpoint(service2).build(); BinaryAnnotation archiver3 = component.endpoint(service3).build(); Span targz = Span.builder().traceId(1L).id(1L) .name("targz").timestamp(today * 1000 + 100L).duration(200L).addBinaryAnnotation(archiver1).build(); Span tar = Span.builder().traceId(1L).id(2L).parentId(1L) .name("tar").timestamp(today * 1000 + 200L).duration(150L).addBinaryAnnotation(archiver2).build(); Span gz = Span.builder().traceId(1L).id(3L).parentId(1L) .name("gz").timestamp(today * 1000 + 250L).duration(50L).addBinaryAnnotation(archiver3).build(); Span zip = Span.builder().traceId(3L).id(3L) .name("zip").timestamp(today * 1000 + 130L).duration(50L).addBinaryAnnotation(archiver2).build(); List<Span> trace1 = asList(targz, tar, gz); List<Span> trace2 = asList( targz.toBuilder().traceId(2L).timestamp(today * 1000 + 110L).binaryAnnotations(asList(archiver3)).build(), tar.toBuilder().traceId(2L).timestamp(today * 1000 + 210L).binaryAnnotations(asList(archiver2)).build(), gz.toBuilder().traceId(2L).timestamp(today * 1000 + 260L).binaryAnnotations(asList(archiver1)).build()); List<Span> trace3 = asList(zip); accept(trace1.toArray(new Span[0])); accept(trace2.toArray(new Span[0])); accept(trace3.toArray(new Span[0])); long lookback = 12L * 60 * 60 * 1000; // 12hrs, instead of 7days long endTs = today + 1; // greater than all timestamps above QueryRequest.Builder q = QueryRequest.builder().serviceName("service1").lookback(lookback).endTs(endTs); // Min duration is inclusive and is applied by service. 
    assertThat(store().getTraces(q.serviceName("service1").minDuration(targz.duration).build()))
        .containsExactly(trace1);

    assertThat(store().getTraces(q.serviceName("service3").minDuration(targz.duration).build()))
        .containsExactly(trace2);

    // Duration bounds aren't limited to root spans: they apply to all spans by service in a trace
    assertThat(store().getTraces(q.serviceName("service2").minDuration(zip.duration).maxDuration(tar.duration).build()))
        .containsExactly(trace3, trace2, trace1); // service2 is in the middle of trace1 and 2, but root of trace3

    // Span name should apply to the duration filter
    assertThat(
        store().getTraces(q.serviceName("service2").spanName("zip").maxDuration(zip.duration).build()))
        .containsExactly(trace3);

    // Max duration should filter out longer spans from the same service
    assertThat(store().getTraces(q.serviceName("service2").minDuration(gz.duration).maxDuration(zip.duration).build()))
        .containsExactly(trace3);
  }

  /**
   * Spans and traces are meaningless unless they have a timestamp. While unlikely, this could
   * happen if a binary annotation is logged before a timestamped one is.
   */
  @Test
  public void getTraces_absentWhenNoTimestamp() {
    // store the binary annotations
    accept(span1.toBuilder().timestamp(null).duration(null).annotations(emptyList()).build());

    assertThat(store().getTraces(QueryRequest.builder().serviceName("service").build())).isEmpty();
    assertThat(store().getTraces(QueryRequest.builder().serviceName("service").spanName("methodcall").build())).isEmpty();

    // now store the timestamped annotations
    accept(span1.toBuilder().binaryAnnotations(emptyList()).build());

    assertThat(store().getTraces(QueryRequest.builder().serviceName("service").build()))
        .containsExactly(asList(span1));
    assertThat(store().getTraces(QueryRequest.builder().serviceName("service").spanName("methodcall").build()))
        .containsExactly(asList(span1));
  }

  @Test
  public void getTraces_annotation() {
    accept(span1);

    // fetch by time-based annotation, find trace
    assertThat(store().getTraces(QueryRequest.builder().serviceName("service").addAnnotation("custom").build()))
        .containsExactly(asList(span1));

    // should find traces by the key and value annotation
    assertThat(
        store().getTraces(QueryRequest.builder().serviceName("service").addBinaryAnnotation("BAH", "BEH").build()))
        .containsExactly(asList(span1));
  }

  @Test
  public void getTraces_multipleAnnotationsBecomeAndFilter() {
    Span foo = Span.builder().traceId(1).name("call1").id(1)
        .timestamp((today + 1) * 1000)
        .addAnnotation(Annotation.create((today + 1) * 1000, "foo", ep)).build();
    // would be foo bar, except lexicographically bar precedes foo
    Span barAndFoo = Span.builder().traceId(2).name("call2").id(2)
        .timestamp((today + 2) * 1000)
        .addAnnotation(Annotation.create((today + 2) * 1000, "bar", ep))
        .addAnnotation(Annotation.create((today + 2) * 1000, "foo", ep)).build();
    Span fooAndBazAndQux = Span.builder().traceId(3).name("call3").id(3)
        .timestamp((today + 3) * 1000)
        .addAnnotation(Annotation.create((today + 3) * 1000, "foo", ep))
        .addBinaryAnnotation(BinaryAnnotation.create("baz", "qux", ep))
        .build();
    Span barAndFooAndBazAndQux = Span.builder().traceId(4).name("call4").id(4)
        .timestamp((today + 4) * 1000)
        .addAnnotation(Annotation.create((today + 4) * 1000, "bar", ep))
        .addAnnotation(Annotation.create((today + 4) * 1000, "foo", ep))
        .addBinaryAnnotation(BinaryAnnotation.create("baz", "qux", ep))
        .build();

    accept(foo, barAndFoo, fooAndBazAndQux, barAndFooAndBazAndQux);
assertThat(store().getTraces(QueryRequest.builder().serviceName("service").addAnnotation("foo").build())) .containsExactly(asList(barAndFooAndBazAndQux), asList(fooAndBazAndQux), asList(barAndFoo), asList(foo)); assertThat(store().getTraces(QueryRequest.builder().serviceName("service").addAnnotation("foo").addAnnotation("bar").build())) .containsExactly(asList(barAndFooAndBazAndQux), asList(barAndFoo)); assertThat(store().getTraces(QueryRequest.builder().serviceName("service").addAnnotation("foo").addAnnotation("bar").addBinaryAnnotation("baz", "qux").build())) .containsExactly(asList(barAndFooAndBazAndQux)); // ensure we can search only by tag/binaryAnnotation key assertThat(store().getTraces(QueryRequest.builder().serviceName("service").addAnnotation("baz").build())) .containsExactly(asList(barAndFooAndBazAndQux), asList(fooAndBazAndQux)); } /** * This test makes sure that annotation queries pay attention to which host logged an annotation. */ @Test public void getTraces_differentiateOnServiceName() { Span trace1 = Span.builder().traceId(1).name("get").id(1) .timestamp((today + 1) * 1000) .addAnnotation(Annotation.create((today + 1) * 1000, CLIENT_SEND, WEB_ENDPOINT)) .addAnnotation(Annotation.create((today + 1) * 1000, SERVER_RECV, APP_ENDPOINT)) .addAnnotation(Annotation.create((today + 1) * 1000, SERVER_SEND, APP_ENDPOINT)) .addAnnotation(Annotation.create((today + 1) * 1000, CLIENT_RECV, WEB_ENDPOINT)) .addAnnotation(Annotation.create((today + 1) * 1000, "web", WEB_ENDPOINT)) .addBinaryAnnotation(BinaryAnnotation.create("local", "web", WEB_ENDPOINT)) .addBinaryAnnotation(BinaryAnnotation.create("web-b", "web", WEB_ENDPOINT)) .build(); Span trace2 = Span.builder().traceId(2).name("get").id(2) .timestamp((today + 2) * 1000) .addAnnotation(Annotation.create((today + 1) * 1000, CLIENT_SEND, APP_ENDPOINT)) .addAnnotation(Annotation.create((today + 1) * 1000, SERVER_RECV, WEB_ENDPOINT)) .addAnnotation(Annotation.create((today + 1) * 1000, SERVER_SEND, WEB_ENDPOINT)) .addAnnotation(Annotation.create((today + 1) * 1000, CLIENT_RECV, APP_ENDPOINT)) .addAnnotation(Annotation.create((today + 1) * 1000, "app", APP_ENDPOINT)) .addBinaryAnnotation(BinaryAnnotation.create("local", "app", APP_ENDPOINT)) .addBinaryAnnotation(BinaryAnnotation.create("app-b", "app", APP_ENDPOINT)) .build(); accept(trace1, trace2); assertThat(store().getTraces(QueryRequest.builder().build())) .containsExactly(asList(trace2), asList(trace1)); // We only return traces where the service specified caused the annotation queried. 
assertThat(store().getTraces(QueryRequest.builder().serviceName("web").addAnnotation("web").build())) .containsExactly(asList(trace1)); assertThat(store().getTraces(QueryRequest.builder().serviceName("app").addAnnotation("web").build())) .isEmpty(); assertThat(store().getTraces(QueryRequest.builder().serviceName("app").addAnnotation("app").build())) .containsExactly(asList(trace2)); assertThat(store().getTraces(QueryRequest.builder().serviceName("web").addAnnotation("app").build())) .isEmpty(); // Binary annotations are returned for annotation queries assertThat(store().getTraces(QueryRequest.builder().serviceName("web").addAnnotation("web-b").build())) .containsExactly(asList(trace1)); assertThat(store().getTraces(QueryRequest.builder().serviceName("app").addAnnotation("web-b").build())) .isEmpty(); assertThat(store().getTraces(QueryRequest.builder().serviceName("app").addAnnotation("app-b").build())) .containsExactly(asList(trace2)); assertThat(store().getTraces(QueryRequest.builder().serviceName("web").addAnnotation("app-b").build())) .isEmpty(); // We only return traces where the service specified caused the binary value queried. assertThat(store().getTraces(QueryRequest.builder().serviceName("web") .addBinaryAnnotation("local", "web").build())) .containsExactly(asList(trace1)); assertThat(store().getTraces(QueryRequest.builder().serviceName("app") .addBinaryAnnotation("local", "web").build())) .isEmpty(); assertThat(store().getTraces(QueryRequest.builder().serviceName("app") .addBinaryAnnotation("local", "app").build())) .containsExactly(asList(trace2)); assertThat(store().getTraces(QueryRequest.builder().serviceName("web") .addBinaryAnnotation("local", "app").build())) .isEmpty(); } /** Make sure empty binary annotation values don't crash */ @Test public void getTraces_binaryAnnotationWithEmptyValue() { Span span = Span.builder() .traceId(1) .name("call1") .id(1) .timestamp((today + 1) * 1000) .addBinaryAnnotation(BinaryAnnotation.create("empty", "", ep)).build(); accept(span); assertThat(store().getTraces((QueryRequest.builder().serviceName("service").build()))) .containsExactly(asList(span)); assertThat(store().getTrace(span.traceIdHigh, span.traceId)) .containsExactly(span); } /** This tests that the 128bit trace id is read back from storage. */ @Test public void getTraces_128BitTraceId() { Span span = span1.toBuilder().traceIdHigh(1).build(); accept(span); assertThat(store().getTraces(QueryRequest.builder().build())) .containsExactly(asList(span)); } /** * It is expected that [[com.twitter.zipkin.storage.SpanStore.apply]] will receive the same span * id multiple times with different annotations. At query time, these must be merged. 
*/ @Test public void getTraces_mergesSpans() { accept(span1, span4, span5); // span4, span5 have the same span id SortedSet<Annotation> mergedAnnotations = new TreeSet<>(span4.annotations); mergedAnnotations.addAll(span5.annotations); Span merged = span4.toBuilder() .timestamp(mergedAnnotations.first().timestamp) .duration(mergedAnnotations.last().timestamp - mergedAnnotations.first().timestamp) .annotations(mergedAnnotations) .binaryAnnotations(span5.binaryAnnotations).build(); assertThat(store().getTraces(QueryRequest.builder().serviceName("service").build())) .containsExactly(asList(merged), asList(span1)); } /** limit should apply to traces closest to endTs */ @Test public void getTraces_limit() { accept(span1, span3); // span1's timestamp is 1000, span3's timestamp is 2000 assertThat(store().getTraces(QueryRequest.builder().serviceName("service").limit(1).build())) .containsExactly(asList(span3)); } /** Traces whose root span has timestamps before or at endTs are returned */ @Test public void getTraces_endTsAndLookback() { accept(span1, span3); // span1's timestamp is 1000, span3's timestamp is 2000 assertThat(store().getTraces(QueryRequest.builder().serviceName("service").endTs(today + 1L).build())) .containsExactly(asList(span1)); assertThat(store().getTraces(QueryRequest.builder().serviceName("service").endTs(today + 2L).build())) .containsExactly(asList(span3), asList(span1)); assertThat(store().getTraces(QueryRequest.builder().serviceName("service").endTs(today + 3L).build())) .containsExactly(asList(span3), asList(span1)); } /** Traces whose root span has timestamps between (endTs - lookback) and endTs are returned */ @Test public void getTraces_lookback() { accept(span1, span3); // span1's timestamp is 1000, span3's timestamp is 2000 assertThat( store().getTraces(QueryRequest.builder().serviceName("service").endTs(today + 1L).lookback(1L).build())) .containsExactly(asList(span1)); assertThat( store().getTraces(QueryRequest.builder().serviceName("service").endTs(today + 2L).lookback(1L).build())) .containsExactly(asList(span3), asList(span1)); assertThat( store().getTraces(QueryRequest.builder().serviceName("service").endTs(today + 3L).lookback(1L).build())) .containsExactly(asList(span3)); assertThat( store().getTraces(QueryRequest.builder().serviceName("service").endTs(today + 3L).lookback(2L).build())) .containsExactly(asList(span3), asList(span1)); } @Test public void getAllServiceNames_emptyServiceName() { accept(spanEmptyServiceName); assertThat(store().getServiceNames()).isEmpty(); } @Test public void getSpanNames_emptySpanName() { accept(spanEmptySpanName); assertThat(store().getSpanNames(spanEmptySpanName.name)).isEmpty(); } @Test public void spanNamesGoLowercase() { accept(span1); assertThat(store().getTraces(QueryRequest.builder().serviceName("service").spanName("MeThOdCaLl").build())) .containsOnly(asList(span1)); } @Test public void serviceNamesGoLowercase() { accept(span1); assertThat(store().getSpanNames("SeRvIcE")).containsExactly("methodcall"); assertThat(store().getTraces(QueryRequest.builder().serviceName("SeRvIcE").build())) .containsOnly(asList(span1)); } /** * Basic clock skew correction is something span stores should support, until the UI supports * happens-before without using timestamps. The easiest clock skew to correct is where a child * appears to happen before the parent. * * <p>It doesn't matter if clock-skew correction happens at store or query time, as long as it * occurs by the time results are returned. 
* * <p>Span stores who don't support this can override and disable this test, noting in the README * the limitation. */ @Test public void correctsClockSkew() { Endpoint client = Endpoint.create("client", 192 << 24 | 168 << 16 | 1); Endpoint frontend = Endpoint.create("frontend", 192 << 24 | 168 << 16 | 2); Endpoint backend = Endpoint.create("backend", 192 << 24 | 168 << 16 | 3); /** Intentionally not setting span.timestamp, duration */ Span parent = Span.builder() .traceId(1) .name("method1") .id(666) .addAnnotation(Annotation.create((today + 100) * 1000, CLIENT_SEND, client)) .addAnnotation(Annotation.create((today + 95) * 1000, SERVER_RECV, frontend)) // before client sends .addAnnotation(Annotation.create((today + 120) * 1000, SERVER_SEND, frontend)) // before client receives .addAnnotation(Annotation.create((today + 135) * 1000, CLIENT_RECV, client)).build(); /** Intentionally not setting span.timestamp, duration */ Span remoteChild = Span.builder() .traceId(1) .name("method2") .id(777) .parentId(666L) .addAnnotation(Annotation.create((today + 100) * 1000, CLIENT_SEND, frontend)) .addAnnotation(Annotation.create((today + 115) * 1000, SERVER_RECV, backend)) .addAnnotation(Annotation.create((today + 120) * 1000, SERVER_SEND, backend)) .addAnnotation(Annotation.create((today + 115) * 1000, CLIENT_RECV, frontend)) // before server sent .build(); /** Local spans must explicitly set timestamp */ Span localChild = Span.builder() .traceId(1) .name("local") .id(778) .parentId(666L) .timestamp((today + 101) * 1000).duration(50L) .addBinaryAnnotation(BinaryAnnotation.create(LOCAL_COMPONENT, "framey", frontend)).build(); List<Span> skewed = asList(parent, remoteChild, localChild); // There's clock skew when the child doesn't happen after the parent assertThat(skewed.get(0).annotations.get(0).timestamp) .isLessThanOrEqualTo(skewed.get(1).annotations.get(0).timestamp) .isLessThanOrEqualTo(skewed.get(2).timestamp); // local span // Regardless of when clock skew is corrected, it should be corrected before traces return accept(parent, remoteChild, localChild); List<Span> adjusted = store().getTrace(localChild.traceIdHigh, localChild.traceId); // After correction, the child happens after the parent assertThat(adjusted.get(0).timestamp) .isLessThanOrEqualTo(adjusted.get(0).timestamp); // After correction, children happen after their parent assertThat(adjusted.get(0).timestamp) .isLessThanOrEqualTo(adjusted.get(1).timestamp) .isLessThanOrEqualTo(adjusted.get(2).timestamp); // And we do not change the parent (client) duration, due to skew in the child (server) assertThat(adjusted.get(0).duration).isEqualTo(clientDuration(skewed.get(0))); assertThat(adjusted.get(1).duration).isEqualTo(clientDuration(skewed.get(1))); assertThat(adjusted.get(2).duration).isEqualTo(skewed.get(2).duration); } /** * This test shows that regardless of whether span.timestamp and duration are set directly or * derived from annotations, the client wins vs the server. This is important because the client * holds the critical path of a shared span. 
*/ @Test public void clientTimestampAndDurationWinInSharedSpan() { Endpoint client = Endpoint.create("client", 192 << 24 | 168 << 16 | 1); Endpoint server = Endpoint.create("server", 192 << 24 | 168 << 16 | 2); long clientTimestamp = (today + 100) * 1000; long clientDuration = 35 * 1000; // both client and server set span.timestamp, duration Span clientView = Span.builder().traceId(1).name("direct").id(666) .timestamp(clientTimestamp).duration(clientDuration) .addAnnotation(Annotation.create((today + 100) * 1000, CLIENT_SEND, client)) .addAnnotation(Annotation.create((today + 135) * 1000, CLIENT_RECV, client)) .build(); Span serverView = Span.builder().traceId(1).name("direct").id(666) .timestamp((today + 105) * 1000).duration(25 * 1000L) .addAnnotation(Annotation.create((today + 105) * 1000, SERVER_RECV, server)) .addAnnotation(Annotation.create((today + 130) * 1000, SERVER_SEND, server)) .build(); // neither client, nor server set span.timestamp, duration Span clientViewDerived = Span.builder().traceId(1).name("derived").id(666) .addAnnotation(Annotation.create(clientTimestamp, CLIENT_SEND, client)) .addAnnotation(Annotation.create(clientTimestamp + clientDuration, CLIENT_RECV, client)) .build(); Span serverViewDerived = Span.builder().traceId(1).name("derived").id(666) .addAnnotation(Annotation.create((today + 105) * 1000, SERVER_RECV, server)) .addAnnotation(Annotation.create((today + 130) * 1000, SERVER_SEND, server)) .build(); accept(serverView, serverViewDerived); // server span hits the collection tier first accept(clientView, clientViewDerived); // intentionally different collection event for (Span span : store().getTrace(clientView.traceIdHigh, clientView.traceId)) { assertThat(span.timestamp).isEqualTo(clientTimestamp); assertThat(span.duration).isEqualTo(clientDuration); } } // Bugs have happened in the past where trace limit was mistaken for span count. @Test public void traceWithManySpans() { Span[] trace = new Span[101]; trace[0] = TestObjects.TRACE.get(0); IntStream.range(0, 100).forEach(i -> { Span s = TestObjects.TRACE.get(1); trace[i + 1] = s.toBuilder() .id(s.id + i) .timestamp(s.timestamp + i) .annotations(s.annotations.stream() .map(a -> Annotation.create(a.timestamp + i, a.value, a.endpoint)) .collect(toList())) .build(); }); accept(trace); String serviceName = trace[1].annotations.get(0).endpoint.serviceName; assertThat(store().getTraces(QueryRequest.builder().serviceName(serviceName).build())) .containsExactly(asList(trace)); assertThat(store().getTrace(trace[0].traceIdHigh, trace[0].traceId)) .containsExactly(trace); assertThat(store().getRawTrace(trace[0].traceIdHigh, trace[0].traceId)) .containsAll(asList(trace)); // order isn't guaranteed in raw trace } /** * Spans report depth-first. Make sure the client timestamp is preferred when instrumentation * don't add a timestamp. 
*/ @Test public void whenSpanTimestampIsMissingClientSendIsPreferred() { Endpoint frontend = Endpoint.create("frontend", 192 << 24 | 168 << 16 | 2); Annotation cs = Annotation.create((today + 50) * 1000, CLIENT_SEND, frontend); Annotation cr = Annotation.create((today + 150) * 1000, CLIENT_RECV, frontend); Endpoint backend = Endpoint.create("backend", 192 << 24 | 168 << 16 | 2); Annotation sr = Annotation.create((today + 95) * 1000, SERVER_RECV, backend); Annotation ss = Annotation.create((today + 100) * 1000, SERVER_SEND, backend); Span span = Span.builder().traceId(1).name("method1").id(666).build(); // Simulate the server-side of a shared span arriving first accept(span.toBuilder().addAnnotation(sr).addAnnotation(ss).build()); accept(span.toBuilder().addAnnotation(cs).addAnnotation(cr).build()); // Make sure that the client's timestamp won assertThat(store().getTrace(span1.traceIdHigh, span.traceId)) .containsExactly(span.toBuilder() .timestamp(cs.timestamp) .duration(cr.timestamp - cs.timestamp) .annotations(asList(cs, sr, ss, cr)).build()); } // This supports the "raw trace" feature, which skips application-level data cleaning @Test public void rawTrace_doesntPerformQueryTimeAdjustment() { Endpoint producer = Endpoint.create("producer", 192 << 24 | 168 << 16 | 1); Annotation ms = Annotation.create((today + 95) * 1000, "ms", producer); Endpoint consumer = Endpoint.create("consumer", 192 << 24 | 168 << 16 | 2); Annotation mr = Annotation.create((today + 100) * 1000, "mr", consumer); Span span = Span.builder().traceId(1).name("message").id(666).build(); // Simulate instrumentation that sends annotations one at-a-time. // This should prevent the collection tier from being able to calculate duration. accept(span.toBuilder().addAnnotation(ms).build()); accept(span.toBuilder().addAnnotation(mr).build()); // Normally, span store implementations will merge spans by id and add duration by query time assertThat(store().getTrace(span1.traceIdHigh, span.traceId)) .containsExactly(span.toBuilder() .timestamp(ms.timestamp) .duration(mr.timestamp - ms.timestamp) .annotations(asList(ms, mr)).build()); // Since a collector never saw both sides of the span, we'd not see duration in the raw trace. 
for (Span raw : store().getRawTrace(span1.traceIdHigh, span.traceId)) { assertThat(raw.timestamp).isNull(); assertThat(raw.duration).isNull(); } } @Test public void getTraces_acrossServices() { List<BinaryAnnotation> annotations = IntStream.rangeClosed(1, 10).mapToObj(i -> BinaryAnnotation.create(LOCAL_COMPONENT, "serviceAnnotation", Endpoint.create("service" + i, 127 << 24 | i))) .collect(Collectors.toList()); long gapBetweenSpans = 100; List<Span> earlySpans = IntStream.rangeClosed(1, 10).mapToObj(i -> Span.builder().name("early") .traceId(i).id(i).timestamp((today - i) * 1000).duration(1L) .addBinaryAnnotation(annotations.get(i - 1)).build()).collect(toList()); List<Span> lateSpans = IntStream.rangeClosed(1, 10).mapToObj(i -> Span.builder().name("late") .traceId(i + 10).id(i + 10).timestamp((today + gapBetweenSpans - i) * 1000).duration(1L) .addBinaryAnnotation(annotations.get(i - 1)).build()).collect(toList()); accept(earlySpans.toArray(new Span[10])); accept(lateSpans.toArray(new Span[10])); List<Span>[] earlyTraces = earlySpans.stream().map(Collections::singletonList).toArray(List[]::new); List<Span>[] lateTraces = lateSpans.stream().map(Collections::singletonList).toArray(List[]::new); //sanity checks assertThat(store().getTraces(QueryRequest.builder().serviceName("service1").build())) .containsExactly(lateTraces[0], earlyTraces[0]); assertThat(store().getTraces(QueryRequest.builder().limit(20).build())) .hasSize(20); assertThat(store().getTraces(QueryRequest.builder().limit(10).build())) .containsExactly(lateTraces); assertThat(store().getTraces(QueryRequest.builder().limit(20) .endTs(today + gapBetweenSpans).lookback(gapBetweenSpans).build())) .containsExactly(lateTraces); assertThat(store().getTraces(QueryRequest.builder().limit(20) .endTs(today).build())) .containsExactly(earlyTraces); } /** * Shared server-spans are not supposed to report timestamp, as that interferes with the * authoritative timestamp of the caller. This makes sure that server spans can still be looked up * when they didn't start a span. */ @Test public void traceIsSearchableBySRServiceName() throws Exception { Span clientSpan = Span.builder().traceId(20L).id(22L).name("").parentId(21L) .addAnnotation(Annotation.create((TODAY - 4) * 1000L, CLIENT_SEND, WEB_ENDPOINT)) .build(); Span serverSpan = Span.builder().traceId(20L).id(22L).name("get").parentId(21L) .addAnnotation(Annotation.create(TODAY * 1000L, SERVER_RECV, APP_ENDPOINT)) .build(); accept(serverSpan, clientSpan); List<List<Span>> traces = storage().spanStore().getTraces( QueryRequest.builder().serviceName(APP_ENDPOINT.serviceName).build() ); assertThat(traces) .hasSize(1) // we can lookup by the server's name .flatExtracting(l -> l) .extracting(s -> s.timestamp, s -> s.duration) .contains(Tuple.tuple((TODAY - 4) * 1000L, 4000L)); // but the client's timestamp wins } /** Not a good span name, but better to test it than break mysteriously */ @Test public void spanNameIsJson() { String json = "{\"foo\":\"bar\"}"; Span withJsonSpanName = span1.toBuilder().name(json).build(); accept(withJsonSpanName); assertThat(store().getTraces(QueryRequest.builder().spanName(json).build())) .flatExtracting(t -> t) .contains(withJsonSpanName); } static long clientDuration(Span span) { long[] timestamps = span.annotations.stream() .filter(a -> a.value.startsWith("c")) .mapToLong(a -> a.timestamp) .sorted().toArray(); return timestamps[1] - timestamps[0]; } }
1
12,605
this data had incorrectly aligned timestamps (which wasn't the point of the test)
openzipkin-zipkin
java
@@ -280,6 +280,16 @@ func (w *Workflow) populateStep(ctx context.Context, s *Step) error { return step.populate(ctx, s) } +func getUser() string { + if cu, err := user.Current(); err == nil { + return cu.Username + } + if hn, err := os.Hostname(); err == nil { + return hn + } + return "unknown" +} + func (w *Workflow) populate(ctx context.Context) error { var err error if w.ComputeClient == nil {
1
// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package workflow describes a daisy workflow. package daisy import ( "bufio" "bytes" "context" "encoding/json" "fmt" "io" "io/ioutil" "log" "os" "os/user" "path" "path/filepath" "reflect" "strconv" "strings" "sync" "time" "cloud.google.com/go/storage" "github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute" "google.golang.org/api/iterator" "google.golang.org/api/option" ) const defaultTimeout = "10m" type gcsLogger struct { client *storage.Client bucket, object string buf *bytes.Buffer ctx context.Context } func (l *gcsLogger) Write(b []byte) (int, error) { if l.buf == nil { l.buf = new(bytes.Buffer) } l.buf.Write(b) wc := l.client.Bucket(l.bucket).Object(l.object).NewWriter(l.ctx) wc.ContentType = "text/plain" n, err := wc.Write(l.buf.Bytes()) if err != nil { return 0, err } if err := wc.Close(); err != nil { return 0, err } return n, err } type syncedWriter struct { buf *bufio.Writer mx sync.Mutex } func (l *syncedWriter) Write(b []byte) (int, error) { l.mx.Lock() defer l.mx.Unlock() return l.buf.Write(b) } func (l *syncedWriter) Flush() error { l.mx.Lock() defer l.mx.Unlock() return l.buf.Flush() } func daisyBkt(ctx context.Context, client *storage.Client, project string) (string, error) { dBkt := project + "-daisy-bkt" it := client.Buckets(ctx, project) for bucketAttrs, err := it.Next(); err != iterator.Done; bucketAttrs, err = it.Next() { if err != nil { return "", err } if bucketAttrs.Name == dBkt { return dBkt, nil } } if err := client.Bucket(dBkt).Create(ctx, project, nil); err != nil { return "", err } return dBkt, nil } type vars struct { Value string Required bool Description string } func (v *vars) UnmarshalJSON(b []byte) error { var sv string if err := json.Unmarshal(b, &sv); err == nil { v.Value = sv return nil } // We can't unmarshal into vars directly as it would create an infinite loop. type aVars vars return json.Unmarshal(b, &struct{ *aVars }{aVars: (*aVars)(v)}) } // Workflow is a single Daisy workflow workflow. type Workflow struct { // Populated on New() construction. Cancel chan struct{} `json:"-"` // Workflow template fields. // Workflow name. Name string // Project to run in. Project string // Zone to run in. Zone string // GCS Path to use for scratch data and write logs/results to. GCSPath string // Path to OAuth credentials file. OAuthPath string `json:",omitempty"` // Sources used by this workflow, map of destination to source. Sources map[string]string `json:",omitempty"` // Vars defines workflow variables, substitution is done at Workflow run time. Vars map[string]vars `json:",omitempty"` Steps map[string]*Step // Map of steps to their dependencies. Dependencies map[string][]string // Working fields. 
autovars map[string]string workflowDir string parent *Workflow bucket string scratchPath string sourcesPath string logsPath string outsPath string username string gcsLogging bool gcsLogWriter *syncedWriter ComputeClient compute.Client `json:"-"` StorageClient *storage.Client `json:"-"` id string logger *log.Logger cleanupHooks []func() error cleanupHooksMx sync.Mutex } func (w *Workflow) AddVar(k, v string) { if w.Vars == nil { w.Vars = map[string]vars{} } w.Vars[k] = vars{Value: v} } func (w *Workflow) addCleanupHook(hook func() error) { w.cleanupHooksMx.Lock() w.cleanupHooks = append(w.cleanupHooks, hook) w.cleanupHooksMx.Unlock() } // Validate runs validation on the workflow. func (w *Workflow) Validate(ctx context.Context) error { if err := w.validateRequiredFields(); err != nil { close(w.Cancel) return fmt.Errorf("error validating workflow: %v", err) } if err := w.populate(ctx); err != nil { close(w.Cancel) return fmt.Errorf("error populating workflow: %v", err) } w.logger.Print("Validating workflow") if err := w.validate(ctx); err != nil { w.logger.Printf("Error validating workflow: %v", err) close(w.Cancel) return err } w.logger.Print("Validation Complete") return nil } // Run runs a workflow. func (w *Workflow) Run(ctx context.Context) error { w.gcsLogging = true if err := w.Validate(ctx); err != nil { return err } defer w.cleanup() w.logger.Println("Using the GCS path", "gs://"+path.Join(w.bucket, w.scratchPath)) w.logger.Print("Uploading sources") if err := w.uploadSources(ctx); err != nil { w.logger.Printf("Error uploading sources: %v", err) close(w.Cancel) return err } w.logger.Print("Running workflow") if err := w.run(ctx); err != nil { w.logger.Printf("Error running workflow: %v", err) select { case <-w.Cancel: default: close(w.Cancel) } return err } return nil } func (w *Workflow) String() string { f := "{Name:%q Project:%q Zone:%q Bucket:%q OAuthPath:%q Sources:%s Vars:%s Steps:%s Dependencies:%s id:%q}" return fmt.Sprintf(f, w.Name, w.Project, w.Zone, w.bucket, w.OAuthPath, w.Sources, w.Vars, w.Steps, w.Dependencies, w.id) } func (w *Workflow) cleanup() { w.logger.Printf("Workflow %q cleaning up (this may take up to 2 minutes.", w.Name) for _, hook := range w.cleanupHooks { if err := hook(); err != nil { w.logger.Printf("Error returned from cleanup hook: %s", err) } } if w.gcsLogWriter != nil { w.gcsLogWriter.Flush() } } func (w *Workflow) genName(n string) string { name := w.Name for parent := w.parent; parent != nil; parent = parent.parent { name = parent.Name + "-" + name } prefix := fmt.Sprintf("%s-%s", n, name) if len(prefix) > 57 { prefix = prefix[0:56] } result := fmt.Sprintf("%s-%s", prefix, w.id) if len(result) > 64 { result = result[0:63] } return strings.ToLower(result) } func (w *Workflow) getSourceGCSAPIPath(s string) string { return fmt.Sprintf("%s/%s", gcsAPIBase, path.Join(w.bucket, w.sourcesPath, s)) } func (w *Workflow) populateStep(ctx context.Context, s *Step) error { if s.Timeout == "" { s.Timeout = defaultTimeout } timeout, err := time.ParseDuration(s.Timeout) if err != nil { return err } s.timeout = timeout var step stepImpl if step, err = s.stepImpl(); err != nil { return err } return step.populate(ctx, s) } func (w *Workflow) populate(ctx context.Context) error { var err error if w.ComputeClient == nil { w.ComputeClient, err = compute.NewClient(ctx, option.WithCredentialsFile(w.OAuthPath)) if err != nil { return err } } if w.StorageClient == nil { w.StorageClient, err = storage.NewClient(ctx, option.WithCredentialsFile(w.OAuthPath)) if err != nil { 
return err } } if w.GCSPath == "" { dBkt, err := daisyBkt(ctx, w.StorageClient, w.Project) if err != nil { return err } w.GCSPath = "gs://" + dBkt } w.id = randString(5) now := time.Now().UTC() cu, err := user.Current() if err != nil { w.username = "unknown" } else { w.username = cu.Username } cwd, _ := os.Getwd() w.autovars = map[string]string{ "ID": w.id, "DATE": now.Format("20060102"), "DATETIME": now.Format("20060102150405"), "TIMESTAMP": strconv.FormatInt(now.Unix(), 10), "USERNAME": w.username, "WFDIR": w.workflowDir, "CWD": cwd, } var replacements []string for k, v := range w.autovars { replacements = append(replacements, fmt.Sprintf("${%s}", k), v) } for k, v := range w.Vars { replacements = append(replacements, fmt.Sprintf("${%s}", k), v.Value) } substitute(reflect.ValueOf(w).Elem(), strings.NewReplacer(replacements...)) // Set up GCS paths. bkt, p, err := splitGCSPath(w.GCSPath) if err != nil { return err } w.bucket = bkt w.scratchPath = path.Join(p, fmt.Sprintf("daisy-%s-%s-%s", w.Name, now.Format("20060102-15:04:05"), w.id)) w.sourcesPath = path.Join(w.scratchPath, "sources") w.logsPath = path.Join(w.scratchPath, "logs") w.outsPath = path.Join(w.scratchPath, "outs") // Do replacement for autovars. Autovars pull from workflow fields, // so Vars replacement must run before this to resolve the final // value for those fields. w.autovars["NAME"] = w.Name w.autovars["ZONE"] = w.Zone w.autovars["PROJECT"] = w.Project w.autovars["GCSPATH"] = w.GCSPath w.autovars["SCRATCHPATH"] = fmt.Sprintf("gs://%s/%s", w.bucket, w.scratchPath) w.autovars["SOURCESPATH"] = fmt.Sprintf("gs://%s/%s", w.bucket, w.sourcesPath) w.autovars["LOGSPATH"] = fmt.Sprintf("gs://%s/%s", w.bucket, w.logsPath) w.autovars["OUTSPATH"] = fmt.Sprintf("gs://%s/%s", w.bucket, w.outsPath) replacements = []string{} for k, v := range w.autovars { replacements = append(replacements, fmt.Sprintf("${%s}", k), v) } substitute(reflect.ValueOf(w).Elem(), strings.NewReplacer(replacements...)) w.populateLogger(ctx) for name, s := range w.Steps { s.name = name s.w = w if err := w.populateStep(ctx, s); err != nil { return err } } return nil } func (w *Workflow) populateLogger(ctx context.Context) { if w.logger != nil { return } name := w.Name for parent := w.parent; parent != nil; parent = parent.parent { name = parent.Name + "." + name } prefix := fmt.Sprintf("[%s]: ", name) flags := log.Ldate | log.Ltime writers := []io.Writer{os.Stdout} if w.gcsLogWriter == nil { if !w.gcsLogging { w.gcsLogWriter = &syncedWriter{buf: bufio.NewWriter(ioutil.Discard)} } w.gcsLogWriter = &syncedWriter{buf: bufio.NewWriter(&gcsLogger{client: w.StorageClient, bucket: w.bucket, object: path.Join(w.logsPath, "daisy.log"), ctx: ctx})} go func() { for { time.Sleep(5 * time.Second) w.gcsLogWriter.Flush() } }() writers = append(writers, w.gcsLogWriter) } w.logger = log.New(io.MultiWriter(writers...), prefix, flags) } // AddDependency creates a dependency of dependent on each dependency. Returns an // error if dependent or dependency are not steps in this workflow. 
func (w *Workflow) AddDependency(dependent string, dependencies ...string) error { if _, ok := w.Steps[dependent]; !ok { return fmt.Errorf("can't create dependency: step %q does not exist", dependent) } if w.Dependencies == nil { w.Dependencies = map[string][]string{} } for _, dependency := range dependencies { if _, ok := w.Steps[dependency]; !ok { return fmt.Errorf("can't create dependency: step %q does not exist", dependency) } if !strIn(dependency, w.Dependencies[dependent]) { // Don't add if dependency already exists. w.Dependencies[dependent] = append(w.Dependencies[dependent], dependency) } } return nil } // NewIncludedWorkflow instantiates a new workflow with the same resources as the parent. func (w *Workflow) NewIncludedWorkflow() *Workflow { iw := New() iw.Cancel = w.Cancel shareWorkflowResources(w, iw) return iw } // NewIncludedWorkflowFromFile reads and unmarshals a workflow with the same resources as the parent. func (w *Workflow) NewIncludedWorkflowFromFile(file string) (*Workflow, error) { iw := w.NewIncludedWorkflow() if !filepath.IsAbs(file) { file = filepath.Join(w.workflowDir, file) } if err := readWorkflow(file, iw); err != nil { return nil, err } return iw, nil } // NewStep instantiates a new, typeless step for this workflow. // The step type must be specified before running this workflow. func (w *Workflow) NewStep(name string) (*Step, error) { if _, ok := w.Steps[name]; ok { return nil, fmt.Errorf("can't create step %q: a step already exists with that name", name) } s := &Step{name: name, w: w} if w.Steps == nil { w.Steps = map[string]*Step{} } w.Steps[name] = s return s, nil } // NewSubWorkflow instantiates a new workflow as a child to this workflow. func (w *Workflow) NewSubWorkflow() *Workflow { sw := New() sw.Cancel = w.Cancel sw.parent = w return sw } // NewSubWorkflowFromFile reads and unmarshals a workflow as a child to this workflow. func (w *Workflow) NewSubWorkflowFromFile(file string) (*Workflow, error) { sw := w.NewSubWorkflow() if !filepath.IsAbs(file) { file = filepath.Join(w.workflowDir, file) } if err := readWorkflow(file, sw); err != nil { return nil, err } return sw, nil } // Print populates then pretty prints the workflow. func (w *Workflow) Print(ctx context.Context) { w.gcsLogging = false if err := w.populate(ctx); err != nil { fmt.Println("Error running populate:", err) } b, err := json.MarshalIndent(w, "", " ") if err != nil { fmt.Println("Error marshalling workflow for printing:", err) } fmt.Println(string(b)) } func (w *Workflow) run(ctx context.Context) error { return w.traverseDAG(func(s *Step) error { return w.runStep(ctx, s) }) } func (w *Workflow) runStep(ctx context.Context, s *Step) error { timeout := make(chan struct{}) go func() { time.Sleep(s.timeout) close(timeout) }() e := make(chan error) go func() { e <- s.run(ctx) }() select { case err := <-e: return err case <-timeout: return fmt.Errorf("step %q did not stop in specified timeout of %s", s.name, s.timeout) } } // Concurrently traverse the DAG, running func f on each step. // Return an error if f returns an error on any step. func (w *Workflow) traverseDAG(f func(*Step) error) error { // waiting = steps and the dependencies they are waiting for. // running = the currently running steps. // start = map of steps' start channels/semaphores. // done = map of steps' done channels for signaling step completion. waiting := map[string][]string{} var running []string start := map[string]chan error{} done := map[string]chan error{} // Setup: channels, copy dependencies. 
for name := range w.Steps { waiting[name] = w.Dependencies[name] start[name] = make(chan error) done[name] = make(chan error) } // Setup: goroutine for each step. Each waits to be notified to start. for name, s := range w.Steps { go func(name string, s *Step) { // Wait for signal, then run the function. Return any errs. if err := <-start[name]; err != nil { done[name] <- err } else if err := f(s); err != nil { done[name] <- err } close(done[name]) }(name, s) } // Main signaling logic. for len(waiting) != 0 || len(running) != 0 { // If we got a Cancel signal, kill all waiting steps. // Let running steps finish. select { case <-w.Cancel: waiting = map[string][]string{} default: } // Kick off all steps that aren't waiting for anything. for name, deps := range waiting { if len(deps) == 0 { delete(waiting, name) running = append(running, name) close(start[name]) } } // Sanity check. There should be at least one running step, // but loop back through if there isn't. if len(running) == 0 { continue } // Get next finished step. Return the step error if it erred. finished, err := stepsListen(running, done) if err != nil { return err } // Remove finished step from other steps' waiting lists. for name, deps := range waiting { waiting[name] = filter(deps, finished) } // Remove finished from currently running list. running = filter(running, finished) } return nil } // New instantiates a new workflow. func New() *Workflow { // We can't use context.WithCancel as we use the context even after cancel for cleanup. w := &Workflow{Cancel: make(chan struct{})} // Init nil'ed fields w.Sources = map[string]string{} w.Vars = map[string]vars{} w.Steps = map[string]*Step{} w.Dependencies = map[string][]string{} w.autovars = map[string]string{} initWorkflowResources(w) return w } // NewFromFile reads and unmarshals a workflow file. // Recursively reads subworkflow steps as well. func NewFromFile(file string) (*Workflow, error) { w := New() if err := readWorkflow(file, w); err != nil { return nil, err } return w, nil } func readWorkflow(file string, w *Workflow) error { data, err := ioutil.ReadFile(file) if err != nil { return err } w.workflowDir, err = filepath.Abs(filepath.Dir(file)) if err != nil { return err } if err := json.Unmarshal(data, &w); err != nil { // If this is a syntax error return a useful error. sErr, ok := err.(*json.SyntaxError) if !ok { return err } // Byte number where the error line starts. start := bytes.LastIndex(data[:sErr.Offset], []byte("\n")) + 1 // Assume end byte of error line is EOF unless this isn't the last line. end := len(data) if i := bytes.Index(data[start:], []byte("\n")); i >= 0 { end = start + i } // Line number of error. line := bytes.Count(data[:start], []byte("\n")) + 1 // Position of error in line (where to place the '^'). pos := int(sErr.Offset) - start if pos != 0 { pos = pos - 1 } return fmt.Errorf("%s: JSON syntax error in line %d: %s \n%s\n%s^", file, line, err, data[start:end], strings.Repeat(" ", pos)) } if w.OAuthPath != "" && !filepath.IsAbs(w.OAuthPath) { w.OAuthPath = filepath.Join(w.workflowDir, w.OAuthPath) } for name, s := range w.Steps { s.name = name s.w = w var err error if s.SubWorkflow != nil { if s.SubWorkflow.w, err = w.NewSubWorkflowFromFile(s.SubWorkflow.Path); err != nil { return err } } if s.IncludeWorkflow != nil { if s.IncludeWorkflow.w, err = w.NewIncludedWorkflowFromFile(s.IncludeWorkflow.Path); err != nil { return err } } } return nil } // stepsListen returns the first step that finishes/errs. 
func stepsListen(names []string, chans map[string]chan error) (string, error) { cases := make([]reflect.SelectCase, len(names)) for i, name := range names { cases[i] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(chans[name])} } caseIndex, value, recvOk := reflect.Select(cases) name := names[caseIndex] if recvOk { // recvOk -> a step failed, return the error. return name, value.Interface().(error) } return name, nil }
1
6,683
Since this is workflow-agnostic, should we put this in common? (see the sketch after this record)
GoogleCloudPlatform-compute-image-tools
go
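The review comment above is about code placement rather than behavior. A minimal sketch of what the extraction might look like, assuming a hypothetical shared `common` package — the package name and location are illustrative, not from the source; the body is the patch's logic unchanged, just exported:

```go
// Package common collects helpers shared across daisy packages.
// (Hypothetical package; the real location would be decided in review.)
package common

import (
	"os"
	"os/user"
)

// GetUser returns the current username, falling back to the hostname
// and finally to "unknown" -- the same fallback chain as the patch,
// only exported so other packages can reuse it.
func GetUser() string {
	if cu, err := user.Current(); err == nil {
		return cu.Username
	}
	if hn, err := os.Hostname(); err == nil {
		return hn
	}
	return "unknown"
}
```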
@@ -54,6 +54,13 @@ class SourceAnalyzer(object):
     """
     raise NotImplementedError("Subclasses should implement this!")

+    @classmethod
+    def version_compatible(cls, configured_binary, environ):
+        """
+        Check the version compatibility of the given analyzer binary.
+        """
+        raise NotImplementedError("Subclasses should implement this!")
+
     @classmethod
     def construct_config_handler(cls, args, context):
         """ Should return a subclass of AnalyzerConfigHandler."""
1
# ------------------------------------------------------------------------- # The CodeChecker Infrastructure # This file is distributed under the University of Illinois Open Source # License. See LICENSE.TXT for details. # ------------------------------------------------------------------------- """ Base class for various source analyzers. """ from __future__ import print_function from __future__ import division from __future__ import absolute_import from abc import ABCMeta, abstractmethod import os import signal import subprocess import sys from codechecker_common.logger import get_logger LOG = get_logger('analyzer') class SourceAnalyzer(object): """ Base class for different source analyzers. """ __metaclass__ = ABCMeta def __init__(self, config_handler, buildaction): self.__config_handler = config_handler self.__build_action = buildaction # Currently analyzed source file. self.source_file = '' @property def buildaction(self): return self.__build_action @property def config_handler(self): return self.__config_handler @abstractmethod def construct_analyzer_cmd(self, result_handler): raise NotImplementedError("Subclasses should implement this!") @classmethod def resolve_missing_binary(cls, configured_binary, environ): """ In case of the configured binary for the analyzer is not found in the PATH, this method is used to find a callable binary. """ raise NotImplementedError("Subclasses should implement this!") @classmethod def construct_config_handler(cls, args, context): """ Should return a subclass of AnalyzerConfigHandler.""" raise NotImplementedError("Subclasses should implement this!") @abstractmethod def get_analyzer_mentioned_files(self, output): """ Return a collection of files that were mentioned by the analyzer in its standard outputs, which should be analyzer_stdout or analyzer_stderr from a result handler. """ raise NotImplementedError("Subclasses should implement this!") @abstractmethod def construct_result_handler(self, buildaction, report_output, severity_map, skiplist_handler): """ This method constructs the class that is responsible to handle the results of the analysis. The result should be a subclass of ResultHandler """ raise NotImplementedError("Subclasses should implement this!") def analyze(self, analyzer_cmd, res_handler, env=None, proc_callback=None): """ Run the analyzer. """ LOG.debug('Running analyzer ...') LOG.debug_analyzer('\n%s', ' '.join(analyzer_cmd)) res_handler.analyzer_cmd = analyzer_cmd try: ret_code, stdout, stderr \ = SourceAnalyzer.run_proc(analyzer_cmd, env, res_handler.buildaction.directory, proc_callback) res_handler.analyzer_returncode = ret_code res_handler.analyzer_stdout = stdout res_handler.analyzer_stderr = stderr return res_handler except Exception as ex: LOG.error(ex) res_handler.analyzer_returncode = 1 return res_handler @classmethod def get_analyzer_checkers(cls, cfg_handler, environ): """ Return the checkers available in the analyzer. """ raise NotImplementedError("Subclasses should implement this!") @staticmethod def run_proc(command, env=None, cwd=None, proc_callback=None): """ Just run the given command and return the return code and the stdout and stderr outputs of the process. """ def signal_handler(signum, frame): # Clang does not kill its child processes, so I have to. 
try: g_pid = proc.pid os.killpg(g_pid, signal.SIGTERM) finally: sys.exit(128 + signum) signal.signal(signal.SIGINT, signal_handler) proc = subprocess.Popen(command, bufsize=-1, env=env, preexec_fn=os.setsid, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) # Send the created analyzer process' object if somebody wanted it. if proc_callback: proc_callback(proc) stdout, stderr = proc.communicate() return proc.returncode, stdout, stderr
1
10,826
wouldn't using the `@abstractmethod` decorator be better? (see the sketch after this record)
Ericsson-codechecker
c
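A sketch of the reviewer's suggestion above. Note the original file targets Python 2 (`__metaclass__ = ABCMeta`), where abstract classmethods are not enforced cleanly; this sketch uses the Python 3 spelling, so treat it as an illustration of the idea rather than a drop-in change:

```python
from abc import ABC, abstractmethod


class SourceAnalyzer(ABC):
    """Sketch: decorator-based spelling of the hook methods."""

    @classmethod
    @abstractmethod
    def version_compatible(cls, configured_binary, environ):
        """Check the version compatibility of the given analyzer binary."""

    @abstractmethod
    def construct_analyzer_cmd(self, result_handler):
        """Build the analyzer command line."""


class Broken(SourceAnalyzer):
    """Forgets to override the abstract hooks."""


try:
    Broken()
except TypeError as exc:
    # With @abstractmethod the failure moves from call time
    # (NotImplementedError) to construction time (TypeError).
    print(exc)
```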
@@ -21,7 +21,7 @@ final class BooleanConfigurator implements FieldConfiguratorInterface public function configure(FieldDto $field, EntityDto $entityDto, AdminContext $context): void { $isRenderedAsSwitch = true === $field->getCustomOption(BooleanField::OPTION_RENDER_AS_SWITCH); - if ($isRenderedAsSwitch && false !== strpos($field->getProperty(), '.')) { + if ($isRenderedAsSwitch && $this->isNestedProperty($field) && false === $this->isBoolean($field)) { throw new \InvalidArgumentException(sprintf('The "%s" property cannot be rendered as a switch because it belongs to an associated entity instead of to the entity itself. Render the property as a normal boolean field.', $field->getProperty())); }
1
<?php namespace EasyCorp\Bundle\EasyAdminBundle\Field\Configurator; use EasyCorp\Bundle\EasyAdminBundle\Context\AdminContext; use EasyCorp\Bundle\EasyAdminBundle\Contracts\Field\FieldConfiguratorInterface; use EasyCorp\Bundle\EasyAdminBundle\Dto\EntityDto; use EasyCorp\Bundle\EasyAdminBundle\Dto\FieldDto; use EasyCorp\Bundle\EasyAdminBundle\Field\BooleanField; /** * @author Javier Eguiluz <[email protected]> */ final class BooleanConfigurator implements FieldConfiguratorInterface { public function supports(FieldDto $field, EntityDto $entityDto): bool { return BooleanField::class === $field->getFieldFqcn(); } public function configure(FieldDto $field, EntityDto $entityDto, AdminContext $context): void { $isRenderedAsSwitch = true === $field->getCustomOption(BooleanField::OPTION_RENDER_AS_SWITCH); if ($isRenderedAsSwitch && false !== strpos($field->getProperty(), '.')) { throw new \InvalidArgumentException(sprintf('The "%s" property cannot be rendered as a switch because it belongs to an associated entity instead of to the entity itself. Render the property as a normal boolean field.', $field->getProperty())); } // TODO: ask someone who knows Symfony forms well how to make this work if ($isRenderedAsSwitch) { // see https://symfony.com/blog/new-in-symfony-4-4-bootstrap-custom-switches // $field->setFormTypeOptionIfNotSet('label_attr.class', 'switch-custom'); } } }
1
12,981
This was crashing the app during EDIT on boolean Doctrine Embedded properties (see the helper sketch after this record).
EasyCorp-EasyAdminBundle
php
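The patch above calls two helpers, `isNestedProperty()` and `isBoolean()`, that do not appear in the old file (they would be added elsewhere in the same change). A rough sketch of plausible bodies — the names come from the patch, but the implementations below are guesses, and `getValue()` on the field DTO is an assumption:

```php
<?php

// Hypothetical helper bodies; only the method names are taken from the patch.
final class BooleanConfiguratorHelpersSketch
{
    public function isNestedProperty(object $field): bool
    {
        // A dot in the property path means the value lives on an
        // associated or embedded object, not on the entity itself.
        return false !== strpos($field->getProperty(), '.');
    }

    public function isBoolean(object $field): bool
    {
        // Guess: a nested property whose resolved value is a real
        // boolean is still safe to render as a switch.
        return \is_bool($field->getValue());
    }
}
```

Combined as in the patch, the exception then fires only for nested properties that are not booleans, which is what would un-break boolean properties on Doctrine embeddables.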
@@ -45,7 +45,7 @@ #include "board_common.h" -#define DFL_SYSFS_SEC_GLOB "*dfl*/*spi*/*spi*/*spi*/**/security/" +#define DFL_SYSFS_SEC_GLOB "*dfl*/**/security/" #define DFL_SYSFS_SEC_USER_FLASH_COUNT DFL_SYSFS_SEC_GLOB "*flash_count" #define DFL_SYSFS_SEC_BMC_CANCEL DFL_SYSFS_SEC_GLOB "bmc_canceled_csks" #define DFL_SYSFS_SEC_BMC_ROOT DFL_SYSFS_SEC_GLOB "bmc_root_entry_hash"
1
// Copyright(c) 2019-2021, Intel Corporation // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Intel Corporation nor the names of its contributors // may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. #ifdef HAVE_CONFIG_H #include <config.h> #endif // HAVE_CONFIG_H #include <glob.h> #include <stdio.h> #include <errno.h> #include <string.h> #include <unistd.h> #include <fcntl.h> #include <sys/types.h> #include <sys/stat.h> #include <net/if.h> #include <net/ethernet.h> #include <opae/properties.h> #include <opae/utils.h> #include <opae/fpga.h> #include <sys/ioctl.h> #include "board_common.h" #define DFL_SYSFS_SEC_GLOB "*dfl*/*spi*/*spi*/*spi*/**/security/" #define DFL_SYSFS_SEC_USER_FLASH_COUNT DFL_SYSFS_SEC_GLOB "*flash_count" #define DFL_SYSFS_SEC_BMC_CANCEL DFL_SYSFS_SEC_GLOB "bmc_canceled_csks" #define DFL_SYSFS_SEC_BMC_ROOT DFL_SYSFS_SEC_GLOB "bmc_root_entry_hash" #define DFL_SYSFS_SEC_PR_CANCEL DFL_SYSFS_SEC_GLOB "pr_canceled_csks" #define DFL_SYSFS_SEC_PR_ROOT DFL_SYSFS_SEC_GLOB "pr_root_entry_hash" #define DFL_SYSFS_SEC_SR_CANCEL DFL_SYSFS_SEC_GLOB "sr_canceled_csks" #define DFL_SYSFS_SEC_SR_ROOT DFL_SYSFS_SEC_GLOB "sr_root_entry_hash" #define DFL_SYSFS_ETHINTERFACE "dfl*.*/net/%s*" #define ETHTOOL_STR "ethtool" #define IFCONFIG_STR "ifconfig" #define IFCONFIG_UP_STR "up" #define SYSFS_FEATURE_ID "/sys/bus/pci/devices/*%x*:*%x*:*%x*.*%x*/"\ "fpga_region/region*/dfl-fme*/dfl_dev*/feature_id" // Read sysfs fpga_result read_sysfs(fpga_token token, char *sysfs_path, char *sysfs_name, size_t len) { fpga_result res = FPGA_OK; fpga_result resval = FPGA_OK; uint32_t size = 0; char name[SYSFS_PATH_MAX] = { 0 }; fpga_object fpga_object; if (sysfs_path == NULL || sysfs_name == NULL) { OPAE_ERR("Invalid input parameter"); return FPGA_INVALID_PARAM; } res = fpgaTokenGetObject(token, sysfs_path, &fpga_object, FPGA_OBJECT_GLOB); if (res != FPGA_OK) { OPAE_MSG("Failed to get token Object"); return res; } res = fpgaObjectGetSize(fpga_object, &size, 0); if (res != FPGA_OK) { OPAE_ERR("Failed to get object size "); resval = res; goto out_destroy; } if (size > len) { OPAE_ERR("object size bigger then buffer size"); resval = FPGA_EXCEPTION; goto out_destroy; } res = fpgaObjectRead(fpga_object, 
(uint8_t *)(&name), 0, size, 0); if (res != FPGA_OK) { OPAE_ERR("Failed to Read object "); resval = res; goto out_destroy; } len = strnlen(name, len - 1); memcpy(sysfs_name, name, len); sysfs_name[len] = '\0'; if (sysfs_name[len-1] == '\n') sysfs_name[len-1] = '\0'; out_destroy: res = fpgaDestroyObject(&fpga_object); if (res != FPGA_OK) { OPAE_ERR("Failed to Destroy Object"); resval = res; } return resval; } // read sysfs value fpga_result read_sysfs_int64(fpga_token token, char *sysfs_path, uint64_t *value) { fpga_result res = FPGA_OK; fpga_object fpga_object; if (sysfs_path == NULL) { OPAE_ERR("Invalid input parameter"); return FPGA_INVALID_PARAM; } res = fpgaTokenGetObject(token, sysfs_path, &fpga_object, FPGA_OBJECT_GLOB); if (res != FPGA_OK) { OPAE_MSG("Failed to get token Object"); return res; } res = fpgaObjectRead64(fpga_object, value, 0); if (res != FPGA_OK) { OPAE_ERR("Failed to Read object "); } res = fpgaDestroyObject(&fpga_object); if (res != FPGA_OK) { OPAE_ERR("Failed to Destroy Object"); } return res; } // Sec info fpga_result print_sec_common_info(fpga_token token) { fpga_result res = FPGA_OK; fpga_result resval = FPGA_OK; fpga_object tcm_object; char name[SYSFS_PATH_MAX] = { 0 }; res = fpgaTokenGetObject(token, DFL_SYSFS_SEC_GLOB, &tcm_object, FPGA_OBJECT_GLOB); if (res != FPGA_OK) { OPAE_ERR("Failed to get token Object"); return res; } printf("********** SEC Info START ************ \n"); // BMC Keys memset(name, 0, sizeof(name)); res = read_sysfs(token, DFL_SYSFS_SEC_BMC_ROOT, name, SYSFS_PATH_MAX - 1); if (res == FPGA_OK) { printf("BMC root entry hash: %s\n", name); } else { OPAE_MSG("Failed to Read TCM BMC root entry hash"); printf("BMC root entry hash: %s\n", "None"); resval = res; } memset(name, 0, sizeof(name)); res = read_sysfs(token, DFL_SYSFS_SEC_BMC_CANCEL, name, SYSFS_PATH_MAX - 1); if (res == FPGA_OK) { printf("BMC CSK IDs canceled: %s\n", strlen(name) > 0 ? name : "None"); } else { OPAE_MSG("Failed to Read BMC CSK IDs canceled"); printf("BBMC CSK IDs canceled: %s\n", "None"); resval = res; } // PR Keys memset(name, 0, sizeof(name)); res = read_sysfs(token, DFL_SYSFS_SEC_PR_ROOT, name, SYSFS_PATH_MAX - 1); if (res == FPGA_OK) { printf("PR root entry hash: %s\n", name); } else { OPAE_MSG("Failed to Read PR root entry hash"); printf("PR root entry hash: %s\n", "None"); resval = res; } memset(name, 0, sizeof(name)); res = read_sysfs(token, DFL_SYSFS_SEC_PR_CANCEL, name, SYSFS_PATH_MAX - 1); if (res == FPGA_OK) { printf("AFU/PR CSK IDs canceled: %s\n", strlen(name) > 0 ? name : "None"); } else { OPAE_MSG("Failed to Read AFU CSK/PR IDs canceled"); printf("AFU/PR CSK IDs canceled: %s\n", "None"); resval = res; } // SR Keys memset(name, 0, sizeof(name)); res = read_sysfs(token, DFL_SYSFS_SEC_SR_ROOT, name, SYSFS_PATH_MAX - 1); if (res == FPGA_OK) { printf("FIM root entry hash: %s\n", name); } else { OPAE_MSG("Failed to Read FIM root entry hash"); printf("FIM root entry hash: %s\n", "None"); resval = res; } memset(name, 0, sizeof(name)); res = read_sysfs(token, DFL_SYSFS_SEC_SR_CANCEL, name, SYSFS_PATH_MAX - 1); if (res == FPGA_OK) { printf("FIM CSK IDs canceled: %s\n", strlen(name) > 0 ? 
name : "None"); } else { OPAE_MSG("Failed to Read FIM CSK IDs canceled"); printf("FIM CSK IDs canceled: %s\n", "None"); resval = res; } // User flash count memset(name, 0, sizeof(name)); res = read_sysfs(token, DFL_SYSFS_SEC_USER_FLASH_COUNT, name, SYSFS_PATH_MAX - 1); if (res == FPGA_OK) { printf("User flash update counter: %s\n", name); } else { OPAE_MSG("Failed to Read User flash update counter"); printf("User flash update counter: %s\n", "None"); resval = res; } res = fpgaDestroyObject(&tcm_object); if (res != FPGA_OK) { OPAE_MSG("Failed to Destroy Object"); resval = res; } printf("********** SEC Info END ************ \n"); return resval; } // prints FPGA ethernet interface info fpga_result print_eth_interface_info(fpga_token token, const char *interface_name) { fpga_result res = FPGA_NOT_FOUND; struct if_nameindex *if_nidxs = NULL; struct if_nameindex *intf = NULL; char cmd[SYSFS_PATH_MAX] = { 0 }; char glob[SYSFS_PATH_MAX] = { 0 }; int result = 0; fpga_object fpga_object; if_nidxs = if_nameindex(); if (if_nidxs != NULL) { for (intf = if_nidxs; intf->if_index != 0 || intf->if_name != NULL; intf++) { char *p = strstr(intf->if_name, interface_name); if (p) { memset(glob, 0, sizeof(glob)); if (snprintf(glob, sizeof(glob), DFL_SYSFS_ETHINTERFACE, p) < 0) { OPAE_ERR("snprintf failed"); res = FPGA_EXCEPTION; goto out_free; } // Check interface associated to bdf res = fpgaTokenGetObject(token, glob, &fpga_object, FPGA_OBJECT_GLOB); if (res != FPGA_OK) { OPAE_DBG("Failed to get token Object"); res = FPGA_OK; continue; } res = fpgaDestroyObject(&fpga_object); if (res != FPGA_OK) { OPAE_ERR("Failed to Destroy Object"); } // Interface up memset(cmd, 0, sizeof(cmd)); if (snprintf(cmd, sizeof(cmd), "%s %s %s", IFCONFIG_STR, intf->if_name, IFCONFIG_UP_STR) < 0) { OPAE_ERR("snprintf failed"); res = FPGA_EXCEPTION; goto out_free; } result = system(cmd); if (result < 0) { res = FPGA_EXCEPTION; OPAE_ERR("Failed to run cmd: %s %s", cmd, strerror(errno)); } // eth tool command memset(cmd, 0, sizeof(cmd)); if (snprintf(cmd, sizeof(cmd), "%s %s", ETHTOOL_STR, intf->if_name) < 0) { OPAE_ERR("snprintf failed"); res = FPGA_EXCEPTION; goto out_free; } result = system(cmd); if (result < 0) { res = FPGA_EXCEPTION; OPAE_ERR("Failed to run cmd: %s %s", cmd, strerror(errno)); } } } out_free: if_freenameindex(if_nidxs); } return res; } fpga_result sysfs_read_u64(const char *path, uint64_t *u) { int fd = -1; int res = 0; char buf[SYSFS_PATH_MAX] = { 0 }; int b = 0; if (!path || !u) { OPAE_ERR("Invalid input path"); return FPGA_INVALID_PARAM; } fd = open(path, O_RDONLY); if (fd < 0) { OPAE_MSG("open(%s) failed", path); return FPGA_NOT_FOUND; } if ((off_t)-1 == lseek(fd, 0, SEEK_SET)) { OPAE_MSG("seek failed"); goto out_close; } do { res = read(fd, buf + b, sizeof(buf) - b); if (res <= 0) { OPAE_MSG("Read from %s failed", path); goto out_close; } b += res; if (((unsigned)b > sizeof(buf)) || (b <= 0)) { OPAE_MSG("Unexpected size reading from %s", path); goto out_close; } } while (buf[b - 1] != '\n' && buf[b - 1] != '\0' && (unsigned)b < sizeof(buf)); // erase \n buf[b - 1] = 0; *u = strtoull(buf, NULL, 0); close(fd); return FPGA_OK; out_close: close(fd); return FPGA_NOT_FOUND; } fpga_result get_fpga_sbdf(fpga_token token, uint16_t *segment, uint8_t *bus, uint8_t *device, uint8_t *function) { fpga_result res = FPGA_OK; fpga_properties props = NULL; if (!segment || !bus || !device || !function) { OPAE_ERR("Invalid input parameters"); return FPGA_INVALID_PARAM; } res = fpgaGetProperties(token, &props); if (res != FPGA_OK) 
{ OPAE_ERR("Failed to get properties "); return res; } res = fpgaPropertiesGetBus(props, bus); if (res != FPGA_OK) { OPAE_ERR("Failed to get bus "); return res; } res = fpgaPropertiesGetSegment(props, segment); if (res != FPGA_OK) { OPAE_ERR("Failed to get Segment "); return res; } res = fpgaPropertiesGetDevice(props, device); if (res != FPGA_OK) { OPAE_ERR("Failed to get Device "); return res; } res = fpgaPropertiesGetFunction(props, function); if (res != FPGA_OK) { OPAE_ERR("Failed to get Function "); return res; } return res; } fpga_result find_dev_feature(fpga_token token, uint32_t feature_id, char *dfl_dev_str) { fpga_result res = FPGA_NOT_FOUND; fpga_result retval = FPGA_OK; int gres = 0; size_t i = 0; uint64_t value = 0; uint16_t segment = 0; uint8_t bus = 0; uint8_t device = 0; uint8_t function = 0; glob_t pglob; char feature_path[SYSFS_PATH_MAX] = { 0 }; retval = get_fpga_sbdf(token, &segment, &bus, &device, &function); if (retval != FPGA_OK) { OPAE_ERR("Failed to get sbdf "); return retval; } if (snprintf(feature_path, sizeof(feature_path), SYSFS_FEATURE_ID, segment, bus, device, function) < 0) { OPAE_ERR("snprintf buffer overflow"); return FPGA_EXCEPTION; } gres = glob(feature_path, GLOB_NOSORT, NULL, &pglob); if (gres) { OPAE_ERR("Failed pattern match %s: %s", feature_path, strerror(errno)); globfree(&pglob); return FPGA_NOT_FOUND; } for (i = 0; i < pglob.gl_pathc; i++) { retval = sysfs_read_u64(pglob.gl_pathv[i], &value); if (retval != FPGA_OK) { OPAE_MSG("Failed to read sysfs value"); continue; } if (value == feature_id) { res = FPGA_OK; if (dfl_dev_str) { char *p = strstr(pglob.gl_pathv[i], "dfl_dev"); if (p == NULL) { res = FPGA_NOT_FOUND; goto free; } char *end = strchr(p, '/'); if (end == NULL) { res = FPGA_NOT_FOUND; goto free; } strncpy(dfl_dev_str, p, end - p); *(dfl_dev_str + (end - p)) = '\0'; } break; } } free: if (gres) globfree(&pglob); return res; }
1
20,883
Will this be backward compatible with the previous path? (see the glob test sketch after this record)
OPAE-opae-sdk
c
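The reviewer's compatibility question above can be checked mechanically. Under plain POSIX `glob(3)`, `*` never crosses a `/` and `**` is just `*`, so the relaxed pattern matches exactly one intermediate path component where the old one required at least four; whether the change is backward compatible therefore depends on how OPAE's `FPGA_OBJECT_GLOB` expands `**`, which is not verified here. A small test program, assuming plain POSIX semantics and an illustrative directory layout:

```c
#include <glob.h>
#include <stdio.h>

/* Counts matches for a pattern relative to the current directory.
 * Run it from a directory tree mimicking the sysfs layout to compare
 * the old and new patterns. */
static void try_pattern(const char *pattern)
{
    glob_t g;
    if (glob(pattern, 0, NULL, &g) == 0) {
        printf("%-45s -> %zu match(es)\n", pattern, g.gl_pathc);
        globfree(&g);
    } else {
        printf("%-45s -> no match\n", pattern);
    }
}

int main(void)
{
    try_pattern("*dfl*/*spi*/*spi*/*spi*/**/security/"); /* old pattern */
    try_pattern("*dfl*/**/security/");                   /* new pattern */
    return 0;
}
```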
@@ -29,6 +29,11 @@ namespace OpenTelemetry.Trace /// </summary> ISpan CurrentSpan { get; } + /// <summary> + /// Gets the current scope from the context. + /// </summary> + IScope CurrentScope { get; } + /// <summary> /// Gets the <see cref="IBinaryFormat"/> for this implementation. /// </summary>
1
// <copyright file="ITracer.cs" company="OpenTelemetry Authors"> // Copyright 2018, OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> namespace OpenTelemetry.Trace { using OpenTelemetry.Context; using OpenTelemetry.Context.Propagation; /// <summary> /// Tracer to record distributed tracing informaiton. /// </summary> public interface ITracer { /// <summary> /// Gets the current span from the context. /// </summary> ISpan CurrentSpan { get; } /// <summary> /// Gets the <see cref="IBinaryFormat"/> for this implementation. /// </summary> IBinaryFormat BinaryFormat { get; } /// <summary> /// Gets the <see cref="ITextFormat"/> for this implementation. /// </summary> ITextFormat TextFormat { get; } /// <summary> /// Associates the span with the current context. /// </summary> /// <param name="span">Span to associate with the current context.</param> /// <returns>Scope object to control span to current context association.</returns> IScope WithSpan(ISpan span); /// <summary> /// Gets the span builder for the span with the given name. /// </summary> /// <param name="spanName">Span name.</param> /// <returns>Span builder for the span with the given name.</returns> ISpanBuilder SpanBuilder(string spanName); /// <summary> /// Records <see cref="SpanData"/>. This API allows to send a pre-populated span object to the /// exporter.Sampling and recording decisions as well as other collection optimizations is a /// responsibility of a caller.Note, the <see cref="SpanContext" /> object on the span population with /// the values that will allow correlation of telemetry is also a caller responsibility. /// </summary> /// <param name="span">Immutable Span Data to be reported to all exporters.</param> void RecordSpanData(SpanData span); } }
1
12,126
what is the potential use for the current scope? It seems you'd only want it in order to stop it. But if you get the current scope, you never know if it's yours to stop - i.e. it is not safe or correct to stop the current scope. So I wonder whether we should even try to expose it?
open-telemetry-opentelemetry-dotnet
.cs
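To make the reviewer's ownership concern concrete, here is a small self-contained Java sketch (the Scope and withSpan names are hypothetical and deliberately not the OpenTelemetry API): the handle returned by withSpan captures the previous context, so only its creator can restore it correctly - code that merely reads a "current scope" cannot know whether closing it is safe.

final class ScopeDemo {
    /** One-method interface so it can be used with try-with-resources. */
    interface Scope extends AutoCloseable {
        @Override
        void close(); // restores the previously current span
    }

    private static final ThreadLocal<String> CURRENT_SPAN =
            ThreadLocal.withInitial(() -> "none");

    static Scope withSpan(String span) {
        String previous = CURRENT_SPAN.get();
        CURRENT_SPAN.set(span);
        // Only this handle knows what "previous" was, so only it can undo the change.
        return () -> CURRENT_SPAN.set(previous);
    }

    public static void main(String[] args) {
        try (Scope ignored = withSpan("outer")) {
            // Library code that fetched a hypothetical "current scope" here and
            // closed it would tear down a scope it does not own.
            System.out.println(CURRENT_SPAN.get()); // prints "outer"
        }
        System.out.println(CURRENT_SPAN.get()); // prints "none"
    }
}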
@@ -76,7 +76,9 @@ describe( 'User Input Settings', () => { 'enter keywords', async () => { await expect( page ).toClick( '.googlesitekit-user-input__buttons--next' ); - await expect( page ).toFill( '#searchTerms-keyword-0', 'One,Two,Three,' ); + await expect( page ).toFill( '#searchTerms-keyword-0', 'One' ); + await expect( page ).toFill( '#searchTerms-keyword-1', 'Two' ); + await expect( page ).toFill( '#searchTerms-keyword-2', 'Three' ); }, );
1
/** * User Input Settings tests. * * Site Kit by Google, Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * WordPress dependencies */ import { createURL, visitAdminPage } from '@wordpress/e2e-test-utils'; /** * Internal dependencies */ import { activatePlugins, deactivateUtilityPlugins, resetSiteKit, setupSiteKit, useRequestInterception, enableFeature, pageWait, step, } from '../utils'; describe( 'User Input Settings', () => { async function fillInInputSettings() { await step( 'select role', async () => { await page.waitForSelector( '.googlesitekit-user-input__question' ); await expect( page ).toClick( '#role-owner_with_team' ); }, ); await step( 'select post frequency', async () => { await expect( page ).toClick( '.googlesitekit-user-input__buttons--next' ); await expect( page ).toClick( '#postFrequency-monthly' ); }, ); await step( 'select goals', async () => { await expect( page ).toClick( '.googlesitekit-user-input__buttons--next' ); await expect( page ).toClick( '#goals-publish_blog' ); await expect( page ).toClick( '#goals-share_portfolio' ); }, ); await step( 'select help needed', async () => { await expect( page ).toClick( '.googlesitekit-user-input__buttons--next' ); await expect( page ).toClick( '#helpNeeded-retaining_visitors' ); await expect( page ).toClick( '#helpNeeded-improving_performance' ); await expect( page ).toClick( '#helpNeeded-help_better_rank' ); }, ); await step( 'enter keywords', async () => { await expect( page ).toClick( '.googlesitekit-user-input__buttons--next' ); await expect( page ).toFill( '#searchTerms-keyword-0', 'One,Two,Three,' ); }, ); await step( 'go to preview page', expect( page ).toClick( '.googlesitekit-user-input__buttons--next' ), ); await step( 'wait for settings submission', Promise.all( [ expect( page ).toClick( '.googlesitekit-user-input__buttons--next' ), page.waitForNavigation(), ] ), ); await step( 'wait for success notification', page.waitForSelector( '#user-input-success' ), ); } beforeAll( async () => { await page.setRequestInterception( true ); useRequestInterception( ( request ) => { const url = request.url(); if ( url.startsWith( 'https://sitekit.withgoogle.com' ) ) { request.respond( { status: 302, headers: { location: createURL( '/wp-admin/index.php', 'oauth2callback=1&code=valid-test-code' ), }, } ); } else if ( url.match( '/google-site-kit/v1/core/user/data/user-input-settings' ) ) { request.continue(); } else if ( url.match( '/google-site-kit/v1/data' ) || url.match( '/google-site-kit/v1/modules' ) ) { request.respond( { status: 200 } ); } else { request.continue(); } } ); } ); beforeEach( async () => { await enableFeature( 'userInput' ); await setupSiteKit(); await activatePlugins( 'e2e-tests-oauth-callback-plugin', 'e2e-tests-site-verification-plugin', 'e2e-tests-proxy-credentials-plugin', 'e2e-tests-user-input-settings-api-mock', ); } ); afterEach( async () => { await deactivateUtilityPlugins(); await resetSiteKit(); } ); it( 'should require new users to enter input settings after signing in', async () => { 
await step( 'visit splash screen', visitAdminPage( 'admin.php', 'page=googlesitekit-splash' ), ); await step( 'click on start setup button and wait for navigation', Promise.all( [ expect( page ).toClick( '.googlesitekit-start-setup' ), page.waitForNavigation(), ] ), ); await fillInInputSettings(); } ); it( 'should offer to enter input settings for existing users', async () => { await step( 'visit admin dashboard', visitAdminPage( 'admin.php', 'page=googlesitekit-dashboard' ), ); await step( 'click on CTA button and wait for navigation', async () => { await page.waitForSelector( '.googlesitekit-user-input__notification' ); await Promise.all( [ expect( page ).toClick( '.googlesitekit-notification__cta' ), page.waitForNavigation(), ] ); }, ); await fillInInputSettings(); } ); it( 'should let existing users enter input settings from the settings page', async () => { await step( 'visit admin settings', async () => { await visitAdminPage( 'admin.php', 'page=googlesitekit-settings' ); await pageWait(); await page.waitForSelector( '.mdc-tab-bar button.mdc-tab' ); await expect( page ).toClick( 'button.mdc-tab', { text: /admin settings/i } ); }, ); await step( 'click on CTA button and wait for navigation', async () => { await page.waitForSelector( '.googlesitekit-user-input__notification' ); await Promise.all( [ expect( page ).toClick( '.googlesitekit-notification__cta' ), page.waitForNavigation(), ] ); }, ); await fillInInputSettings(); } ); } );
1
36,716
I think this is likely the only additional change needed on the original PR.
google-site-kit-wp
js
@@ -5,10 +5,12 @@ import android.content.SharedPreferences; import android.content.res.Configuration; import android.util.AttributeSet; import android.view.View; + import androidx.appcompat.view.ContextThemeWrapper; import androidx.recyclerview.widget.DividerItemDecoration; import androidx.recyclerview.widget.LinearLayoutManager; import androidx.recyclerview.widget.RecyclerView; + import de.danoeh.antennapod.R; import io.reactivex.annotations.Nullable;
1
package de.danoeh.antennapod.view; import android.content.Context; import android.content.SharedPreferences; import android.content.res.Configuration; import android.util.AttributeSet; import android.view.View; import androidx.appcompat.view.ContextThemeWrapper; import androidx.recyclerview.widget.DividerItemDecoration; import androidx.recyclerview.widget.LinearLayoutManager; import androidx.recyclerview.widget.RecyclerView; import de.danoeh.antennapod.R; import io.reactivex.annotations.Nullable; public class EpisodeItemListRecyclerView extends RecyclerView { private static final String TAG = "EpisodeItemListRecyclerView"; private static final String PREF_PREFIX_SCROLL_POSITION = "scroll_position_"; private static final String PREF_PREFIX_SCROLL_OFFSET = "scroll_offset_"; private LinearLayoutManager layoutManager; public EpisodeItemListRecyclerView(Context context) { super(new ContextThemeWrapper(context, R.style.FastScrollRecyclerView)); setup(); } public EpisodeItemListRecyclerView(Context context, @Nullable AttributeSet attrs) { super(new ContextThemeWrapper(context, R.style.FastScrollRecyclerView), attrs); setup(); } public EpisodeItemListRecyclerView(Context context, @Nullable AttributeSet attrs, int defStyleAttr) { super(new ContextThemeWrapper(context, R.style.FastScrollRecyclerView), attrs, defStyleAttr); setup(); } private void setup() { layoutManager = new LinearLayoutManager(getContext()); layoutManager.setRecycleChildrenOnDetach(true); setLayoutManager(layoutManager); setHasFixedSize(true); addItemDecoration(new DividerItemDecoration(getContext(), layoutManager.getOrientation())); setClipToPadding(false); } @Override protected void onConfigurationChanged(Configuration newConfig) { super.onConfigurationChanged(newConfig); int horizontalSpacing = (int) getResources().getDimension(R.dimen.additional_horizontal_spacing); setPadding(horizontalSpacing, getPaddingTop(), horizontalSpacing, getPaddingBottom()); } public void saveScrollPosition(String tag) { int firstItem = layoutManager.findFirstVisibleItemPosition(); View firstItemView = layoutManager.findViewByPosition(firstItem); float topOffset; if (firstItemView == null) { topOffset = 0; } else { topOffset = firstItemView.getTop(); } getContext().getSharedPreferences(TAG, Context.MODE_PRIVATE).edit() .putInt(PREF_PREFIX_SCROLL_POSITION + tag, firstItem) .putInt(PREF_PREFIX_SCROLL_OFFSET + tag, (int) topOffset) .apply(); } public void restoreScrollPosition(String tag) { SharedPreferences prefs = getContext().getSharedPreferences(TAG, Context.MODE_PRIVATE); int position = prefs.getInt(PREF_PREFIX_SCROLL_POSITION + tag, 0); int offset = prefs.getInt(PREF_PREFIX_SCROLL_OFFSET + tag, 0); if (position > 0 || offset > 0) { layoutManager.scrollToPositionWithOffset(position, offset); } } public boolean isScrolledToBottom() { int visibleEpisodeCount = getChildCount(); int totalEpisodeCount = layoutManager.getItemCount(); int firstVisibleEpisode = layoutManager.findFirstVisibleItemPosition(); return (totalEpisodeCount - visibleEpisodeCount) <= (firstVisibleEpisode + 3); } }
1
19,254
Please revert the changes to this unrelated file
AntennaPod-AntennaPod
java
@@ -0,0 +1,14 @@ +package azkaban.execapp.fake; + +import org.mortbay.jetty.Connector; +import org.mortbay.jetty.Server; +import org.mortbay.jetty.bio.SocketConnector; + +public class FakeServer extends Server { + + @Override + public Connector[] getConnectors() { + return new Connector[]{new SocketConnector()}; + } + +}
1
1
13,290
Can the test use a Mockito mock instance instead?
azkaban-azkaban
java
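A sketch of the reviewer's suggestion, assuming Mockito and JUnit 4 are on the test classpath (the class and method names here are illustrative): stub getConnectors() on a mock instead of maintaining the hand-rolled FakeServer subclass.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.junit.Test;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.bio.SocketConnector;

public class ServerMockTest {
    @Test
    public void stubsConnectorsWithoutAFakeSubclass() {
        // Replaces azkaban.execapp.fake.FakeServer: stub only the one
        // method the code under test actually calls.
        Server server = mock(Server.class);
        when(server.getConnectors())
                .thenReturn(new Connector[] {new SocketConnector()});
    }
}

The trade-off is that a mock silently returns defaults for any other Server method, whereas the subclass keeps real Jetty behavior for everything it does not override.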
@@ -47,12 +47,13 @@ import java.lang.reflect.Method; link = "https://github.com/palantir/gradle-baseline#baseline-error-prone-checks", linkType = BugPattern.LinkType.CUSTOM, severity = BugPattern.SeverityLevel.WARNING, - summary = "InvocationHandlers which delegate to another object must catch and unwrap " - + "InvocationTargetException, otherwise an UndeclaredThrowableException will be thrown " - + "each time the delegate throws an exception.\n" - + "This check is intended to be advisory. It's fine to " - + "@SuppressWarnings(\"InvocationHandlerDelegation\") in certain cases, " - + "but is usually not recommended.") + summary = + "InvocationHandlers which delegate to another object must catch and unwrap " + + "InvocationTargetException, otherwise an UndeclaredThrowableException will be thrown " + + "each time the delegate throws an exception.\n" + + "This check is intended to be advisory. It's fine to " + + "@SuppressWarnings(\"InvocationHandlerDelegation\") in certain cases, " + + "but is usually not recommended.") public final class InvocationHandlerDelegation extends BugChecker implements BugChecker.MethodInvocationTreeMatcher { private static final Matcher<MethodTree> INVOCATION_HANDLER = Matchers.anyOf(
1
/* * (c) Copyright 2018 Palantir Technologies Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.palantir.baseline.errorprone; import com.google.auto.service.AutoService; import com.google.common.reflect.AbstractInvocationHandler; import com.google.errorprone.BugPattern; import com.google.errorprone.VisitorState; import com.google.errorprone.bugpatterns.BugChecker; import com.google.errorprone.matchers.ChildMultiMatcher; import com.google.errorprone.matchers.Description; import com.google.errorprone.matchers.Matcher; import com.google.errorprone.matchers.Matchers; import com.google.errorprone.matchers.method.MethodMatchers; import com.google.errorprone.util.ASTHelpers; import com.sun.source.tree.ConditionalExpressionTree; import com.sun.source.tree.ExpressionTree; import com.sun.source.tree.IfTree; import com.sun.source.tree.InstanceOfTree; import com.sun.source.tree.LambdaExpressionTree; import com.sun.source.tree.MethodInvocationTree; import com.sun.source.tree.MethodTree; import com.sun.source.tree.NewClassTree; import com.sun.source.tree.Tree; import com.sun.source.tree.TryTree; import java.lang.reflect.InvocationHandler; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; @AutoService(BugChecker.class) @BugPattern( name = "InvocationHandlerDelegation", link = "https://github.com/palantir/gradle-baseline#baseline-error-prone-checks", linkType = BugPattern.LinkType.CUSTOM, severity = BugPattern.SeverityLevel.WARNING, summary = "InvocationHandlers which delegate to another object must catch and unwrap " + "InvocationTargetException, otherwise an UndeclaredThrowableException will be thrown " + "each time the delegate throws an exception.\n" + "This check is intended to be advisory. 
It's fine to " + "@SuppressWarnings(\"InvocationHandlerDelegation\") in certain cases, " + "but is usually not recommended.") public final class InvocationHandlerDelegation extends BugChecker implements BugChecker.MethodInvocationTreeMatcher { private static final Matcher<MethodTree> INVOCATION_HANDLER = Matchers.anyOf( Matchers.allOf( Matchers.not(Matchers.isStatic()), MoreMatchers.hasSignature("invoke(java.lang.Object,java.lang.reflect.Method,java.lang.Object[])"), Matchers.enclosingClass(Matchers.isSubtypeOf(InvocationHandler.class.getName()))), Matchers.allOf( Matchers.not(Matchers.isStatic()), MoreMatchers.hasSignature( "handleInvocation(java.lang.Object,java.lang.reflect.Method,java.lang.Object[])"), Matchers.enclosingClass(Matchers.isSubtypeOf(AbstractInvocationHandler.class.getName())))); private static final Matcher<ExpressionTree> METHOD_INVOKE = MethodMatchers.instanceMethod() .onExactClass(Method.class.getName()) .withSignature("invoke(java.lang.Object,java.lang.Object...)"); private static final Matcher<ExpressionTree> METHOD_INVOKE_ENCLOSED_BY_INVOCATION_HANDLER = Matchers.allOf( METHOD_INVOKE, Matchers.enclosingMethod(INVOCATION_HANDLER)); private static final Matcher<Tree> CONTAINS_METHOD_INVOKE = Matchers.contains( ExpressionTree.class, METHOD_INVOKE); private static final Matcher<ExpressionTree> UNWRAP_THROWABLE = MethodMatchers.instanceMethod() .onDescendantOf(Throwable.class.getName()) .named("getCause") .withParameters(); private static final Matcher<Tree> CONTAINS_UNWRAP_THROWABLE = Matchers.contains(ExpressionTree.class, UNWRAP_THROWABLE); private static final Matcher<ExpressionTree> UNWRAP_ITE = MethodMatchers.instanceMethod() .onDescendantOf(InvocationTargetException.class.getName()) // getTargetException is deprecated, but does work correctly. .namedAnyOf("getCause", "getTargetException") .withParameters(); private static final Matcher<ExpressionTree> PASS_ITE = Matchers.methodInvocation( Matchers.anyMethod(), ChildMultiMatcher.MatchType.AT_LEAST_ONE, Matchers.isSubtypeOf(InvocationTargetException.class)); private static final Matcher<Tree> CONTAINS_INSTANCEOF_ITE = Matchers.contains( InstanceOfTree.class, (instanceOfTree, state) -> ASTHelpers.isSameType( ASTHelpers.getType(instanceOfTree.getType()), state.getTypeFromString(InvocationTargetException.class.getName()), state)); private static final Matcher<Tree> CONTAINS_UNWRAP_ITE = Matchers.anyOf( Matchers.contains(ExpressionTree.class, UNWRAP_ITE), Matchers.contains(ExpressionTree.class, PASS_ITE), Matchers.contains( IfTree.class, (Matcher<IfTree>) (ifExpression, state) -> CONTAINS_INSTANCEOF_ITE.matches(ifExpression.getCondition(), state) && CONTAINS_UNWRAP_THROWABLE.matches(ifExpression.getThenStatement(), state)), Matchers.contains( ConditionalExpressionTree.class, (Matcher<ConditionalExpressionTree>) (ifExpression, state) -> CONTAINS_INSTANCEOF_ITE.matches(ifExpression.getCondition(), state) && CONTAINS_UNWRAP_THROWABLE.matches(ifExpression.getTrueExpression(), state))); private static final Matcher<MethodTree> HANDLES_ITE = Matchers.anyOf( Matchers.contains(TryTree.class, (Matcher<TryTree>) (tree, state) -> CONTAINS_METHOD_INVOKE.matches(tree.getBlock(), state) && tree.getCatches().stream() .anyMatch(catchTree -> CONTAINS_UNWRAP_ITE.matches(catchTree.getBlock(), state))), // If Method.invoke occurs in a lambda or anonymous class, we don't have enough // conviction that it's a bug. 
Matchers.contains(LambdaExpressionTree.class, CONTAINS_METHOD_INVOKE::matches), Matchers.contains(NewClassTree.class, CONTAINS_METHOD_INVOKE::matches)); private static final Matcher<MethodInvocationTree> MATCHER = Matchers.allOf( METHOD_INVOKE_ENCLOSED_BY_INVOCATION_HANDLER, Matchers.not(Matchers.enclosingMethod(HANDLES_ITE))); @Override public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) { if (MATCHER.matches(tree, state)) { return describeMatch(tree); } return Description.NO_MATCH; } }
1
8,035
Bit of a shame about these multi-line strings in annotation parameters - they're not a deal breaker but just make the diff noisier
palantir-gradle-baseline
java
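Formatting nit aside, the pattern this check looks for is compact enough to show inline - a minimal delegating InvocationHandler (the class name DelegatingHandler is illustrative) that unwraps InvocationTargetException exactly as the summary prescribes:

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

final class DelegatingHandler implements InvocationHandler {
    private final Object delegate;

    DelegatingHandler(Object delegate) {
        this.delegate = delegate;
    }

    @Override
    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
        try {
            return method.invoke(delegate, args);
        } catch (InvocationTargetException e) {
            // Rethrow the delegate's original exception; otherwise the proxy
            // surfaces it wrapped in an UndeclaredThrowableException.
            throw e.getCause();
        }
    }
}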
@@ -61,6 +61,11 @@ namespace Microsoft.Rest.Generator.NodeJS } } + public List<T> ConvertIEnumerableToList<T>(IEnumerable<T> enumerable) + { + return new List<T>(enumerable); + } + public bool IsPolymorphic { get
1
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. using System; using System.Collections.Generic; using System.Linq; using Microsoft.Rest.Generator.ClientModel; using Microsoft.Rest.Generator.NodeJS.TemplateModels; using Microsoft.Rest.Generator.Utilities; using System.Globalization; namespace Microsoft.Rest.Generator.NodeJS { public class ModelTemplateModel : CompositeType { private readonly IScopeProvider _scope = new ScopeProvider(); private ModelTemplateModel _parent = null; public ModelTemplateModel(CompositeType source, ServiceClient serviceClient) { this.LoadFrom(source); ServiceClient = serviceClient; if(source.BaseModelType != null) { _parent = new ModelTemplateModel(source.BaseModelType, serviceClient); } } public string DeserializeProperty(string objectName, Property property) { if (property == null || property.Type == null) { throw new ArgumentNullException("property"); } return property.Type.DeserializeType(_scope, objectName + "." + property.Name, "models"); } public IScopeProvider Scope { get { return _scope; } } public ServiceClient ServiceClient { get; set; } public virtual IEnumerable<string> Usings { get { return Enumerable.Empty<string>(); } } public IEnumerable<Property> ComposedProperties { get { if(this._parent != null) { return _parent.ComposedProperties .Union(this.Properties); } return this.Properties; } } public bool IsPolymorphic { get { if(!string.IsNullOrEmpty(this.PolymorphicDiscriminator)) { return true; } else if(this._parent != null) { return _parent.IsPolymorphic; } return false; } } public string ValidateProperty(string objectName, Property property) { if (property == null) { throw new ArgumentNullException("property"); } var propertyName = string.Format(CultureInfo.InvariantCulture, "{0}['{1}']", objectName, property.Name); if (property.IsRequired) { return property.Type.ValidateRequiredType(_scope, propertyName, "models"); } else { return property.Type.ValidateType(_scope, propertyName, "models"); } } /// <summary> /// Returns list of properties that needs to be explicitly deserializes for a model. /// </summary> public IEnumerable<Property> SpecialProperties { get { foreach (var property in ComposedProperties) { if (isSpecial(property.Type)) { yield return property; } } } } private bool isSpecial(IType type) { if (type == PrimaryType.DateTime || type == PrimaryType.Date || type == PrimaryType.ByteArray || type is CompositeType) { return true; } else if (type is SequenceType) { return isSpecial(((SequenceType)type).ElementType); } else if (type is DictionaryType) { return isSpecial(((DictionaryType)type).ValueType); } return false; } } }
1
20,933
we can remove this one as it is not used anymore
Azure-autorest
java
@@ -90,7 +90,6 @@ public abstract class LongRunningConfig { private static LongRunningConfig createLongRunningConfigFromProtoFile( Method method, DiagCollector diagCollector, ProtoParser protoParser) { - boolean error = false; Model model = method.getModel(); OperationTypes operationTypes = protoParser.getLongRunningOperation(method); if (operationTypes == null
1
/* Copyright 2016 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.config; import com.google.api.codegen.LongRunningConfigProto; import com.google.api.codegen.util.ProtoParser; import com.google.api.tools.framework.model.Diag; import com.google.api.tools.framework.model.DiagCollector; import com.google.api.tools.framework.model.Method; import com.google.api.tools.framework.model.Model; import com.google.api.tools.framework.model.SimpleLocation; import com.google.api.tools.framework.model.TypeRef; import com.google.auto.value.AutoValue; import com.google.longrunning.OperationTypes; import javax.annotation.Nullable; import org.threeten.bp.Duration; /** LongRunningConfig represents the long-running operation configuration for a method. */ @AutoValue public abstract class LongRunningConfig { // Default values for LongRunningConfig fields. static final boolean LRO_IMPLEMENTS_CANCEL = true; static final boolean LRO_IMPLEMENTS_DELETE = true; static final int LRO_INITIAL_POLL_DELAY_MILLIS = 3000; static final double LRO_POLL_DELAY_MULTIPLIER = 1.3; static final int LRO_MAX_POLL_DELAY_MILLIS = 60000; static final int LRO_TOTAL_POLL_TIMEOUT_MILLS = 600000; /** Returns the message type returned from a completed operation. */ public abstract TypeModel getReturnType(); /** Returns the message type for the metadata field of an operation. */ public abstract TypeModel getMetadataType(); /** Reports whether or not the service implements delete. */ public abstract boolean implementsDelete(); /** Reports whether or not the service implements cancel. */ public abstract boolean implementsCancel(); /** Returns initial delay after which first poll request will be made. */ public abstract Duration getInitialPollDelay(); /** * Returns multiplier used to gradually increase delay between subsequent polls until it reaches * maximum poll delay. */ public abstract double getPollDelayMultiplier(); /** Returns maximum time between two subsequent poll requests. */ public abstract Duration getMaxPollDelay(); /** Returns total polling timeout. */ public abstract Duration getTotalPollTimeout(); @Nullable static LongRunningConfig createLongRunningConfig( Method method, DiagCollector diagCollector, LongRunningConfigProto longRunningConfigProto, ProtoParser protoParser) { LongRunningConfig longRunningConfig = createLongRunningConfigFromProtoFile(method, diagCollector, protoParser); if (longRunningConfig != null) { return longRunningConfig; } if (!LongRunningConfigProto.getDefaultInstance().equals(longRunningConfigProto)) { return LongRunningConfig.createLongRunningConfigFromGapicConfig( method.getModel(), diagCollector, longRunningConfigProto); } return null; } /** Creates an instance of LongRunningConfig based on protofile annotations. 
*/ @Nullable private static LongRunningConfig createLongRunningConfigFromProtoFile( Method method, DiagCollector diagCollector, ProtoParser protoParser) { boolean error = false; Model model = method.getModel(); OperationTypes operationTypes = protoParser.getLongRunningOperation(method); if (operationTypes == null || operationTypes.equals(operationTypes.getDefaultInstanceForType())) { return null; } String responseTypeName = operationTypes.getResponse(); String metadataTypeName = operationTypes.getMetadata(); TypeRef returnType = model.getSymbolTable().lookupType(responseTypeName); TypeRef metadataType = model.getSymbolTable().lookupType(metadataTypeName); if (returnType == null) { diagCollector.addDiag( Diag.error( SimpleLocation.TOPLEVEL, "Type not found for long running config: '%s'", responseTypeName)); error = true; } else if (!returnType.isMessage()) { diagCollector.addDiag( Diag.error( SimpleLocation.TOPLEVEL, "Type for long running config is not a message: '%s'", responseTypeName)); error = true; } if (metadataType == null) { diagCollector.addDiag( Diag.error( SimpleLocation.TOPLEVEL, "Metadata type not found for long running config: '%s'", metadataTypeName)); error = true; } else if (!metadataType.isMessage()) { diagCollector.addDiag( Diag.error( SimpleLocation.TOPLEVEL, "Metadata type for long running config is not a message: '%s'", metadataTypeName)); error = true; } Duration initialPollDelay = Duration.ofMillis(LRO_INITIAL_POLL_DELAY_MILLIS); Duration maxPollDelay = Duration.ofMillis(LRO_MAX_POLL_DELAY_MILLIS); Duration totalPollTimeout = Duration.ofMillis(LRO_TOTAL_POLL_TIMEOUT_MILLS); if (error) { return null; } else { return new AutoValue_LongRunningConfig( ProtoTypeRef.create(returnType), ProtoTypeRef.create(metadataType), LRO_IMPLEMENTS_CANCEL, LRO_IMPLEMENTS_DELETE, initialPollDelay, LRO_POLL_DELAY_MULTIPLIER, maxPollDelay, totalPollTimeout); } } /** Creates an instance of LongRunningConfig based on LongRunningConfigProto. 
*/ @Nullable private static LongRunningConfig createLongRunningConfigFromGapicConfig( Model model, DiagCollector diagCollector, LongRunningConfigProto longRunningConfigProto) { boolean error = false; TypeRef returnType = model.getSymbolTable().lookupType(longRunningConfigProto.getReturnType()); TypeRef metadataType = model.getSymbolTable().lookupType(longRunningConfigProto.getMetadataType()); if (returnType == null) { diagCollector.addDiag( Diag.error( SimpleLocation.TOPLEVEL, "Type not found for long running config: '%s'", longRunningConfigProto.getReturnType())); error = true; } else if (!returnType.isMessage()) { diagCollector.addDiag( Diag.error( SimpleLocation.TOPLEVEL, "Type for long running config is not a message: '%s'", longRunningConfigProto.getReturnType())); error = true; } if (metadataType == null) { diagCollector.addDiag( Diag.error( SimpleLocation.TOPLEVEL, "Metadata type not found for long running config: '%s'", longRunningConfigProto.getMetadataType())); error = true; } else if (!metadataType.isMessage()) { diagCollector.addDiag( Diag.error( SimpleLocation.TOPLEVEL, "Metadata type for long running config is not a message: '%s'", longRunningConfigProto.getMetadataType())); error = true; } Duration initialPollDelay = Duration.ofMillis(longRunningConfigProto.getInitialPollDelayMillis()); if (initialPollDelay.compareTo(Duration.ZERO) < 0) { diagCollector.addDiag( Diag.error( SimpleLocation.TOPLEVEL, "Initial poll delay must be provided and set to a positive number: '%s'", longRunningConfigProto.getInitialPollDelayMillis())); error = true; } double pollDelayMultiplier = longRunningConfigProto.getPollDelayMultiplier(); if (pollDelayMultiplier <= 1.0) { diagCollector.addDiag( Diag.error( SimpleLocation.TOPLEVEL, "Poll delay multiplier must be provided and be greater or equal than 1.0: '%s'", longRunningConfigProto.getPollDelayMultiplier())); error = true; } Duration maxPollDelay = Duration.ofMillis(longRunningConfigProto.getMaxPollDelayMillis()); if (maxPollDelay.compareTo(initialPollDelay) < 0) { diagCollector.addDiag( Diag.error( SimpleLocation.TOPLEVEL, "Max poll delay must be provided and set be equal or greater than initial poll delay: '%s'", longRunningConfigProto.getMaxPollDelayMillis())); error = true; } Duration totalPollTimeout = Duration.ofMillis(longRunningConfigProto.getTotalPollTimeoutMillis()); if (totalPollTimeout.compareTo(maxPollDelay) < 0) { diagCollector.addDiag( Diag.error( SimpleLocation.TOPLEVEL, "Total poll timeout must be provided and be be equal or greater than max poll delay: '%s'", longRunningConfigProto.getTotalPollTimeoutMillis())); error = true; } if (error) { return null; } else { return new AutoValue_LongRunningConfig( ProtoTypeRef.create(returnType), ProtoTypeRef.create(metadataType), longRunningConfigProto.getImplementsDelete(), longRunningConfigProto.getImplementsCancel(), initialPollDelay, pollDelayMultiplier, maxPollDelay, totalPollTimeout); } } }
1
27,305
we don't really need an extra error flag
googleapis-gapic-generator
java
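One way to honor the reviewer's comment, sketched as a fragment of createLongRunningConfigFromProtoFile rather than a complete method: assuming the framework's DiagCollector exposes getErrorCount() (the gapic-config variant of this check is commonly written that way), a before/after error count replaces the local boolean.

// Before the validation block:
int preexistingErrors = diagCollector.getErrorCount();

// ... the existing diagCollector.addDiag(Diag.error(...)) checks run here,
//     with every `error = true;` assignment deleted ...

// Instead of `if (error) return null;`:
if (diagCollector.getErrorCount() > preexistingErrors) {
    return null;
}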
@@ -21,11 +21,15 @@ import com.google.common.collect.ImmutableList; import java.io.File; import java.util.AbstractList; import java.util.List; +import java.util.Optional; import java.util.stream.Collectors; -import net.ltgt.gradle.errorprone.ErrorPronePlugin; +import net.ltgt.gradle.errorprone.javacplugin.CheckSeverity; +import net.ltgt.gradle.errorprone.javacplugin.ErrorProneJavacPluginPlugin; +import net.ltgt.gradle.errorprone.javacplugin.ErrorProneOptions; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.file.FileCollection; +import org.gradle.api.plugins.ExtensionAware; import org.gradle.api.plugins.JavaPluginConvention; import org.gradle.api.tasks.compile.JavaCompile; import org.gradle.api.tasks.javadoc.Javadoc;
1
/* * (c) Copyright 2017 Palantir Technologies Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.palantir.baseline.plugins; import com.google.common.base.Splitter; import com.google.common.collect.ImmutableList; import java.io.File; import java.util.AbstractList; import java.util.List; import java.util.stream.Collectors; import net.ltgt.gradle.errorprone.ErrorPronePlugin; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.file.FileCollection; import org.gradle.api.plugins.JavaPluginConvention; import org.gradle.api.tasks.compile.JavaCompile; import org.gradle.api.tasks.javadoc.Javadoc; import org.gradle.api.tasks.testing.Test; public final class BaselineErrorProne implements Plugin<Project> { @Override public void apply(Project project) { project.getPluginManager().withPlugin("java", plugin -> { project.getPluginManager().apply(ErrorPronePlugin.class); project.getDependencies().add("errorprone", "com.palantir.baseline:baseline-error-prone:latest.release"); project.getTasks().withType(JavaCompile.class) .configureEach(compile -> compile.getOptions().getCompilerArgs() .addAll(ImmutableList.of( "-XepDisableWarningsInGeneratedCode", "-Xep:EqualsHashCode:ERROR", "-Xep:EqualsIncompatibleType:ERROR"))); // Add error-prone to bootstrap classpath of tasks performing java compilation. // Since there's no way of appending to the classpath we need to explicitly add current bootstrap classpath. JavaPluginConvention javaConvention = project.getConvention().getPlugin(JavaPluginConvention.class); if (!javaConvention.getSourceCompatibility().isJava9()) { List<File> bootstrapClasspath = Splitter.on(File.pathSeparator) .splitToList(System.getProperty("sun.boot.class.path")) .stream() .map(File::new) .collect(Collectors.toList()); FileCollection errorProneFiles = project.getConfigurations().getByName("errorprone") .plus(project.files(bootstrapClasspath)); project.getTasks().withType(JavaCompile.class) .configureEach(compile -> compile.getOptions().setBootstrapClasspath(errorProneFiles)); project.getTasks().withType(Test.class) .configureEach(test -> test.setBootstrapClasspath(errorProneFiles)); project.getTasks().withType(Javadoc.class) .configureEach(javadoc -> javadoc.getOptions() .setBootClasspath(new LazyConfigurationList(errorProneFiles))); } }); } private static final class LazyConfigurationList extends AbstractList<File> { private final FileCollection files; private List<File> fileList; private LazyConfigurationList(FileCollection files) { this.files = files; } @Override public File get(int index) { if (fileList == null) { fileList = ImmutableList.copyOf(files.getFiles()); } return fileList.get(index); } @Override public int size() { if (fileList == null) { fileList = ImmutableList.copyOf(files.getFiles()); } return fileList.size(); } } }
1
6,642
any chance you could just fix the processors plugin?
palantir-gradle-baseline
java
@@ -22,7 +22,8 @@ model = dict( basesize_ratio_range=(0.15, 0.9), anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]), target_means=(.0, .0, .0, .0), - target_stds=(0.1, 0.1, 0.2, 0.2))) + target_stds=(0.1, 0.1, 0.2, 0.2), + use_depthwise=False)) cudnn_benchmark = True train_cfg = dict( assigner=dict(
1
# model settings input_size = 300 model = dict( type='SingleStageDetector', pretrained='open-mmlab://vgg16_caffe', backbone=dict( type='SSDVGG', input_size=input_size, depth=16, with_last_pool=False, ceil_mode=True, out_indices=(3, 4), out_feature_indices=(22, 34), l2_norm_scale=20), neck=None, bbox_head=dict( type='SSDHead', input_size=input_size, in_channels=(512, 1024, 512, 256, 256, 256), num_classes=81, anchor_strides=(8, 16, 32, 64, 100, 300), basesize_ratio_range=(0.15, 0.9), anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]), target_means=(.0, .0, .0, .0), target_stds=(0.1, 0.1, 0.2, 0.2))) cudnn_benchmark = True train_cfg = dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0., ignore_iof_thr=-1, gt_max_assign_all=False), smoothl1_beta=1., allowed_border=-1, pos_weight=-1, neg_pos_ratio=3, debug=False) test_cfg = dict( nms=dict(type='nms', iou_thr=0.45), min_bbox_size=0, score_thr=0.02, max_per_img=200) # model training and testing settings # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict(type='LoadAnnotations', with_bbox=True), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 4)), dict( type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(300, 300), keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='RandomFlip', flip_ratio=0.5), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(300, 300), flip=False, transforms=[ dict(type='Resize', keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( imgs_per_gpu=8, workers_per_gpu=3, train=dict( type='RepeatDataset', times=5, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline)), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) evaluation = dict(interval=1, metric='bbox') # optimizer optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4) optimizer_config = dict() # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=1.0 / 3, step=[16, 22]) checkpoint_config = dict(interval=1) # yapf:disable log_config = dict( interval=50, hooks=[ dict(type='TextLoggerHook'), # dict(type='TensorboardLoggerHook') ]) # yapf:enable # runtime settings total_epochs = 24 dist_params = dict(backend='nccl') log_level = 'INFO' work_dir = './work_dirs/ssd300_coco' load_from = None resume_from = None workflow = [('train', 1)]
1
18,594
We may keep this config unchanged since `use_depthwise` is `True` by default.
open-mmlab-mmdetection
py
@@ -566,6 +566,15 @@ privload_process_imports(privmod_t *mod) privload_locate_and_load(name, mod, false /*client dir=>true*/); if (impmod == NULL) return false; + if (strstr(name, "libpthread") == name) { + /* i#956: A private libpthread is not fully supported, but many + * libraries import some utilities from it and do not use + * threading. We load it and just do not guarantee things will + * work if thread-related routines are called. + */ + SYSLOG_INTERNAL_WARNING( + "private libpthread.so loaded but not fully supported"); + } /* i#852: identify all libs that import from DR as client libs. * XXX: this code seems stale as libdynamorio.so is already loaded * (xref #3850).
1
/* ******************************************************************************* * Copyright (c) 2011-2021 Google, Inc. All rights reserved. * Copyright (c) 2011 Massachusetts Institute of Technology All rights reserved. * *******************************************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* * loader.c: custom private library loader for Linux * * original case: i#157 */ #include "../globals.h" #include "../module_shared.h" #include "os_private.h" #include "../ir/instr.h" /* SEG_GS/SEG_FS */ #include "module.h" #include "module_private.h" #include "../heap.h" /* HEAPACCT */ #ifdef LINUX # include "include/syscall.h" # include "memquery.h" # define _GNU_SOURCE 1 # define __USE_GNU 1 # include <link.h> /* struct dl_phdr_info, must be prior to dlfcn.h */ #else # include <sys/syscall.h> #endif #include "tls.h" #include <dlfcn.h> /* dlsym */ #ifdef LINUX # include <sys/prctl.h> /* PR_SET_NAME */ #endif #include <stdlib.h> /* getenv */ #include <dlfcn.h> /* dlopen/dlsym */ #include <unistd.h> /* __environ */ #include <stddef.h> /* offsetof */ extern size_t wcslen(const wchar_t *str); /* in string.c */ /* Written during initialization only */ /* FIXME: i#460, the path lookup itself is a complicated process, * so we just list possible common but in-complete paths for now. 
*/ #define SYSTEM_LIBRARY_PATH_VAR "LD_LIBRARY_PATH" static char *ld_library_path = NULL; static const char *const system_lib_paths[] = { #ifdef X86 "/lib/tls/i686/cmov", #endif "/usr/lib", "/lib", "/usr/local/lib", /* Ubuntu: /etc/ld.so.conf.d/libc.conf */ #ifdef ANDROID "/system/lib", #endif #ifndef X64 "/usr/lib32", "/lib32", # ifdef X86 "/lib32/tls/i686/cmov", /* 32-bit Ubuntu */ "/lib/i386-linux-gnu", "/usr/lib/i386-linux-gnu", # elif defined(ARM) "/lib/arm-linux-gnueabihf", "/usr/lib/arm-linux-gnueabihf", "/lib/arm-linux-gnueabi", "/usr/lib/arm-linux-gnueabi", # endif #else /* 64-bit Ubuntu */ # ifdef X86 "/lib64/tls/i686/cmov", # endif "/usr/lib64", "/lib64", # ifdef X86 "/lib/x86_64-linux-gnu", /* /etc/ld.so.conf.d/x86_64-linux-gnu.conf */ "/usr/lib/x86_64-linux-gnu", /* /etc/ld.so.conf.d/x86_64-linux-gnu.conf */ # elif defined(AARCH64) "/lib/aarch64-linux-gnu", "/usr/lib/aarch64-linux-gnu", # endif #endif }; #define NUM_SYSTEM_LIB_PATHS (sizeof(system_lib_paths) / sizeof(system_lib_paths[0])) #define RPATH_ORIGIN "$ORIGIN" #define APP_BRK_GAP 64 * 1024 * 1024 static os_privmod_data_t *libdr_opd; #ifdef LINUX /* XXX i#1285: implement MacOS private loader */ static bool printed_gdb_commands = false; /* Global so visible in release build gdb */ static char gdb_priv_cmds[4096]; static size_t gdb_priv_cmds_sofar; #endif /* pointing to the I/O data structure in privately loaded libc, * They are used on exit when we need update file_no. */ stdfile_t **privmod_stdout; stdfile_t **privmod_stderr; stdfile_t **privmod_stdin; #define LIBC_STDOUT_NAME "stdout" #define LIBC_STDERR_NAME "stderr" #define LIBC_STDIN_NAME "stdin" #define LIBC_EARLY_INIT_NAME "__libc_early_init" /* We save the original sp from the kernel, for use by TLS setup on Android */ void *kernel_init_sp; /* forward decls */ static void privload_init_search_paths(void); static bool privload_locate(const char *name, privmod_t *dep, char *filename OUT, bool *client OUT); static privmod_t * privload_locate_and_load(const char *impname, privmod_t *dependent, bool reachable); static void privload_call_lib_func(fp_t func); static void privload_relocate_mod(privmod_t *mod); static void privload_create_os_privmod_data(privmod_t *privmod, bool dyn_reloc); static void privload_delete_os_privmod_data(privmod_t *privmod); #ifdef LINUX void privload_mod_tls_init(privmod_t *mod); void privload_mod_tls_primary_thread_init(privmod_t *mod); #endif /***************************************************************************/ /* Register a symbol file with gdb. This symbol needs to be exported so that * gdb can find it even when full debug information is unavailable. We do * *not* consider it part of DR's public API. * i#531: gdb support for private loader */ DYNAMORIO_EXPORT void dr_gdb_add_symbol_file(const char *filename, app_pc textaddr) { /* Do nothing. If gdb is attached with libdynamorio.so-gdb.py loaded, it * will stop here and lift the argument values. */ /* FIXME: This only passes the text section offset. gdb can accept * additional "-s<section> <address>" arguments to locate data sections. * This would be useful for setting watchpoints on client global variables. */ } #ifdef LINUX /* XXX i#1285: implement MacOS private loader */ static void privload_add_gdb_cmd(elf_loader_t *loader, const char *filename, bool reachable) { ASSERT_OWN_RECURSIVE_LOCK(true, &privload_lock); /* Get the text addr to register the ELF with gdb. The section headers * are not part of the mapped image, so we have to map the whole file. 
* XXX: seek to e_shoff and read the section headers to avoid this map. */ if (elf_loader_map_file(loader, reachable) != NULL) { app_pc text_addr = (app_pc)module_get_text_section(loader->file_map, loader->file_size); text_addr += loader->load_delta; print_to_buffer(gdb_priv_cmds, BUFFER_SIZE_ELEMENTS(gdb_priv_cmds), &gdb_priv_cmds_sofar, "add-symbol-file '%s' %p\n", filename, text_addr); /* Add debugging comment about how to get symbol information in gdb. */ if (printed_gdb_commands) { /* This is a dynamically loaded auxlib, so we print here. * The client and its direct dependencies are batched up and * printed in os_loader_init_epilogue. */ SYSLOG_INTERNAL_INFO("Paste into GDB to debug DynamoRIO clients:\n" "add-symbol-file '%s' %p\n", filename, text_addr); } LOG(GLOBAL, LOG_LOADER, 1, "for debugger: add-symbol-file %s %p\n", filename, text_addr); if (INTERNAL_OPTION(privload_register_gdb)) { dr_gdb_add_symbol_file(filename, text_addr); } } } #endif /* os specific loader initialization prologue before finalizing the load. */ void os_loader_init_prologue(void) { #ifndef STATIC_LIBRARY privmod_t *mod; #endif ASSERT_OWN_RECURSIVE_LOCK(true, &privload_lock); privload_init_search_paths(); #ifndef STATIC_LIBRARY /* insert libdynamorio.so */ mod = privload_insert(NULL, get_dynamorio_dll_start(), get_dynamorio_dll_end() - get_dynamorio_dll_start(), get_shared_lib_name(get_dynamorio_dll_start()), get_dynamorio_library_path()); ASSERT(mod != NULL); /* If DR was loaded by system ld.so, then .dynamic *was* relocated (i#1589) */ privload_create_os_privmod_data(mod, !DYNAMO_OPTION(early_inject)); libdr_opd = (os_privmod_data_t *)mod->os_privmod_data; DODEBUG({ if (DYNAMO_OPTION(early_inject)) { /* We've already filled the gap in dynamorio_lib_gap_empty(). We just * verify here now that we have segment info. */ int i; for (i = 0; i < libdr_opd->os_data.num_segments - 1; i++) { size_t sz = libdr_opd->os_data.segments[i + 1].start - libdr_opd->os_data.segments[i].end; if (sz > 0) { dr_mem_info_t info; bool ok = query_memory_ex_from_os(libdr_opd->os_data.segments[i].end, &info); ASSERT(ok); ASSERT(info.base_pc == libdr_opd->os_data.segments[i].end && info.size == sz && (info.type == DR_MEMTYPE_FREE || /* If we reloaded DR, our own loader filled it in. */ info.prot == DR_MEMPROT_NONE)); } } } }); mod->externally_loaded = true; # ifdef LINUX /*i#1285*/ if (DYNAMO_OPTION(early_inject)) { /* libdynamorio isn't visible to gdb so add to the cmd list */ byte *dr_base = get_dynamorio_dll_start(), *pref_base; elf_loader_t dr_ld; IF_DEBUG(bool success =) elf_loader_read_headers(&dr_ld, get_dynamorio_library_path()); ASSERT(success); module_walk_program_headers(dr_base, get_dynamorio_dll_end() - dr_base, false, false, (byte **)&pref_base, NULL, NULL, NULL, NULL); dr_ld.load_delta = dr_base - pref_base; privload_add_gdb_cmd(&dr_ld, get_dynamorio_library_path(), false /*!reach*/); elf_loader_destroy(&dr_ld); } # endif #endif } /* os specific loader initialization epilogue after finalizing the load. */ void os_loader_init_epilogue(void) { #ifdef LINUX /* XXX i#1285: implement MacOS private loader */ /* Print the add-symbol-file commands so they can be copy-pasted into gdb. * We have to do it in a single syslog so they can be copy pasted. * For non-internal builds, or for private libs loaded after this point, * the user must look at the global gdb_priv_cmds buffer in gdb. * FIXME i#531: Support attaching from the gdb script. 
*/ ASSERT(!printed_gdb_commands); printed_gdb_commands = true; if (gdb_priv_cmds_sofar > 0) { SYSLOG_INTERNAL_INFO("Paste into GDB to debug DynamoRIO clients:\n" /* Need to turn off confirm for paste to work. */ "set confirm off\n" "%s", gdb_priv_cmds); } #endif } void os_loader_exit(void) { if (libdr_opd != NULL) { HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, libdr_opd->os_data.segments, module_segment_t, libdr_opd->os_data.alloc_segments, ACCT_OTHER, PROTECTED); HEAP_TYPE_FREE(GLOBAL_DCONTEXT, libdr_opd, os_privmod_data_t, ACCT_OTHER, PROTECTED); } #ifdef LINUX /* Put printed_gdb_commands into its original state for potential * re-attaching and os_loader_init_epilogue(). */ printed_gdb_commands = false; #endif } /* These are called before loader_init for the primary thread for UNIX. */ void os_loader_thread_init_prologue(dcontext_t *dcontext) { /* do nothing */ } void os_loader_thread_init_epilogue(dcontext_t *dcontext) { /* do nothing */ } void os_loader_thread_exit(dcontext_t *dcontext) { /* do nothing */ } void privload_add_areas(privmod_t *privmod) { os_privmod_data_t *opd; uint i; /* create and init the os_privmod_data for privmod. * The os_privmod_data can only be created after heap is ready and * should be done before adding in vmvector_add, * so it can be either right before calling to privload_add_areas * in the privload_load_finalize, or in here. * We prefer here because it avoids changing the code in * loader_shared.c, which affects windows too. */ privload_create_os_privmod_data(privmod, false /* i#1589: .dynamic not relocated */); opd = (os_privmod_data_t *)privmod->os_privmod_data; for (i = 0; i < opd->os_data.num_segments; i++) { vmvector_add(modlist_areas, opd->os_data.segments[i].start, opd->os_data.segments[i].end, (void *)privmod); } } void privload_remove_areas(privmod_t *privmod) { uint i; os_privmod_data_t *opd = (os_privmod_data_t *)privmod->os_privmod_data; /* walk the program header to remove areas */ for (i = 0; i < opd->os_data.num_segments; i++) { vmvector_remove(modlist_areas, opd->os_data.segments[i].start, opd->os_data.segments[i].end); } /* NOTE: we create os_privmod_data in privload_add_areas but * do not delete here, non-symmetry. * This is because we still need the information in os_privmod_data * to unmap the segments in privload_unmap_file, which happens after * privload_remove_areas. * The create of os_privmod_data should be done when mapping the file * into memory, but the heap is not ready at that time, so postponed * until privload_add_areas. */ } void privload_unmap_file(privmod_t *privmod) { /* walk the program header to unmap files, also the tls data */ uint i; os_privmod_data_t *opd = (os_privmod_data_t *)privmod->os_privmod_data; /* unmap segments */ IF_DEBUG(size_t size_unmapped = 0); for (i = 0; i < opd->os_data.num_segments; i++) { d_r_unmap_file(opd->os_data.segments[i].start, opd->os_data.segments[i].end - opd->os_data.segments[i].start); DODEBUG({ size_unmapped += opd->os_data.segments[i].end - opd->os_data.segments[i].start; }); if (i + 1 < opd->os_data.num_segments && opd->os_data.segments[i + 1].start > opd->os_data.segments[i].end) { /* unmap the gap */ d_r_unmap_file(opd->os_data.segments[i].end, opd->os_data.segments[i + 1].start - opd->os_data.segments[i].end); DODEBUG({ size_unmapped += opd->os_data.segments[i + 1].start - opd->os_data.segments[i].end; }); } } ASSERT(size_unmapped == privmod->size); /* XXX i#3570: Better to store the MODLOAD_SEPARATE_BSS flag but there's no * simple code path to do it so we check the option. 
*/ if (INTERNAL_OPTION(separate_private_bss)) { /* unmap the extra .bss-separating page */ d_r_unmap_file(privmod->base + privmod->size, PAGE_SIZE); DODEBUG({ size_unmapped += PAGE_SIZE; }); } /* free segments */ HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, opd->os_data.segments, module_segment_t, opd->os_data.alloc_segments, ACCT_OTHER, PROTECTED); /* delete os_privmod_data */ privload_delete_os_privmod_data(privmod); } bool privload_unload_imports(privmod_t *privmod) { /* FIXME: i#474 unload dependent libraries if necessary */ return true; } #ifdef LINUX /* Core-specific functionality for elf_loader_map_phdrs(). */ static modload_flags_t privload_map_flags(modload_flags_t init_flags) { /* XXX: Keep this condition matching the check in privload_unmap_file() * (minus MODLOAD_NOT_PRIVLIB since non-privlibs don't reach our unmap). */ if (INTERNAL_OPTION(separate_private_bss) && !TEST(MODLOAD_NOT_PRIVLIB, init_flags)) { /* place an extra no-access page after .bss */ /* XXX: update privload_early_inject call to init_emulated_brk if this changes */ /* XXX: should we avoid this for -early_inject's map of the app and ld.so? */ return init_flags | MODLOAD_SEPARATE_BSS; } return init_flags; } /* Core-specific functionality for elf_loader_map_phdrs(). */ static void privload_check_new_map_bounds(elf_loader_t *elf, byte *map_base, byte *map_end) { /* This is only called for MAP_FIXED. */ if (get_dynamorio_dll_start() < map_end && get_dynamorio_dll_end() > map_base) { FATAL_USAGE_ERROR(FIXED_MAP_OVERLAPS_DR, 3, get_application_name(), get_application_pid(), elf->filename); ASSERT_NOT_REACHED(); } } #endif /* This only maps, as relocation for ELF requires processing imports first, * which we have to delay at init time at least. */ app_pc privload_map_and_relocate(const char *filename, size_t *size OUT, modload_flags_t flags) { #ifdef LINUX map_fn_t map_func; unmap_fn_t unmap_func; prot_fn_t prot_func; app_pc base = NULL; elf_loader_t loader; ASSERT_OWN_RECURSIVE_LOCK(!TEST(MODLOAD_NOT_PRIVLIB, flags), &privload_lock); /* get appropriate function */ /* NOTE: all but the client lib will be added to DR areas list b/c using * d_r_map_file() */ if (dynamo_heap_initialized && !standalone_library) { map_func = d_r_map_file; unmap_func = d_r_unmap_file; prot_func = set_protection; } else { map_func = os_map_file; unmap_func = os_unmap_file; prot_func = os_set_protection; } if (!elf_loader_read_headers(&loader, filename)) { /* We may want to move the bitwidth check out if is_elf_so_header_common() * but for now we keep that there and do another check here. * If loader.buf was not read into it will be all zeroes. */ ELF_HEADER_TYPE *elf_header = (ELF_HEADER_TYPE *)loader.buf; ELF_ALTARCH_HEADER_TYPE *altarch = (ELF_ALTARCH_HEADER_TYPE *)elf_header; if (!TEST(MODLOAD_NOT_PRIVLIB, flags) && elf_header->e_version == 1 && altarch->e_ehsize == sizeof(ELF_ALTARCH_HEADER_TYPE) && altarch->e_machine == IF_X64_ELSE(IF_AARCHXX_ELSE(EM_ARM, EM_386), IF_AARCHXX_ELSE(EM_AARCH64, EM_X86_64))) { /* XXX i#147: Should we try some path substs like s/lib32/lib64/? * Maybe it's better to error out to avoid loading some unintended lib. 
*/ SYSLOG(SYSLOG_ERROR, CLIENT_LIBRARY_WRONG_BITWIDTH, 3, get_application_name(), get_application_pid(), filename); } return NULL; } base = elf_loader_map_phdrs(&loader, false /* fixed */, map_func, unmap_func, prot_func, privload_check_new_map_bounds, privload_map_flags(flags)); if (base != NULL) { if (size != NULL) *size = loader.image_size; if (!TEST(MODLOAD_NOT_PRIVLIB, flags)) privload_add_gdb_cmd(&loader, filename, TEST(MODLOAD_REACHABLE, flags)); } elf_loader_destroy(&loader); return base; #else /* XXX i#1285: implement MacOS private loader */ return NULL; #endif } bool privload_process_imports(privmod_t *mod) { #ifdef LINUX ELF_DYNAMIC_ENTRY_TYPE *dyn; os_privmod_data_t *opd; char *strtab, *name; opd = (os_privmod_data_t *)mod->os_privmod_data; ASSERT(opd != NULL); /* 1. get DYNAMIC section pointer */ dyn = (ELF_DYNAMIC_ENTRY_TYPE *)opd->dyn; /* 2. get dynamic string table */ strtab = (char *)opd->os_data.dynstr; /* 3. depth-first recursive load, so add into the deps list first */ while (dyn->d_tag != DT_NULL) { if (dyn->d_tag == DT_NEEDED) { name = strtab + dyn->d_un.d_val; LOG(GLOBAL, LOG_LOADER, 2, "%s: %s imports from %s\n", __FUNCTION__, mod->name, name); if (privload_lookup(name) == NULL) { privmod_t *impmod = privload_locate_and_load(name, mod, false /*client dir=>true*/); if (impmod == NULL) return false; /* i#852: identify all libs that import from DR as client libs. * XXX: this code seems stale as libdynamorio.so is already loaded * (xref #3850). */ if (impmod->base == get_dynamorio_dll_start()) mod->is_client = true; } } ++dyn; } /* Relocate library's symbols after load dependent libraries (so that we * can resolve symbols in the global ELF namespace). */ if (!mod->externally_loaded) { privload_relocate_mod(mod); } return true; #else /* XXX i#1285: implement MacOS private loader */ if (!mod->externally_loaded) { privload_relocate_mod(mod); } return false; #endif } bool privload_call_entry(dcontext_t *dcontext, privmod_t *privmod, uint reason) { os_privmod_data_t *opd = (os_privmod_data_t *)privmod->os_privmod_data; ASSERT(os_get_priv_tls_base(NULL, TLS_REG_LIB) != NULL); if (reason == DLL_PROCESS_INIT) { /* calls init and init array */ LOG(GLOBAL, LOG_LOADER, 3, "%s: calling init routines of %s\n", __FUNCTION__, privmod->name); if (opd->init != NULL) { LOG(GLOBAL, LOG_LOADER, 4, "%s: calling %s init func " PFX "\n", __FUNCTION__, privmod->name, opd->init); privload_call_lib_func(opd->init); } if (opd->init_array != NULL) { uint i; for (i = 0; i < opd->init_arraysz / sizeof(opd->init_array[i]); i++) { if (opd->init_array[i] != NULL) { /* be paranoid */ LOG(GLOBAL, LOG_LOADER, 4, "%s: calling %s init array func " PFX "\n", __FUNCTION__, privmod->name, opd->init_array[i]); privload_call_lib_func(opd->init_array[i]); } } } return true; } else if (reason == DLL_PROCESS_EXIT) { /* calls fini and fini array */ #ifdef ANDROID /* i#1701: libdl.so fini routines call into libc somehow, which is * often already unmapped. We just skip them as a workaround. 
 */
        if (strcmp(privmod->name, "libdl.so") == 0) {
            LOG(GLOBAL, LOG_LOADER, 3, "%s: NOT calling fini routines of %s\n",
                __FUNCTION__, privmod->name);
            return true;
        }
#endif
        LOG(GLOBAL, LOG_LOADER, 3, "%s: calling fini routines of %s\n", __FUNCTION__,
            privmod->name);
        if (opd->fini != NULL) {
            LOG(GLOBAL, LOG_LOADER, 4, "%s: calling %s fini func " PFX "\n",
                __FUNCTION__, privmod->name, opd->fini);
            privload_call_lib_func(opd->fini);
        }
        if (opd->fini_array != NULL) {
            uint i;
            for (i = 0; i < opd->fini_arraysz / sizeof(opd->fini_array[0]); i++) {
                if (opd->fini_array[i] != NULL) { /* be paranoid */
                    LOG(GLOBAL, LOG_LOADER, 4, "%s: calling %s fini array func " PFX "\n",
                        __FUNCTION__, privmod->name, opd->fini_array[i]);
                    privload_call_lib_func(opd->fini_array[i]);
                }
            }
        }
        return true;
    }
    return false;
}

void
privload_redirect_setup(privmod_t *privmod)
{
    /* do nothing, the redirection is done when relocating */
}

void
privload_os_finalize(privmod_t *privmod)
{
#ifndef LINUX
    return; /* Nothing to do. */
#else
    if (strstr(privmod->name, "libc.so") != privmod->name)
        return;
    os_privmod_data_t *opd = (os_privmod_data_t *)privmod->os_privmod_data;
    /* Special handling for standard I/O file descriptors. */
    privmod_stdout = (FILE **)get_proc_address_from_os_data(
        &opd->os_data, opd->load_delta, LIBC_STDOUT_NAME, NULL);
    privmod_stdin = (FILE **)get_proc_address_from_os_data(
        &opd->os_data, opd->load_delta, LIBC_STDIN_NAME, NULL);
    privmod_stderr = (FILE **)get_proc_address_from_os_data(
        &opd->os_data, opd->load_delta, LIBC_STDERR_NAME, NULL);
    /* i#5133: glibc 2.32+ has ld.so call a hardcoded initializer before calling the
     * regular ELF constructors.
     */
    void (*libc_early_init)(bool) = (void (*)(bool))get_proc_address_from_os_data(
        &opd->os_data, opd->load_delta, LIBC_EARLY_INIT_NAME, NULL);
    if (libc_early_init != NULL) {
        LOG(GLOBAL, LOG_LOADER, 2, "%s: calling %s\n", __FUNCTION__,
            LIBC_EARLY_INIT_NAME);
        (*libc_early_init)(true);
    }
#endif
}

static void
privload_init_search_paths(void)
{
    privload_add_drext_path();
    ld_library_path = getenv(SYSTEM_LIBRARY_PATH_VAR);
}

static privmod_t *
privload_locate_and_load(const char *impname, privmod_t *dependent, bool reachable)
{
    char filename[MAXIMUM_PATH];
    if (privload_locate(impname, dependent, filename, &reachable))
        return privload_load(filename, dependent, reachable);
    return NULL;
}

app_pc
privload_load_private_library(const char *name, bool reachable)
{
    privmod_t *newmod;
    app_pc res = NULL;
    acquire_recursive_lock(&privload_lock);
    newmod = privload_lookup(name);
    if (newmod == NULL)
        newmod = privload_locate_and_load(name, NULL, reachable);
    else
        newmod->ref_count++;
    if (newmod != NULL)
        res = newmod->base;
    release_recursive_lock(&privload_lock);
    return res;
}

void
privload_load_finalized(privmod_t *mod)
{
    /* nothing further to do */
}

/* If runpath, then DT_RUNPATH is searched; else, DT_RPATH. */
static bool
privload_search_rpath(privmod_t *mod, bool runpath, const char *name,
                      char *filename OUT /* buffer size is MAXIMUM_PATH */)
{
#ifdef LINUX
    os_privmod_data_t *opd;
    ELF_DYNAMIC_ENTRY_TYPE *dyn;
    ASSERT(mod != NULL && "can't look for rpath without a dependent module");
    ASSERT_OWN_RECURSIVE_LOCK(true, &privload_lock);
    /* get the loading module's dir for RPATH_ORIGIN */
    opd = (os_privmod_data_t *)mod->os_privmod_data;
    /* i#460: if DT_RUNPATH exists we must ignore DT_RPATH and
     * search DT_RUNPATH after LD_LIBRARY_PATH.
     */
    if (!runpath && opd->os_data.has_runpath)
        return false;
    const char *moddir_end = strrchr(mod->path, '/');
    size_t moddir_len = (moddir_end == NULL ?
                         strlen(mod->path) : moddir_end - mod->path);
    const char *strtab;
    ASSERT(opd != NULL);
    dyn = (ELF_DYNAMIC_ENTRY_TYPE *)opd->dyn;
    strtab = (char *)opd->os_data.dynstr;
    bool lib_found = false;
    while (dyn->d_tag != DT_NULL) {
        if (dyn->d_tag == (runpath ? DT_RUNPATH : DT_RPATH)) {
            /* DT_RPATH and DT_RUNPATH are each a colon-separated list of paths */
            const char *list = strtab + dyn->d_un.d_val;
            const char *sep, *origin;
            size_t len;
            while (*list != '\0') {
                /* really we want strchrnul() */
                sep = strchr(list, ':');
                if (sep == NULL)
                    len = strlen(list);
                else
                    len = sep - list;
                /* support $ORIGIN expansion to lib's current directory */
                origin = strstr(list, RPATH_ORIGIN);
                char path[MAXIMUM_PATH];
                if (origin != NULL && origin < list + len) {
                    size_t pre_len = origin - list;
                    snprintf(path, BUFFER_SIZE_ELEMENTS(path), "%.*s%.*s%.*s", pre_len,
                             list, moddir_len, mod->path,
                             /* the '/' should already be here */
                             len - strlen(RPATH_ORIGIN) - pre_len,
                             origin + strlen(RPATH_ORIGIN));
                    NULL_TERMINATE_BUFFER(path);
                } else {
                    snprintf(path, BUFFER_SIZE_ELEMENTS(path), "%.*s", len, list);
                    NULL_TERMINATE_BUFFER(path);
                }
                if (mod->is_client) {
                    /* We are adding a client's lib rpath to the general search path.
                     * This is not fully compliant with what the loader should really
                     * do. The real problem is that the loader is walking library
                     * dependencies depth-first, while it should really search
                     * breadth-first (xref i#3850). This can lead to libraries being
                     * unlocatable, if the original client library had the proper
                     * rpath of the library, but a dependency later in the chain did
                     * not. In order to avoid this, we consider adding the rpath here
                     * relatively safe. It only affects dependent libraries of the
                     * same name in different locations. We are only doing this for
                     * client libraries, so we are not at risk of searching for the
                     * wrong system libraries.
                     */
                    if (!privload_search_path_exists(path, strlen(path))) {
                        snprintf(search_paths[search_paths_idx],
                                 BUFFER_SIZE_ELEMENTS(search_paths[search_paths_idx]),
                                 "%.*s", strlen(path), path);
                        NULL_TERMINATE_BUFFER(search_paths[search_paths_idx]);
                        LOG(GLOBAL, LOG_LOADER, 1, "%s: added search dir \"%s\"\n",
                            __FUNCTION__, search_paths[search_paths_idx]);
                        search_paths_idx++;
                    }
                }
                if (!lib_found) {
                    snprintf(filename, MAXIMUM_PATH, "%s/%s", path, name);
                    filename[MAXIMUM_PATH - 1] = 0;
                    LOG(GLOBAL, LOG_LOADER, 2, "%s: looking for %s\n", __FUNCTION__,
                        filename);
                    if (os_file_exists(filename, false /*!is_dir*/) &&
                        module_file_has_module_header(filename)) {
                        lib_found = true;
                    }
                }
                list += len;
                if (sep != NULL)
                    list += 1;
            }
        }
        ++dyn;
    }
    return lib_found;
#else
    /* XXX i#1285: implement MacOS private loader */
#endif
    return false;
}

static bool
privload_locate(const char *name, privmod_t *dep,
                char *filename OUT /* buffer size is MAXIMUM_PATH */,
                bool *reachable INOUT)
{
    uint i;
    char *lib_paths;

    /* We may be passed a full path. */
    if (name[0] == '/' && os_file_exists(name, false /*!is_dir*/)) {
        snprintf(filename, MAXIMUM_PATH, "%s", name);
        filename[MAXIMUM_PATH - 1] = 0;
        return true;
    }

    /* FIXME: We have a simple implementation of library search.
     * libc implementation can be found at elf/dl-load.c:_dl_map_object.
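     * For reference, glibc's order is roughly: DT_RPATH (ignored when
     * DT_RUNPATH is present), LD_LIBRARY_PATH, DT_RUNPATH, /etc/ld.so.cache,
     * then the default dirs; the numbered steps below approximate that,
     * minus the cache.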
*/ /* the loader search order: */ /* 0) DT_RPATH */ if (dep != NULL && privload_search_rpath(dep, false /*rpath*/, name, filename)) return true; /* 1) client lib dir */ for (i = 0; i < search_paths_idx; i++) { snprintf(filename, MAXIMUM_PATH, "%s/%s", search_paths[i], name); /* NULL_TERMINATE_BUFFER(filename) */ filename[MAXIMUM_PATH - 1] = 0; LOG(GLOBAL, LOG_LOADER, 2, "%s: looking for %s\n", __FUNCTION__, filename); if (os_file_exists(filename, false /*!is_dir*/) && module_file_has_module_header(filename)) { /* If in client or extension dir, always map it reachable */ *reachable = true; return true; } } /* 2) curpath */ snprintf(filename, MAXIMUM_PATH, "./%s", name); /* NULL_TERMINATE_BUFFER(filename) */ filename[MAXIMUM_PATH - 1] = 0; LOG(GLOBAL, LOG_LOADER, 2, "%s: looking for %s\n", __FUNCTION__, filename); if (os_file_exists(filename, false /*!is_dir*/) && module_file_has_module_header(filename)) return true; /* 3) LD_LIBRARY_PATH */ lib_paths = ld_library_path; while (lib_paths != NULL) { char *end = strstr(lib_paths, ":"); if (end != NULL) *end = '\0'; snprintf(filename, MAXIMUM_PATH, "%s/%s", lib_paths, name); if (end != NULL) { *end = ':'; end++; } /* NULL_TERMINATE_BUFFER(filename) */ filename[MAXIMUM_PATH - 1] = 0; LOG(GLOBAL, LOG_LOADER, 2, "%s: looking for %s\n", __FUNCTION__, filename); if (os_file_exists(filename, false /*!is_dir*/) && module_file_has_module_header(filename)) return true; lib_paths = end; } /* 4) DT_RUNPATH */ if (dep != NULL && privload_search_rpath(dep, true /*runpath*/, name, filename)) return true; /* 5) XXX i#460: We use our system paths instead of /etc/ld.so.cache. */ for (i = 0; i < NUM_SYSTEM_LIB_PATHS; i++) { /* First try -xarch_root for emulation. */ if (!IS_STRING_OPTION_EMPTY(xarch_root)) { string_option_read_lock(); snprintf(filename, MAXIMUM_PATH, "%s/%s/%s", DYNAMO_OPTION(xarch_root), system_lib_paths[i], name); filename[MAXIMUM_PATH - 1] = '\0'; string_option_read_unlock(); if (os_file_exists(filename, false /*!is_dir*/) && module_file_has_module_header(filename)) return true; } snprintf(filename, MAXIMUM_PATH, "%s/%s", system_lib_paths[i], name); /* NULL_TERMINATE_BUFFER(filename) */ filename[MAXIMUM_PATH - 1] = 0; LOG(GLOBAL, LOG_LOADER, 2, "%s: looking for %s\n", __FUNCTION__, filename); if (os_file_exists(filename, false /*!is_dir*/) && module_file_has_module_header(filename)) return true; } /* Cannot find the library */ /* There's a syslog in loader_init() but we want to provide the lib name */ SYSLOG(SYSLOG_ERROR, CLIENT_LIBRARY_UNLOADABLE, 4, get_application_name(), get_application_pid(), name, "\n\tUnable to locate library! Try adding path to LD_LIBRARY_PATH"); return false; } #pragma weak dlsym app_pc get_private_library_address(app_pc modbase, const char *name) { #ifdef LINUX privmod_t *mod; app_pc res; acquire_recursive_lock(&privload_lock); mod = privload_lookup_by_base(modbase); if (mod == NULL || mod->externally_loaded) { release_recursive_lock(&privload_lock); # ifdef STATIC_LIBRARY /* externally loaded, use dlsym instead */ ASSERT(!DYNAMO_OPTION(early_inject)); return dlsym(modbase, name); # else /* Only libdynamorio.so is externally_loaded and we should not be querying * for it. Unknown libs shouldn't be queried here: get_proc_address should * be used instead. */ ASSERT_NOT_REACHED(); return NULL; # endif } /* Before the heap is initialized, we store the text address in opd, so we * can't check if opd != NULL to know whether it's valid. 
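     * The early-startup fallback below therefore re-reads the module's
     * dynamic entries into a temporary os_module_data_t on each call, which
     * is acceptable since it only happens for a handful of lookups before
     * the heap exists.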
 */
    if (dynamo_heap_initialized) {
        /* opd is initialized */
        os_privmod_data_t *opd = (os_privmod_data_t *)mod->os_privmod_data;
        res = get_proc_address_from_os_data(&opd->os_data, opd->load_delta, name, NULL);
        release_recursive_lock(&privload_lock);
        return res;
    } else {
        /* opd is not initialized */
        /* get_private_library_address is first called on looking up
         * USES_DR_VERSION_NAME right after loading client_lib.
         * The os_privmod_data is not set up yet at that point because the heap
         * is not initialized, so it is possible for opd to be NULL.
         * For this case, we have to compute the temporary os_data instead.
         */
        ptr_int_t delta;
        char *soname;
        os_module_data_t os_data;
        memset(&os_data, 0, sizeof(os_data));
        if (!module_read_os_data(mod->base, false /* .dynamic not relocated (i#1589) */,
                                 &delta, &os_data, &soname)) {
            release_recursive_lock(&privload_lock);
            return NULL;
        }
        res = get_proc_address_from_os_data(&os_data, delta, name, NULL);
        release_recursive_lock(&privload_lock);
        return res;
    }
    ASSERT_NOT_REACHED();
#else
    /* XXX i#1285: implement MacOS private loader */
#endif
    return NULL;
}

static void
privload_call_lib_func(fp_t func)
{
    char dummy_str[] = "dummy";
    char *dummy_argv[2];
    /* FIXME: i#475
     * The regular loader always passes argc, argv and env to libraries
     * (see libc code elf/dl-init.c), which might be ignored by those
     * routines.
     * We create dummy argc and argv, and pass them along with the real __environ.
     */
    dummy_argv[0] = dummy_str;
    dummy_argv[1] = NULL;
    func(1, dummy_argv, our_environ);
}

bool
get_private_library_bounds(IN app_pc modbase, OUT byte **start, OUT byte **end)
{
    privmod_t *mod;
    bool found = false;

    ASSERT(start != NULL && end != NULL);
    acquire_recursive_lock(&privload_lock);
    mod = privload_lookup_by_base(modbase);
    if (mod != NULL) {
        *start = mod->base;
        *end = mod->base + mod->size;
        found = true;
    }
    release_recursive_lock(&privload_lock);
    return found;
}

#ifdef LINUX
# if !defined(STANDALONE_UNIT_TEST) && !defined(STATIC_LIBRARY)
/* XXX: This routine is called before dynamorio relocation when we are in a
 * fragile state and thus no globals access or use of ASSERT/LOG/STATS!
 */
/* If we fail to relocate dynamorio, print the error msg and abort. */
static void
privload_report_relocate_error()
{
    /* The problem is that we can't call any normal routines here, or
     * even reference global vars like string literals. We thus use
     * a char array:
     */
    const char aslr_msg[] = { 'E', 'R', 'R', 'O', 'R', ':', ' ', 'f', 'a', 'i', 'l',
                              'e', 'd', ' ', 't', 'o', ' ', 'r', 'e', 'l', 'o', 'c',
                              'a', 't', 'e', ' ', 'D', 'y', 'n', 'a', 'm', 'o', 'R',
                              'I', 'O', '!', '\n', 'P', 'l', 'e', 'a', 's', 'e', ' ',
                              'f', 'i', 'l', 'e', ' ', 'a', 'n', ' ', 'i', 's', 's',
                              'u', 'e', ' ', 'a', 't', ' ', 'h', 't', 't', 'p', ':',
                              '/', '/', 'd', 'y', 'n', 'a', 'm', 'o', 'r', 'i', 'o',
                              '.', 'o', 'r', 'g', '/', 'i', 's', 's', 'u', 'e', 's',
                              '.', '\n' };
# define STDERR_FD 2
    os_write(STDERR_FD, aslr_msg, sizeof(aslr_msg));
    dynamorio_syscall(SYS_exit_group, 1, -1);
}

/* XXX: This routine is called before dynamorio relocation when we are in a
 * fragile state and thus no globals access or use of ASSERT/LOG/STATS!
 */
/* This routine is duplicated from module_relocate_symbol and simplified
 * for only relocating dynamorio symbols.
 */
static void
privload_relocate_symbol(ELF_REL_TYPE *rel, os_privmod_data_t *opd, bool is_rela)
{
    ELF_ADDR *r_addr;
    uint r_type;
    reg_t addend;

    /* XXX: we assume ELF_REL_TYPE and ELF_RELA_TYPE only differ at the end,
     * i.e. with or without r_addend.
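     * For reference, an illustrative sketch of the generic <elf.h> layouts
     * (not our typedefs):
     *   typedef struct { Elf64_Addr r_offset; Elf64_Xword r_info; } Elf64_Rel;
     *   typedef struct { Elf64_Addr r_offset; Elf64_Xword r_info;
     *                    Elf64_Sxword r_addend; } Elf64_Rela;
     * so reading rel->r_offset and rel->r_info below is safe for both forms.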
*/ if (is_rela) addend = ((ELF_RELA_TYPE *)rel)->r_addend; else addend = 0; /* assume everything is read/writable */ r_addr = (ELF_ADDR *)(rel->r_offset + opd->load_delta); r_type = (uint)ELF_R_TYPE(rel->r_info); /* handle the most common case, i.e. ELF_R_RELATIVE */ if (r_type == ELF_R_RELATIVE) { if (is_rela) *r_addr = addend + opd->load_delta; else *r_addr += opd->load_delta; return; } else if (r_type == ELF_R_NONE) return; /* XXX i#1708: support more relocation types in bootstrap stage */ privload_report_relocate_error(); } /* XXX: This routine is called before dynamorio relocation when we are in a * fragile state and thus no globals access or use of ASSERT/LOG/STATS! */ /* This routine is duplicated from module_relocate_rel for relocating dynamorio. */ static void privload_relocate_rel(os_privmod_data_t *opd, ELF_REL_TYPE *start, ELF_REL_TYPE *end) { ELF_REL_TYPE *rel; for (rel = start; rel < end; rel++) privload_relocate_symbol(rel, opd, false); } /* XXX: This routine is called before dynamorio relocation when we are in a * fragile state and thus no globals access or use of ASSERT/LOG/STATS! */ /* This routine is duplicated from module_relocate_rela for relocating dynamorio. */ static void privload_relocate_rela(os_privmod_data_t *opd, ELF_RELA_TYPE *start, ELF_RELA_TYPE *end) { ELF_RELA_TYPE *rela; for (rela = start; rela < end; rela++) privload_relocate_symbol((ELF_REL_TYPE *)rela, opd, true); } /* XXX: This routine may be called before dynamorio relocation when we are in a * fragile state and thus no globals access or use of ASSERT/LOG/STATS! */ /* This routine is duplicated from privload_relocate_os_privmod_data */ static void privload_early_relocate_os_privmod_data(os_privmod_data_t *opd, byte *mod_base) { if (opd->rel != NULL) privload_relocate_rel(opd, opd->rel, opd->rel + opd->relsz / opd->relent); if (opd->rela != NULL) privload_relocate_rela(opd, opd->rela, opd->rela + opd->relasz / opd->relaent); if (opd->jmprel != NULL) { if (opd->pltrel == DT_REL) { privload_relocate_rel(opd, (ELF_REL_TYPE *)opd->jmprel, (ELF_REL_TYPE *)(opd->jmprel + opd->pltrelsz)); } else if (opd->pltrel == DT_RELA) { privload_relocate_rela(opd, (ELF_RELA_TYPE *)opd->jmprel, (ELF_RELA_TYPE *)(opd->jmprel + opd->pltrelsz)); } else { privload_report_relocate_error(); } } } # endif /* !defined(STANDALONE_UNIT_TEST) && !defined(STATIC_LIBRARY) */ /* This routine is duplicated at privload_early_relocate_os_privmod_data. */ static void privload_relocate_os_privmod_data(os_privmod_data_t *opd, byte *mod_base) { if (opd->rel != NULL) { module_relocate_rel(mod_base, opd, opd->rel, opd->rel + opd->relsz / opd->relent); } if (opd->rela != NULL) { module_relocate_rela(mod_base, opd, opd->rela, opd->rela + opd->relasz / opd->relaent); } if (opd->jmprel != NULL) { app_pc jmprel_start = opd->jmprel; app_pc jmprel_end = opd->jmprel + opd->pltrelsz; /* i#5080: Some libs list JMPREL as overlapping with REL{,A} and it's implied * that really JMPREL comes after. 
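         * E.g., with DT_REL covering [R, R+relsz) and DT_JMPREL pointing
         * inside that range, the clamping below advances jmprel_start to
         * R+relsz so each entry is relocated exactly once.
         */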
*/ if (opd->rel != NULL && jmprel_start >= (app_pc)opd->rel && jmprel_start < (app_pc)(opd->rel + opd->relsz / opd->relent)) jmprel_start = (app_pc)(opd->rel + opd->relsz / opd->relent); if (opd->rela != NULL && jmprel_start >= (app_pc)opd->rela && jmprel_start < (app_pc)(opd->rela + opd->relasz / opd->relaent)) jmprel_start = (app_pc)(opd->rela + opd->relasz / opd->relaent); if (opd->pltrel == DT_REL) { module_relocate_rel(mod_base, opd, (ELF_REL_TYPE *)jmprel_start, (ELF_REL_TYPE *)jmprel_end); } else if (opd->pltrel == DT_RELA) { module_relocate_rela(mod_base, opd, (ELF_RELA_TYPE *)jmprel_start, (ELF_RELA_TYPE *)jmprel_end); } else { ASSERT(false); } } } #endif /* LINUX */ static void privload_relocate_mod(privmod_t *mod) { #ifdef LINUX os_privmod_data_t *opd = (os_privmod_data_t *)mod->os_privmod_data; ASSERT_OWN_RECURSIVE_LOCK(true, &privload_lock); LOG(GLOBAL, LOG_LOADER, 3, "relocating %s\n", mod->name); /* If module has tls block need update its tls offset value. * This must be done *before* relocating as relocating needs the os_privmod_data_t * TLS fields set here. */ if (opd->tls_block_size != 0) privload_mod_tls_init(mod); privload_relocate_os_privmod_data(opd, mod->base); /* For the primary thread, we now perform TLS block copying, after relocating. * For subsequent threads this is done in privload_tls_init(). */ if (opd->tls_block_size != 0) privload_mod_tls_primary_thread_init(mod); #else /* XXX i#1285: implement MacOS private loader */ #endif } static void privload_create_os_privmod_data(privmod_t *privmod, bool dyn_reloc) { os_privmod_data_t *opd; opd = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, os_privmod_data_t, ACCT_OTHER, PROTECTED); privmod->os_privmod_data = opd; memset(opd, 0, sizeof(*opd)); /* walk the module's program header to get privmod information */ module_walk_program_headers(privmod->base, privmod->size, false, /* segments are remapped */ dyn_reloc, &opd->os_data.base_address, NULL, &opd->max_end, &opd->soname, &opd->os_data); module_get_os_privmod_data(privmod->base, privmod->size, false /*!relocated*/, opd); } static void privload_delete_os_privmod_data(privmod_t *privmod) { HEAP_TYPE_FREE(GLOBAL_DCONTEXT, privmod->os_privmod_data, os_privmod_data_t, ACCT_OTHER, PROTECTED); privmod->os_privmod_data = NULL; } /* i#1589: the client lib is already on the priv lib list, so we share its * data with loaded_module_areas (which also avoids problems with .dynamic * not being relocated for priv libs). 
*/ bool privload_fill_os_module_info(app_pc base, OUT app_pc *out_base /* relative pc */, OUT app_pc *out_max_end /* relative pc */, OUT char **out_soname, OUT os_module_data_t *out_data) { bool res = false; privmod_t *privmod; acquire_recursive_lock(&privload_lock); privmod = privload_lookup_by_base(base); if (privmod != NULL) { os_privmod_data_t *opd = (os_privmod_data_t *)privmod->os_privmod_data; if (out_base != NULL) *out_base = opd->os_data.base_address; if (out_max_end != NULL) *out_max_end = opd->max_end; if (out_soname != NULL) *out_soname = opd->soname; if (out_data != NULL) module_copy_os_data(out_data, &opd->os_data); res = true; } release_recursive_lock(&privload_lock); return res; } /**************************************************************************** * Function Redirection */ #if defined(LINUX) && !defined(ANDROID) /* These are not yet supported by Android's Bionic */ void * redirect___tls_get_addr(); void * redirect____tls_get_addr(); #endif #ifdef LINUX static int redirect_dl_iterate_phdr(int (*callback)(struct dl_phdr_info *info, size_t size, void *data), void *data) { int res = 0; struct dl_phdr_info info; privmod_t *mod; acquire_recursive_lock(&privload_lock); for (mod = privload_first_module(); mod != NULL; mod = privload_next_module(mod)) { ELF_HEADER_TYPE *elf_hdr = (ELF_HEADER_TYPE *)mod->base; os_privmod_data_t *opd = (os_privmod_data_t *)mod->os_privmod_data; /* We do want to include externally loaded (if any) and clients as * clients can contain C++ exception code, which will call here. */ if (mod->base == get_dynamorio_dll_start()) continue; info.dlpi_addr = opd->load_delta; info.dlpi_name = mod->name; info.dlpi_phdr = (ELF_PROGRAM_HEADER_TYPE *)(mod->base + elf_hdr->e_phoff); info.dlpi_phnum = elf_hdr->e_phnum; res = callback(&info, sizeof(info), data); if (res != 0) break; } release_recursive_lock(&privload_lock); return res; } # if defined(ARM) && !defined(ANDROID) typedef struct _unwind_callback_data_t { void *pc; void *base; int size; } unwind_callback_data_t; /* Find the exception unwind table (exidx) of the image that contains the * exception pc. 
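 * On ARM EHABI the unwinder calls __gnu_Unwind_Find_exidx() with the faulting
 * PC; each PT_ARM_EXIDX entry is 8 bytes (a prel31 offset plus unwind data),
 * which is why *count below divides the segment size by 8.
 */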
*/ int exidx_lookup_callback(struct dl_phdr_info *info, size_t size, void *data) { int i; int res = 0; unwind_callback_data_t *ucd; if (data == NULL || size != sizeof(*info)) return res; ucd = (unwind_callback_data_t *)data; for (i = 0; i < info->dlpi_phnum; i++) { /* look for the table */ if (info->dlpi_phdr[i].p_type == PT_ARM_EXIDX) { /* the location and size of the table for the image */ ucd->base = (void *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr); ucd->size = info->dlpi_phdr[i].p_memsz; } /* look for the segment */ if (res == 0 && info->dlpi_phdr[i].p_type == PT_LOAD) { if (ucd->pc >= (void *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr) && ucd->pc < (void *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr + info->dlpi_phdr[i].p_memsz)) { res = 1; } } } return res; } /* find the exception unwind table that contains the PC during an exception */ void * redirect___gnu_Unwind_Find_exidx(void *pc, int *count) { unwind_callback_data_t ucd; memset(&ucd, 0, sizeof(ucd)); ucd.pc = pc; if (redirect_dl_iterate_phdr(exidx_lookup_callback, &ucd) <= 0) return NULL; if (count != NULL) *count = ucd.size / 8 /* exidx table entry size */; return ucd.base; } # endif /* ARM && !ANDROID */ #endif /* LINUX */ typedef struct _redirect_import_t { const char *name; app_pc func; } redirect_import_t; static const redirect_import_t redirect_imports[] = { { "calloc", (app_pc)redirect_calloc }, { "malloc", (app_pc)redirect_malloc }, { "free", (app_pc)redirect_free }, { "realloc", (app_pc)redirect_realloc }, { "strdup", (app_pc)redirect_strdup }, /* TODO i#4243: we should also redirect functions including: * + malloc_usable_size, memalign, valloc, mallinfo, mallopt, etc. * + tcmalloc: tc_malloc, tc_free, etc. * + __libc_malloc, __libc_free, etc. * + OSX: malloc_zone_malloc, etc.? Or just malloc_create_zone? * + C++ operators in case they don't just call libc malloc? 
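     * Redirection here is what keeps a private lib's allocations inside DR's
     * own heap: without it, a private copy of libc would service malloc()
     * from the app's arena and mix allocator state with the app's.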
     */
#if defined(LINUX) && !defined(ANDROID)
    { "__tls_get_addr", (app_pc)redirect___tls_get_addr },
    { "___tls_get_addr", (app_pc)redirect____tls_get_addr },
#endif
#ifdef LINUX
    /* i#1717: C++ exceptions call this */
    { "dl_iterate_phdr", (app_pc)redirect_dl_iterate_phdr },
# if defined(ARM) && !defined(ANDROID)
    /* i#1717: C++ exceptions call this on ARM Linux */
    { "__gnu_Unwind_Find_exidx", (app_pc)redirect___gnu_Unwind_Find_exidx },
# endif
#endif
    /* We need these for clients that don't use libc (i#1747) */
    { "strlen", (app_pc)strlen },
    { "wcslen", (app_pc)wcslen },
    { "strchr", (app_pc)strchr },
    { "strrchr", (app_pc)strrchr },
    { "strncpy", (app_pc)strncpy },
    { "memcpy", (app_pc)memcpy },
    { "memset", (app_pc)memset },
    { "memmove", (app_pc)memmove },
    { "strncat", (app_pc)strncat },
    { "strcmp", (app_pc)strcmp },
    { "strncmp", (app_pc)strncmp },
    { "memcmp", (app_pc)memcmp },
    { "strstr", (app_pc)strstr },
    { "strcasecmp", (app_pc)strcasecmp },
    /* Also redirect the _chk versions (i#1747, i#46) */
    { "memcpy_chk", (app_pc)memcpy },
    { "memset_chk", (app_pc)memset },
    { "memmove_chk", (app_pc)memmove },
    { "strncpy_chk", (app_pc)strncpy },
};
#define REDIRECT_IMPORTS_NUM (sizeof(redirect_imports) / sizeof(redirect_imports[0]))

#ifdef DEBUG
static const redirect_import_t redirect_debug_imports[] = {
    { "calloc", (app_pc)redirect_calloc_initonly },
    { "malloc", (app_pc)redirect_malloc_initonly },
    { "free", (app_pc)redirect_free_initonly },
    { "realloc", (app_pc)redirect_realloc_initonly },
    { "strdup", (app_pc)redirect_strdup_initonly },
};
# define REDIRECT_DEBUG_IMPORTS_NUM \
    (sizeof(redirect_debug_imports) / sizeof(redirect_debug_imports[0]))
#endif

bool
privload_redirect_sym(ptr_uint_t *r_addr, const char *name)
{
    int i;
    /* iterate over all symbols and redirect syms when necessary, e.g. malloc */
#ifdef DEBUG
    if (disallow_unsafe_static_calls) {
        for (i = 0; i < REDIRECT_DEBUG_IMPORTS_NUM; i++) {
            if (strcmp(redirect_debug_imports[i].name, name) == 0) {
                *r_addr = (ptr_uint_t)redirect_debug_imports[i].func;
                return true;
            }
        }
    }
#endif
    for (i = 0; i < REDIRECT_IMPORTS_NUM; i++) {
        if (strcmp(redirect_imports[i].name, name) == 0) {
            *r_addr = (ptr_uint_t)redirect_imports[i].func;
            return true;
        }
    }
    return false;
}

/***************************************************************************
 * DynamoRIO Early Injection Code
 */

#ifdef LINUX
# if !defined(STANDALONE_UNIT_TEST) && !defined(STATIC_LIBRARY)
/* Find the auxiliary vector and adjust it to look as if the kernel had set up
 * the stack for the ELF mapped at map. The auxiliary vector starts after the
 * terminating NULL pointer in the envp array.
 */
static void
privload_setup_auxv(char **envp, app_pc map, ptr_int_t delta, app_pc interp_map,
                    const char *exe_path /*must be persistent*/)
{
    ELF_AUXV_TYPE *auxv;
    ELF_HEADER_TYPE *elf = (ELF_HEADER_TYPE *)map;

    /* The aux vector is after the last environment pointer.
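     * Kernel-built stack layout, from low to high addresses:
     *   argc | argv[0..argc-1] | NULL | envp[...] | NULL | auxv pairs | AT_NULL
     * so walking envp to its terminating NULL lands us at the first
     * ELF_AUXV_TYPE entry.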
*/ while (*envp != NULL) envp++; auxv = (ELF_AUXV_TYPE *)(envp + 1); /* fix up the auxv entries that refer to the executable */ for (; auxv->a_type != AT_NULL; auxv++) { /* the actual addr should be: (base + offs) or (v_addr + delta) */ switch (auxv->a_type) { case AT_ENTRY: auxv->a_un.a_val = (ptr_int_t)elf->e_entry + delta; LOG(GLOBAL, LOG_LOADER, 2, "AT_ENTRY: " PFX "\n", auxv->a_un.a_val); break; case AT_PHDR: auxv->a_un.a_val = (ptr_int_t)map + elf->e_phoff; LOG(GLOBAL, LOG_LOADER, 2, "AT_PHDR: " PFX "\n", auxv->a_un.a_val); break; case AT_PHENT: auxv->a_un.a_val = (ptr_int_t)elf->e_phentsize; break; case AT_PHNUM: auxv->a_un.a_val = (ptr_int_t)elf->e_phnum; break; case AT_BASE: /* Android loader reads this */ auxv->a_un.a_val = (ptr_int_t)interp_map; LOG(GLOBAL, LOG_LOADER, 2, "AT_BASE: " PFX "\n", auxv->a_un.a_val); break; case AT_EXECFN: /* Android loader references this, unclear what for */ auxv->a_un.a_val = (ptr_int_t)exe_path; LOG(GLOBAL, LOG_LOADER, 2, "AT_EXECFN: " PFX " %s\n", auxv->a_un.a_val, (char *)auxv->a_un.a_val); break; /* The rest of these AT_* values don't seem to be important to the * loader, but we log them. */ case AT_EXECFD: LOG(GLOBAL, LOG_LOADER, 2, "AT_EXECFD: %d\n", auxv->a_un.a_val); break; } } } /* Entry point for ptrace injection. */ static void takeover_ptrace(ptrace_stack_args_t *args) { static char home_var[MAXIMUM_PATH + 6 /*HOME=path\0*/]; static char *fake_envp[] = { home_var, NULL }; /* When we come in via ptrace, we have no idea where the environment * pointer is. We could use /proc/self/environ to read it or go searching * near the stack base. However, both are fragile and we don't really need * the environment for anything except for option passing. In the initial * ptraced process, we can assume our options are in a config file and not * the environment, so we just set an environment with HOME. */ snprintf(home_var, BUFFER_SIZE_ELEMENTS(home_var), "HOME=%s", args->home_dir); NULL_TERMINATE_BUFFER(home_var); dynamorio_set_envp(fake_envp); dynamorio_app_init(); /* FIXME i#37: takeover other threads */ /* We need to wait until dr_inject_process_run() is called to finish * takeover, and this is an easy way to stop and return control to the * injector. */ dynamorio_syscall(SYS_kill, 2, get_process_id(), SIGTRAP); dynamo_start(&args->mc); } static void reserve_brk(app_pc post_app) { /* We haven't parsed the options yet, so we rely on drinjectlib * setting this env var if the user passed -no_emulate_brk: */ if (getenv(DYNAMORIO_VAR_NO_EMULATE_BRK) == NULL) { /* i#1004: we're going to emulate the brk via our own mmap. * Reserve the initial brk now before any of DR's mmaps to avoid overlap. */ dynamo_options.emulate_brk = true; /* not parsed yet */ init_emulated_brk(post_app); } else { /* i#1004: as a workaround, reserve some space for sbrk() during early injection * before initializing DR's heap. With early injection, the program break comes * somewhere after DR's bss section, subject to some ASLR. When we allocate our * heap, sometimes we mmap right over the break, so any brk() calls will fail. * When brk() fails, most malloc() implementations fall back to mmap(). * However, sometimes libc startup code needs to allocate memory before libc is * initialized. In this case it calls brk(), and will crash if it fails. * * Ideally we'd just set the break to follow the app's exe, but the kernel * forbids setting the break to a value less than the current break. 
I also
     * tried to reserve memory by increasing the break by ~20 pages and then
     * resetting it, but the kernel unreserves it. The current workaround is to
     * increase the break by 1. The loader needs to allocate more than a page of
     * memory, so this doesn't guarantee that further brk() calls will succeed.
     * However, I haven't observed any brk() failures after adding this workaround.
     */
        ptr_int_t start_brk;
        ASSERT(!dynamo_heap_initialized);
        start_brk = dynamorio_syscall(SYS_brk, 1, 0);
        dynamorio_syscall(SYS_brk, 1, start_brk + 1);
        /* I'd log the results, but logs aren't initialized yet. */
    }
}

byte *
map_exe_file_and_brk(file_t f, size_t *size INOUT, uint64 offs, app_pc addr, uint prot,
                     map_flags_t map_flags)
{
    /* A little hacky: we assume the MEMPROT_NONE is the overall mmap for the whole
     * region, where our goal is to push it back for top-down PIE filling to leave
     * room for a reasonable brk.
     */
    if (prot == MEMPROT_NONE && offs == 0) {
        size_t sz_with_brk = *size + APP_BRK_GAP;
        byte *res = os_map_file(f, &sz_with_brk, offs, addr, prot, map_flags);
        if (res != NULL)
            os_unmap_file(res + sz_with_brk - APP_BRK_GAP, APP_BRK_GAP);
        *size = sz_with_brk - APP_BRK_GAP;
        return res;
    } else
        return os_map_file(f, size, offs, addr, prot, map_flags);
}

/* XXX: This routine is called before dynamorio relocation when we are in a
 * fragile state and thus no globals access or use of ASSERT/LOG/STATS!
 */
/* This routine is partially duplicated from module_get_os_privmod_data.
 * It partially fills the os_privmod_data for dynamorio relocation.
 * Return true if relocation is required.
 */
static bool
privload_get_os_privmod_data(app_pc base, OUT os_privmod_data_t *opd)
{
    app_pc mod_base, mod_end;
    ELF_HEADER_TYPE *elf_hdr = (ELF_HEADER_TYPE *)base;
    ELF_PROGRAM_HEADER_TYPE *prog_hdr;
    uint i;

    /* walk program headers to get mod_base, mod_end, and delta */
    mod_base = module_vaddr_from_prog_header(base + elf_hdr->e_phoff,
                                             elf_hdr->e_phnum, NULL, &mod_end);
    /* delta from preferred address, used to calculate the real address */
    opd->load_delta = base - mod_base;
    /* At this point one could consider returning false if the load_delta
     * is zero. However, this optimisation was found to give only a small
     * benefit, and is not safe if RELA relocations are in use. In particular,
     * it did not work on AArch64 when libdynamorio.so was built with the BFD
     * linker from Debian's binutils 2.26-8.
     */

    /* walk program headers to get dynamic section pointer */
    prog_hdr = (ELF_PROGRAM_HEADER_TYPE *)(base + elf_hdr->e_phoff);
    for (i = 0; i < elf_hdr->e_phnum; i++) {
        if (prog_hdr->p_type == PT_DYNAMIC) {
            opd->dyn = (ELF_DYNAMIC_ENTRY_TYPE *)(prog_hdr->p_vaddr + opd->load_delta);
            opd->dynsz = prog_hdr->p_memsz;
# ifdef DEBUG
        } else if (prog_hdr->p_type == PT_TLS && prog_hdr->p_memsz > 0) {
            /* XXX: we assume libdynamorio has no tls block b/c we're not calling
             * privload_relocate_mod().
             */
            privload_report_relocate_error();
# endif /* DEBUG */
        }
        ++prog_hdr;
    }
    if (opd->dyn == NULL)
        return false;
    module_init_os_privmod_data_from_dyn(opd, opd->dyn, opd->load_delta);
    return true;
}

/* XXX: This routine is called before dynamorio relocation when we are in a
 * fragile state and thus no globals access or use of ASSERT/LOG/STATS!
 */
/* This routine is duplicated from is_elf_so_header_common.
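 * It is kept dependency-free on purpose: it runs pre-relocation, so it may
 * only read the mapped header directly (no asserts, logging, or globals).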
*/ static bool privload_mem_is_elf_so_header(byte *mem) { /* assume we can directly read from mem */ ELF_HEADER_TYPE *elf_hdr = (ELF_HEADER_TYPE *)mem; /* ELF magic number */ if (elf_hdr->e_ident[EI_MAG0] != ELFMAG0 || elf_hdr->e_ident[EI_MAG1] != ELFMAG1 || elf_hdr->e_ident[EI_MAG2] != ELFMAG2 || elf_hdr->e_ident[EI_MAG3] != ELFMAG3) return false; /* libdynamorio should be ET_DYN */ if (elf_hdr->e_type != ET_DYN) return false; /* ARM or X86 */ /* i#1684: We do allow mixing arches of the same bitwidth. See the i#1684 * comment in is_elf_so_header_common(). */ if ( # ifdef X64 elf_hdr->e_machine != EM_X86_64 && elf_hdr->e_machine != EM_AARCH64 # else elf_hdr->e_machine != EM_386 && elf_hdr->e_machine != EM_ARM # endif ) return false; if (elf_hdr->e_ehsize != sizeof(ELF_HEADER_TYPE)) return false; return true; } /* Returns false if the text-data gap is not empty. Else, fills the gap with * no-access mappings and returns true. */ static bool dynamorio_lib_gap_empty(void) { /* XXX: get_dynamorio_dll_start() is already calling * memquery_library_bounds_by_iterator() which is doing this maps walk: can we * avoid this extra walk by somehow passing info back to us? Have an * "interrupted" output param or sthg and is_dynamorio_dll_interrupted()? */ memquery_iter_t iter; bool res = true; if (memquery_iterator_start(&iter, NULL, false /*no heap*/)) { byte *dr_start = get_dynamorio_dll_start(); byte *dr_end = get_dynamorio_dll_end(); byte *gap_start = dr_start; const char *dynamorio_library_path = get_dynamorio_library_path(); while (memquery_iterator_next(&iter) && iter.vm_start < dr_end) { if (iter.vm_start >= dr_start && iter.vm_end <= dr_end && iter.comment[0] != '\0' && /* i#3799: ignore the kernel labeling DR's .bss as "[heap]". */ strcmp(iter.comment, "[heap]") != 0 && strcmp(iter.comment, dynamorio_library_path) != 0) { /* There's a non-anon mapping inside: probably vvar and/or vdso. */ res = false; break; } /* i#1659: fill in the text-data segment gap to ensure no mmaps in between. * The kernel does not do this. Our private loader does, so if we reloaded * ourselves this is already in place. We do this now rather than in * os_loader_init_prologue() to prevent our brk mmap from landing here. */ if (iter.vm_start > gap_start) { size_t sz = iter.vm_start - gap_start; ASSERT(sz > 0); DEBUG_DECLARE(byte *fill =) os_map_file(-1, &sz, 0, gap_start, MEMPROT_NONE, MAP_FILE_COPY_ON_WRITE | MAP_FILE_FIXED); ASSERT(fill != NULL); gap_start = iter.vm_end; } else if (iter.vm_end > gap_start) { gap_start = iter.vm_end; } } memquery_iterator_stop(&iter); } return res; } /* XXX: This routine is called before dynamorio relocation when we are in a * fragile state and thus no globals access or use of ASSERT/LOG/STATS! */ void relocate_dynamorio(byte *dr_map, size_t dr_size, byte *sp) { ptr_uint_t argc = *(ptr_uint_t *)sp; /* Plus 2 to skip argc and null pointer that terminates argv[]. */ const char **env = (const char **)sp + argc + 2; os_privmod_data_t opd = { { 0 } }; os_page_size_init(env, true); if (dr_map == NULL) { /* we do not know where dynamorio is, so check backward page by page */ dr_map = (app_pc)ALIGN_BACKWARD((ptr_uint_t)relocate_dynamorio, PAGE_SIZE); while (dr_map != NULL && !privload_mem_is_elf_so_header(dr_map)) { dr_map -= PAGE_SIZE; } } if (dr_map == NULL) privload_report_relocate_error(); /* Relocate it */ if (privload_get_os_privmod_data(dr_map, &opd)) privload_early_relocate_os_privmod_data(&opd, dr_map); } /* i#1227: on a conflict with the app we reload ourselves. * Does not return. 
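 * Outline: temporarily mmap PROT_NONE over the free pieces of
 * [conflict_start, conflict_end), let the kernel choose a base for a second
 * libdynamorio mapping, relocate it, drop the temp maps, and transfer
 * control to the new copy's _start, passing the old bounds for unmapping.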
*/ static void reload_dynamorio(void **init_sp, app_pc conflict_start, app_pc conflict_end) { elf_loader_t dr_ld; os_privmod_data_t opd; byte *dr_map; /* We expect at most vvar+vdso+stack+vsyscall => 5 different mappings * even if they were all in the conflict area. */ # define MAX_TEMP_MAPS 16 byte *temp_map[MAX_TEMP_MAPS]; size_t temp_size[MAX_TEMP_MAPS]; uint num_temp_maps = 0, i; memquery_iter_t iter; app_pc entry; byte *cur_dr_map = get_dynamorio_dll_start(); byte *cur_dr_end = get_dynamorio_dll_end(); size_t dr_size = cur_dr_end - cur_dr_map; IF_DEBUG(bool success =) elf_loader_read_headers(&dr_ld, get_dynamorio_library_path()); ASSERT(success); /* XXX: have better strategy for picking base: currently we rely on * the kernel picking an address, so we have to block out the conflicting * region first, avoiding any existing mappings (like vvar+vdso: i#2641). */ if (memquery_iterator_start(&iter, NULL, false /*no heap*/)) { /* Strategy: track the leading edge ("tocover_start") of the conflict region. * Find the next block beyond that edge so we know the safe endpoint for a * temp mmap. */ byte *tocover_start = conflict_start; while (memquery_iterator_next(&iter)) { if (iter.vm_start > tocover_start) { temp_map[num_temp_maps] = tocover_start; temp_size[num_temp_maps] = MIN(iter.vm_start, conflict_end) - tocover_start; tocover_start = iter.vm_end; if (temp_size[num_temp_maps] > 0) { temp_map[num_temp_maps] = os_map_file( -1, &temp_size[num_temp_maps], 0, temp_map[num_temp_maps], MEMPROT_NONE, MAP_FILE_COPY_ON_WRITE | MAP_FILE_FIXED); ASSERT(temp_map[num_temp_maps] != NULL); num_temp_maps++; } } else if (iter.vm_end > tocover_start) { tocover_start = iter.vm_end; } if (iter.vm_start >= conflict_end) break; } memquery_iterator_stop(&iter); if (tocover_start < conflict_end) { temp_map[num_temp_maps] = tocover_start; temp_size[num_temp_maps] = conflict_end - tocover_start; temp_map[num_temp_maps] = os_map_file(-1, &temp_size[num_temp_maps], 0, temp_map[num_temp_maps], MEMPROT_NONE, MAP_FILE_COPY_ON_WRITE | MAP_FILE_FIXED); ASSERT(temp_map[num_temp_maps] != NULL); num_temp_maps++; } } /* Now load the 2nd libdynamorio.so */ dr_map = elf_loader_map_phdrs(&dr_ld, false /*!fixed*/, os_map_file, os_unmap_file, os_set_protection, privload_check_new_map_bounds, privload_map_flags(0 /*!reachable*/)); ASSERT(dr_map != NULL); ASSERT(is_elf_so_header(dr_map, 0)); /* Relocate it */ memset(&opd, 0, sizeof(opd)); module_get_os_privmod_data(dr_map, dr_size, false /*!relocated*/, &opd); /* XXX: we assume libdynamorio has no tls block b/c we're not calling * privload_relocate_mod(). */ ASSERT(opd.tls_block_size == 0); privload_relocate_os_privmod_data(&opd, dr_map); for (i = 0; i < num_temp_maps; i++) os_unmap_file(temp_map[i], temp_size[i]); entry = (app_pc)dr_ld.ehdr->e_entry + dr_ld.load_delta; elf_loader_destroy(&dr_ld); /* Now we transfer control unconditionally to the new DR's _start, after * first restoring init_sp. We pass along the current (old) DR's bounds * for removal. */ xfer_to_new_libdr(entry, init_sp, cur_dr_map, dr_size); ASSERT_NOT_REACHED(); } /* Called from _start in x86.asm. sp is the initial app stack pointer that the * kernel set up for us, and it points to the usual argc, argv, envp, and auxv * that the kernel puts on the stack. The 2nd & 3rd args must be 0 in * the initial call. * * We assume that _start has already called relocate_dynamorio() for us and * that it is now safe to access globals. 
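 * (At entry *sp is argc, followed by argv[], a terminating NULL, envp[],
 * another NULL, and the auxv, exactly as the kernel builds it for a fresh
 * process.)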
*/ void privload_early_inject(void **sp, byte *old_libdr_base, size_t old_libdr_size) { ptr_int_t *argc = (ptr_int_t *)sp; /* Kernel writes an elf_addr_t. */ char **argv = (char **)sp + 1; char **envp = argv + *argc + 1; app_pc entry = NULL; char *exe_path; char *exe_basename; app_pc exe_map, exe_end; elf_loader_t exe_ld; const char *interp; priv_mcontext_t mc; bool success; memquery_iter_t iter; app_pc interp_map; if (*argc == ARGC_PTRACE_SENTINEL) { /* XXX: Teach the injector to look up takeover_ptrace() and call it * directly instead of using this sentinel. We come here because we * can easily find the address of _start in the ELF header. */ takeover_ptrace((ptrace_stack_args_t *)sp); ASSERT_NOT_REACHED(); } kernel_init_sp = (void *)sp; /* XXX i#47: for Linux, we can't easily have this option on by default as * code like get_application_short_name() called from drpreload before * even _init is run needs to have a non-early default. */ dynamo_options.early_inject = true; /* i#1227: if we reloaded ourselves, unload the old libdynamorio */ if (old_libdr_base != NULL) { /* i#2641: we can't blindly unload the whole region as vvar+vdso may be * in the text-data gap. */ const char *dynamorio_library_path = get_dynamorio_library_path(); if (memquery_iterator_start(&iter, NULL, false /*no heap*/)) { while (memquery_iterator_next(&iter)) { if (iter.vm_start >= old_libdr_base && iter.vm_end <= old_libdr_base + old_libdr_size && (iter.comment[0] == '\0' /* .bss */ || /* The kernel sometimes mis-labels our .bss as "[heap]". */ strcmp(iter.comment, "[heap]") == 0 || strcmp(iter.comment, dynamorio_library_path) == 0)) { os_unmap_file(iter.vm_start, iter.vm_end - iter.vm_start); } if (iter.vm_start >= old_libdr_base + old_libdr_size) break; } memquery_iterator_stop(&iter); } } dynamorio_set_envp(envp); /* argv[0] doesn't actually have to be the path to the exe, so we put the * real exe path in an environment variable. */ exe_path = getenv(DYNAMORIO_VAR_EXE_PATH); /* i#1677: this happens upon re-launching within gdb, so provide a nice error */ if (exe_path == NULL) { /* i#1677: avoid assert in get_application_name_helper() */ set_executable_path("UNKNOWN"); apicheck(exe_path != NULL, DYNAMORIO_VAR_EXE_PATH " env var is not set. " "Are you re-launching within gdb?"); } /* i#907: We can't rely on /proc/self/exe for the executable path, so we * have to tell get_application_name() to use this path. */ set_executable_path(exe_path); /* XXX i#2662: Currently, we only support getting args for early injection. * Add support for late injection. */ set_app_args((int *)argc, argv); success = elf_loader_read_headers(&exe_ld, exe_path); apicheck(success, "Failed to read app ELF headers. Check path and " "architecture."); /* Initialize DR's options to avoid syslogs in get_dynamo_library_bounds() and * for the -xarch_root option below. */ dynamorio_app_init_part_one_options(); /* Find range of app */ exe_map = module_vaddr_from_prog_header((app_pc)exe_ld.phdrs, exe_ld.ehdr->e_phnum, NULL, &exe_end); /* i#1227: on a conflict with the app (+ room for the brk): reload ourselves */ if (get_dynamorio_dll_start() < exe_end + APP_BRK_GAP && get_dynamorio_dll_end() > exe_map) { elf_loader_destroy(&exe_ld); reload_dynamorio(sp, exe_map, exe_end + APP_BRK_GAP); ASSERT_NOT_REACHED(); } /* i#2641: we can't handle something in the text-data gap. 
* Various parts of DR assume there's nothing inside (and we even fill the * gap with a PROT_NONE mmap later: i#1659), so we reload to avoid it, * under the assumption that it's rare and we're not paying this cost * very often. */ if (!dynamorio_lib_gap_empty()) { elf_loader_destroy(&exe_ld); reload_dynamorio(sp, get_dynamorio_dll_start(), get_dynamorio_dll_end()); ASSERT_NOT_REACHED(); } exe_map = elf_loader_map_phdrs(&exe_ld, /* fixed at preferred address, * will be overridden if preferred base is 0 */ true, /* ensure there's space for the brk */ map_exe_file_and_brk, os_unmap_file, os_set_protection, privload_check_new_map_bounds, privload_map_flags(MODLOAD_IS_APP /*!reachable*/)); apicheck(exe_map != NULL, "Failed to load application. " "Check path and architecture."); ASSERT(is_elf_so_header(exe_map, 0)); /* i#1660: the app may have passed a relative path or a symlink to execve, * yet the kernel will put a resolved path into /proc/self/maps. * Rather than us here or in pre-execve, plus in drrun or drinjectlib, * making paths absolute and resolving symlinks to try and match what the * kernel does, we just read the kernel's resolved path. * This is prior to memquery_init() but that's fine (it's already being * called by is_elf_so_header() above). */ if (memquery_iterator_start(&iter, exe_map, false /*no heap*/)) { while (memquery_iterator_next(&iter)) { if (iter.vm_start == exe_map) { set_executable_path(iter.comment); break; } } memquery_iterator_stop(&iter); } /* Set the process name with prctl PR_SET_NAME. This makes killall <app> * work. */ exe_basename = strrchr(exe_path, '/'); if (exe_basename == NULL) { exe_basename = exe_path; } else { exe_basename++; } dynamorio_syscall(SYS_prctl, 5, PR_SET_NAME, (ptr_uint_t)exe_basename, 0, 0, 0); reserve_brk(exe_map + exe_ld.image_size + (INTERNAL_OPTION(separate_private_bss) ? PAGE_SIZE : 0)); interp = elf_loader_find_pt_interp(&exe_ld); if (interp != NULL) { char buf[MAXIMUM_PATH]; if (!IS_STRING_OPTION_EMPTY(xarch_root) && !os_file_exists(interp, false)) { string_option_read_lock(); snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "%s/%s", DYNAMO_OPTION(xarch_root), interp); NULL_TERMINATE_BUFFER(buf); string_option_read_unlock(); if (os_file_exists(buf, false)) { LOG(GLOBAL, LOG_SYSCALLS, 2, "replacing interpreter |%s| with |%s|\n", interp, buf); interp = buf; } } /* Load the ELF pointed at by PT_INTERP, usually ld.so. */ elf_loader_t interp_ld; success = elf_loader_read_headers(&interp_ld, interp); apicheck(success, "Failed to read ELF interpreter headers."); interp_map = elf_loader_map_phdrs( &interp_ld, false /* fixed */, os_map_file, os_unmap_file, os_set_protection, privload_check_new_map_bounds, privload_map_flags(MODLOAD_IS_APP /*!reachable*/)); apicheck(interp_map != NULL && is_elf_so_header(interp_map, 0), "Failed to map ELF interpreter."); /* On Android, the system loader /system/bin/linker sets itself * as the interpreter in the ELF header .interp field. */ ASSERT_CURIOSITY_ONCE((strcmp(interp, "/system/bin/linker") == 0 || elf_loader_find_pt_interp(&interp_ld) == NULL) && "The interpreter shouldn't have an interpreter"); entry = (app_pc)interp_ld.ehdr->e_entry + interp_ld.load_delta; elf_loader_destroy(&interp_ld); } else { /* No PT_INTERP, so this is a static exe. */ interp_map = NULL; entry = (app_pc)exe_ld.ehdr->e_entry + exe_ld.load_delta; } privload_setup_auxv(envp, exe_map, exe_ld.load_delta, interp_map, exe_path); elf_loader_destroy(&exe_ld); /* Initialize the rest of DR *after* we map the app and interp images. 
This is * consistent with our old behavior, and allows the client to do things like call * dr_get_proc_address() on the app from dr_client_main(). We let * find_executable_vm_areas re-discover the mappings we made for the app and * interp images. We do not do the full init before mapping the interp image * as it complicates recording the mappings for the interp. */ if (dynamorio_app_init_part_two_finalize() != SUCCESS) apicheck(false, "Failed to initialize part two."); LOG(GLOBAL, LOG_TOP, 1, "early injected into app with this cmdline:\n"); DOLOG(1, LOG_TOP, { int i; for (i = 0; i < *argc; i++) { LOG(GLOBAL, LOG_TOP, 1, "%s ", argv[i]); } LOG(GLOBAL, LOG_TOP, 1, "\n"); }); if (RUNNING_WITHOUT_CODE_CACHE()) { /* Reset the stack pointer back to the beginning and jump to the entry * point to execute the app natively. This is also useful for testing * if the app has been mapped correctly without involving DR's code * cache. */ # ifdef X86 asm("mov %0, %%" ASM_XSP "\n\t" "jmp *%1\n\t" : : "r"(sp), "r"(entry)); # elif defined(ARM) /* FIXME i#1551: NYI on ARM */ ASSERT_NOT_REACHED(); # endif } memset(&mc, 0, sizeof(mc)); mc.xsp = (reg_t)sp; mc.pc = entry; dynamo_start(&mc); } # endif /* !defined(STANDALONE_UNIT_TEST) && !defined(STATIC_LIBRARY) */ #else /* XXX i#1285: implement MacOS private loader */ #endif
1
25,077
Maybe add i#956 in this log as well.
DynamoRIO-dynamorio
c
@@ -438,6 +438,11 @@ void Player::updateInventoryWeight() inventoryWeight += item->getWeight(); } } + + StoreInbox* storeInbox = getStoreInbox(); + if (storeInbox) { + inventoryWeight += storeInbox->getWeight(); + } } void Player::addSkillAdvance(skills_t skill, uint64_t count)
1
/** * The Forgotten Server - a free and open-source MMORPG server emulator * Copyright (C) 2019 Mark Samman <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "otpch.h" #include <bitset> #include "bed.h" #include "chat.h" #include "combat.h" #include "configmanager.h" #include "creatureevent.h" #include "events.h" #include "game.h" #include "iologindata.h" #include "monster.h" #include "movement.h" #include "scheduler.h" #include "weapons.h" extern ConfigManager g_config; extern Game g_game; extern Chat* g_chat; extern Vocations g_vocations; extern MoveEvents* g_moveEvents; extern Weapons* g_weapons; extern CreatureEvents* g_creatureEvents; extern Events* g_events; MuteCountMap Player::muteCountMap; uint32_t Player::playerAutoID = 0x10000000; Player::Player(ProtocolGame_ptr p) : Creature(), lastPing(OTSYS_TIME()), lastPong(lastPing), inbox(new Inbox(ITEM_INBOX)), storeInbox(new StoreInbox(ITEM_STORE_INBOX)), client(std::move(p)) { inbox->incrementReferenceCounter(); storeInbox->setParent(this); storeInbox->incrementReferenceCounter(); } Player::~Player() { for (Item* item : inventory) { if (item) { item->setParent(nullptr); item->decrementReferenceCounter(); } } for (const auto& it : depotLockerMap) { it.second->removeInbox(inbox); it.second->decrementReferenceCounter(); } inbox->decrementReferenceCounter(); storeInbox->setParent(nullptr); storeInbox->decrementReferenceCounter(); setWriteItem(nullptr); setEditHouse(nullptr); } bool Player::setVocation(uint16_t vocId) { Vocation* voc = g_vocations.getVocation(vocId); if (!voc) { return false; } vocation = voc; Condition* condition = getCondition(CONDITION_REGENERATION, CONDITIONID_DEFAULT); if (condition) { condition->setParam(CONDITION_PARAM_HEALTHGAIN, vocation->getHealthGainAmount()); condition->setParam(CONDITION_PARAM_HEALTHTICKS, vocation->getHealthGainTicks() * 1000); condition->setParam(CONDITION_PARAM_MANAGAIN, vocation->getManaGainAmount()); condition->setParam(CONDITION_PARAM_MANATICKS, vocation->getManaGainTicks() * 1000); } return true; } bool Player::isPushable() const { if (hasFlag(PlayerFlag_CannotBePushed)) { return false; } return Creature::isPushable(); } std::string Player::getDescription(int32_t lookDistance) const { std::ostringstream s; if (lookDistance == -1) { s << "yourself."; if (group->access) { s << " You are " << group->name << '.'; } else if (vocation->getId() != VOCATION_NONE) { s << " You are " << vocation->getVocDescription() << '.'; } else { s << " You have no vocation."; } } else { s << name; if (!group->access) { s << " (Level " << level << ')'; } s << '.'; if (sex == PLAYERSEX_FEMALE) { s << " She"; } else { s << " He"; } if (group->access) { s << " is " << group->name << '.'; } else if (vocation->getId() != VOCATION_NONE) { s << " is " << vocation->getVocDescription() << '.'; } else { s << " has no vocation."; } } if (party) { if 
(lookDistance == -1) { s << " Your party has "; } else if (sex == PLAYERSEX_FEMALE) { s << " She is in a party with "; } else { s << " He is in a party with "; } size_t memberCount = party->getMemberCount() + 1; if (memberCount == 1) { s << "1 member and "; } else { s << memberCount << " members and "; } size_t invitationCount = party->getInvitationCount(); if (invitationCount == 1) { s << "1 pending invitation."; } else { s << invitationCount << " pending invitations."; } } if (!guild || !guildRank) { return s.str(); } if (lookDistance == -1) { s << " You are "; } else if (sex == PLAYERSEX_FEMALE) { s << " She is "; } else { s << " He is "; } s << guildRank->name << " of the " << guild->getName(); if (!guildNick.empty()) { s << " (" << guildNick << ')'; } size_t memberCount = guild->getMemberCount(); if (memberCount == 1) { s << ", which has 1 member, " << guild->getMembersOnline().size() << " of them online."; } else { s << ", which has " << memberCount << " members, " << guild->getMembersOnline().size() << " of them online."; } return s.str(); } Item* Player::getInventoryItem(slots_t slot) const { if (slot < CONST_SLOT_FIRST || slot > CONST_SLOT_LAST) { return nullptr; } return inventory[slot]; } void Player::addConditionSuppressions(uint32_t conditions) { conditionSuppressions |= conditions; } void Player::removeConditionSuppressions(uint32_t conditions) { conditionSuppressions &= ~conditions; } Item* Player::getWeapon(slots_t slot, bool ignoreAmmo) const { Item* item = inventory[slot]; if (!item) { return nullptr; } WeaponType_t weaponType = item->getWeaponType(); if (weaponType == WEAPON_NONE || weaponType == WEAPON_SHIELD || weaponType == WEAPON_AMMO) { return nullptr; } if (!ignoreAmmo && weaponType == WEAPON_DISTANCE) { const ItemType& it = Item::items[item->getID()]; if (it.ammoType != AMMO_NONE) { Item* ammoItem = inventory[CONST_SLOT_AMMO]; if (!ammoItem || ammoItem->getAmmoType() != it.ammoType) { return nullptr; } item = ammoItem; } } return item; } Item* Player::getWeapon(bool ignoreAmmo/* = false*/) const { Item* item = getWeapon(CONST_SLOT_LEFT, ignoreAmmo); if (item) { return item; } item = getWeapon(CONST_SLOT_RIGHT, ignoreAmmo); if (item) { return item; } return nullptr; } WeaponType_t Player::getWeaponType() const { Item* item = getWeapon(); if (!item) { return WEAPON_NONE; } return item->getWeaponType(); } int32_t Player::getWeaponSkill(const Item* item) const { if (!item) { return getSkillLevel(SKILL_FIST); } int32_t attackSkill; WeaponType_t weaponType = item->getWeaponType(); switch (weaponType) { case WEAPON_SWORD: { attackSkill = getSkillLevel(SKILL_SWORD); break; } case WEAPON_CLUB: { attackSkill = getSkillLevel(SKILL_CLUB); break; } case WEAPON_AXE: { attackSkill = getSkillLevel(SKILL_AXE); break; } case WEAPON_DISTANCE: { attackSkill = getSkillLevel(SKILL_DISTANCE); break; } default: { attackSkill = 0; break; } } return attackSkill; } int32_t Player::getArmor() const { int32_t armor = 0; static const slots_t armorSlots[] = {CONST_SLOT_HEAD, CONST_SLOT_NECKLACE, CONST_SLOT_ARMOR, CONST_SLOT_LEGS, CONST_SLOT_FEET, CONST_SLOT_RING}; for (slots_t slot : armorSlots) { Item* inventoryItem = inventory[slot]; if (inventoryItem) { armor += inventoryItem->getArmor(); } } return static_cast<int32_t>(armor * vocation->armorMultiplier); } void Player::getShieldAndWeapon(const Item*& shield, const Item*& weapon) const { shield = nullptr; weapon = nullptr; for (uint32_t slot = CONST_SLOT_RIGHT; slot <= CONST_SLOT_LEFT; slot++) { Item* item = inventory[slot]; if (!item) { 
continue; } switch (item->getWeaponType()) { case WEAPON_NONE: break; case WEAPON_SHIELD: { if (!shield || item->getDefense() > shield->getDefense()) { shield = item; } break; } default: { // weapons that are not shields weapon = item; break; } } } } int32_t Player::getDefense() const { int32_t defenseSkill = getSkillLevel(SKILL_FIST); int32_t defenseValue = 7; const Item* weapon; const Item* shield; getShieldAndWeapon(shield, weapon); if (weapon) { defenseValue = weapon->getDefense() + weapon->getExtraDefense(); defenseSkill = getWeaponSkill(weapon); } if (shield) { defenseValue = weapon != nullptr ? shield->getDefense() + weapon->getExtraDefense() : shield->getDefense(); defenseSkill = getSkillLevel(SKILL_SHIELD); } if (defenseSkill == 0) { switch (fightMode) { case FIGHTMODE_ATTACK: case FIGHTMODE_BALANCED: return 1; case FIGHTMODE_DEFENSE: return 2; } } return (defenseSkill / 4. + 2.23) * defenseValue * 0.15 * getDefenseFactor() * vocation->defenseMultiplier; } float Player::getAttackFactor() const { switch (fightMode) { case FIGHTMODE_ATTACK: return 1.0f; case FIGHTMODE_BALANCED: return 1.2f; case FIGHTMODE_DEFENSE: return 2.0f; default: return 1.0f; } } float Player::getDefenseFactor() const { switch (fightMode) { case FIGHTMODE_ATTACK: return (OTSYS_TIME() - lastAttack) < getAttackSpeed() ? 0.5f : 1.0f; case FIGHTMODE_BALANCED: return (OTSYS_TIME() - lastAttack) < getAttackSpeed() ? 0.75f : 1.0f; case FIGHTMODE_DEFENSE: return 1.0f; default: return 1.0f; } } uint16_t Player::getClientIcons() const { uint16_t icons = 0; for (Condition* condition : conditions) { if (!isSuppress(condition->getType())) { icons |= condition->getIcons(); } } if (pzLocked) { icons |= ICON_REDSWORDS; } if (tile->hasFlag(TILESTATE_PROTECTIONZONE)) { icons |= ICON_PIGEON; // Don't show ICON_SWORDS if player is in protection zone. if (hasBitSet(ICON_SWORDS, icons)) { icons &= ~ICON_SWORDS; } } // Game client debugs with 10 or more icons // so let's prevent that from happening. 
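	// The loop below clears the lowest-numbered set bits first until no
	// more than 9 remain, so higher-valued icons survive the trim
	// (e.g. with 11 icons set, the two lowest are dropped).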
std::bitset<20> icon_bitset(static_cast<uint64_t>(icons)); for (size_t pos = 0, bits_set = icon_bitset.count(); bits_set >= 10; ++pos) { if (icon_bitset[pos]) { icon_bitset.reset(pos); --bits_set; } } return icon_bitset.to_ulong(); } void Player::updateInventoryWeight() { if (hasFlag(PlayerFlag_HasInfiniteCapacity)) { return; } inventoryWeight = 0; for (int i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) { const Item* item = inventory[i]; if (item) { inventoryWeight += item->getWeight(); } } } void Player::addSkillAdvance(skills_t skill, uint64_t count) { uint64_t currReqTries = vocation->getReqSkillTries(skill, skills[skill].level); uint64_t nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1); if (currReqTries >= nextReqTries) { //player has reached max skill return; } g_events->eventPlayerOnGainSkillTries(this, skill, count); if (count == 0) { return; } bool sendUpdateSkills = false; while ((skills[skill].tries + count) >= nextReqTries) { count -= nextReqTries - skills[skill].tries; skills[skill].level++; skills[skill].tries = 0; skills[skill].percent = 0; std::ostringstream ss; ss << "You advanced to " << getSkillName(skill) << " level " << skills[skill].level << '.'; sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str()); g_creatureEvents->playerAdvance(this, skill, (skills[skill].level - 1), skills[skill].level); sendUpdateSkills = true; currReqTries = nextReqTries; nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1); if (currReqTries >= nextReqTries) { count = 0; break; } } skills[skill].tries += count; uint32_t newPercent; if (nextReqTries > currReqTries) { newPercent = Player::getPercentLevel(skills[skill].tries, nextReqTries); } else { newPercent = 0; } if (skills[skill].percent != newPercent) { skills[skill].percent = newPercent; sendUpdateSkills = true; } if (sendUpdateSkills) { sendSkills(); } } void Player::setVarStats(stats_t stat, int32_t modifier) { varStats[stat] += modifier; switch (stat) { case STAT_MAXHITPOINTS: { if (getHealth() > getMaxHealth()) { Creature::changeHealth(getMaxHealth() - getHealth()); } else { g_game.addCreatureHealth(this); } break; } case STAT_MAXMANAPOINTS: { if (getMana() > getMaxMana()) { changeMana(getMaxMana() - getMana()); } break; } default: { break; } } } int32_t Player::getDefaultStats(stats_t stat) const { switch (stat) { case STAT_MAXHITPOINTS: return healthMax; case STAT_MAXMANAPOINTS: return manaMax; case STAT_MAGICPOINTS: return getBaseMagicLevel(); default: return 0; } } void Player::addContainer(uint8_t cid, Container* container) { if (cid > 0xF) { return; } if (container->getID() == ITEM_BROWSEFIELD) { container->incrementReferenceCounter(); } auto it = openContainers.find(cid); if (it != openContainers.end()) { OpenContainer& openContainer = it->second; Container* oldContainer = openContainer.container; if (oldContainer->getID() == ITEM_BROWSEFIELD) { oldContainer->decrementReferenceCounter(); } openContainer.container = container; openContainer.index = 0; } else { OpenContainer openContainer; openContainer.container = container; openContainer.index = 0; openContainers[cid] = openContainer; } } void Player::closeContainer(uint8_t cid) { auto it = openContainers.find(cid); if (it == openContainers.end()) { return; } OpenContainer openContainer = it->second; Container* container = openContainer.container; openContainers.erase(it); if (container && container->getID() == ITEM_BROWSEFIELD) { container->decrementReferenceCounter(); } } void Player::setContainerIndex(uint8_t cid, uint16_t index) { 
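// Note (assumption, not an original comment): `index` is treated here as
// the first visible slot of the page the client is currently viewing,
// which matches how the pagination logic in sendAddContainerItem and
// sendRemoveContainerItem reads it back via getContainerIndex().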
auto it = openContainers.find(cid); if (it == openContainers.end()) { return; } it->second.index = index; } Container* Player::getContainerByID(uint8_t cid) { auto it = openContainers.find(cid); if (it == openContainers.end()) { return nullptr; } return it->second.container; } int8_t Player::getContainerID(const Container* container) const { for (const auto& it : openContainers) { if (it.second.container == container) { return it.first; } } return -1; } uint16_t Player::getContainerIndex(uint8_t cid) const { auto it = openContainers.find(cid); if (it == openContainers.end()) { return 0; } return it->second.index; } bool Player::canOpenCorpse(uint32_t ownerId) const { return getID() == ownerId || (party && party->canOpenCorpse(ownerId)); } uint16_t Player::getLookCorpse() const { if (sex == PLAYERSEX_FEMALE) { return ITEM_FEMALE_CORPSE; } else { return ITEM_MALE_CORPSE; } } void Player::addStorageValue(const uint32_t key, const int32_t value, const bool isLogin/* = false*/) { if (IS_IN_KEYRANGE(key, RESERVED_RANGE)) { if (IS_IN_KEYRANGE(key, OUTFITS_RANGE)) { outfits.emplace_back( value >> 16, value & 0xFF ); return; } else if (IS_IN_KEYRANGE(key, MOUNTS_RANGE)) { // do nothing } else { std::cout << "Warning: unknown reserved key: " << key << " player: " << getName() << std::endl; return; } } if (value != -1) { int32_t oldValue; getStorageValue(key, oldValue); storageMap[key] = value; if (!isLogin) { auto currentFrameTime = g_dispatcher.getDispatcherCycle(); if (lastQuestlogUpdate != currentFrameTime && g_game.quests.isQuestStorage(key, value, oldValue)) { lastQuestlogUpdate = currentFrameTime; sendTextMessage(MESSAGE_EVENT_ADVANCE, "Your questlog has been updated."); } } } else { storageMap.erase(key); } } bool Player::getStorageValue(const uint32_t key, int32_t& value) const { auto it = storageMap.find(key); if (it == storageMap.end()) { value = -1; return false; } value = it->second; return true; } bool Player::canSee(const Position& pos) const { if (!client) { return false; } return client->canSee(pos); } bool Player::canSeeCreature(const Creature* creature) const { if (creature == this) { return true; } if (creature->isInGhostMode() && !group->access) { return false; } if (!creature->getPlayer() && !canSeeInvisibility() && creature->isInvisible()) { return false; } return true; } bool Player::canWalkthrough(const Creature* creature) const { if (group->access || creature->isInGhostMode()) { return true; } const Player* player = creature->getPlayer(); if (!player || !g_config.getBoolean(ConfigManager::ALLOW_WALKTHROUGH)) { return false; } const Tile* playerTile = player->getTile(); if (!playerTile || (!playerTile->hasFlag(TILESTATE_PROTECTIONZONE) && player->getLevel() > static_cast<uint32_t>(g_config.getNumber(ConfigManager::PROTECTION_LEVEL)))) { return false; } const Item* playerTileGround = playerTile->getGround(); if (!playerTileGround || !playerTileGround->hasWalkStack()) { return false; } Player* thisPlayer = const_cast<Player*>(this); if ((OTSYS_TIME() - lastWalkthroughAttempt) > 2000) { thisPlayer->setLastWalkthroughAttempt(OTSYS_TIME()); return false; } if (creature->getPosition() != lastWalkthroughPosition) { thisPlayer->setLastWalkthroughPosition(creature->getPosition()); return false; } thisPlayer->setLastWalkthroughPosition(creature->getPosition()); return true; } bool Player::canWalkthroughEx(const Creature* creature) const { if (group->access) { return true; } const Player* player = creature->getPlayer(); if (!player || 
!g_config.getBoolean(ConfigManager::ALLOW_WALKTHROUGH)) { return false; } const Tile* playerTile = player->getTile(); return playerTile && (playerTile->hasFlag(TILESTATE_PROTECTIONZONE) || player->getLevel() <= static_cast<uint32_t>(g_config.getNumber(ConfigManager::PROTECTION_LEVEL))); } void Player::onReceiveMail() const { if (isNearDepotBox()) { sendTextMessage(MESSAGE_EVENT_ADVANCE, "New mail has arrived."); } } bool Player::isNearDepotBox() const { const Position& pos = getPosition(); for (int32_t cx = -1; cx <= 1; ++cx) { for (int32_t cy = -1; cy <= 1; ++cy) { Tile* tile = g_game.map.getTile(pos.x + cx, pos.y + cy, pos.z); if (!tile) { continue; } if (tile->hasFlag(TILESTATE_DEPOT)) { return true; } } } return false; } DepotChest* Player::getDepotChest(uint32_t depotId, bool autoCreate) { auto it = depotChests.find(depotId); if (it != depotChests.end()) { return it->second; } if (!autoCreate) { return nullptr; } DepotChest* depotChest = new DepotChest(ITEM_DEPOT); depotChest->incrementReferenceCounter(); depotChest->setMaxDepotItems(getMaxDepotItems()); depotChests[depotId] = depotChest; return depotChest; } DepotLocker* Player::getDepotLocker(uint32_t depotId) { auto it = depotLockerMap.find(depotId); if (it != depotLockerMap.end()) { inbox->setParent(it->second); return it->second; } DepotLocker* depotLocker = new DepotLocker(ITEM_LOCKER1); depotLocker->setDepotId(depotId); depotLocker->internalAddThing(Item::CreateItem(ITEM_MARKET)); depotLocker->internalAddThing(inbox); depotLocker->internalAddThing(getDepotChest(depotId, true)); depotLockerMap[depotId] = depotLocker; return depotLocker; } void Player::sendCancelMessage(ReturnValue message) const { sendCancelMessage(getReturnMessage(message)); } void Player::sendStats() { if (client) { client->sendStats(); lastStatsTrainingTime = getOfflineTrainingTime() / 60 / 1000; } } void Player::sendPing() { int64_t timeNow = OTSYS_TIME(); bool hasLostConnection = false; if ((timeNow - lastPing) >= 5000) { lastPing = timeNow; if (client) { client->sendPing(); } else { hasLostConnection = true; } } int64_t noPongTime = timeNow - lastPong; if ((hasLostConnection || noPongTime >= 7000) && attackedCreature && attackedCreature->getPlayer()) { setAttackedCreature(nullptr); } if (noPongTime >= 60000 && canLogout()) { if (g_creatureEvents->playerLogout(this)) { if (client) { client->logout(true, true); } else { g_game.removeCreature(this, true); } } } } Item* Player::getWriteItem(uint32_t& windowTextId, uint16_t& maxWriteLen) { windowTextId = this->windowTextId; maxWriteLen = this->maxWriteLen; return writeItem; } void Player::setWriteItem(Item* item, uint16_t maxWriteLen /*= 0*/) { windowTextId++; if (writeItem) { writeItem->decrementReferenceCounter(); } if (item) { writeItem = item; this->maxWriteLen = maxWriteLen; writeItem->incrementReferenceCounter(); } else { writeItem = nullptr; this->maxWriteLen = 0; } } House* Player::getEditHouse(uint32_t& windowTextId, uint32_t& listId) { windowTextId = this->windowTextId; listId = this->editListId; return editHouse; } void Player::setEditHouse(House* house, uint32_t listId /*= 0*/) { windowTextId++; editHouse = house; editListId = listId; } void Player::sendHouseWindow(House* house, uint32_t listId) const { if (!client) { return; } std::string text; if (house->getAccessList(listId, text)) { client->sendHouseWindow(windowTextId, text); } } //container void Player::sendAddContainerItem(const Container* container, const Item* item) { if (!client) { return; } for (const auto& it : openContainers) { const 
OpenContainer& openContainer = it.second; if (openContainer.container != container) { continue; } uint16_t slot = openContainer.index; if (container->getID() == ITEM_BROWSEFIELD) { uint16_t containerSize = container->size() - 1; uint16_t pageEnd = openContainer.index + container->capacity() - 1; if (containerSize > pageEnd) { slot = pageEnd; item = container->getItemByIndex(pageEnd); } else { slot = containerSize; } } else if (openContainer.index >= container->capacity()) { item = container->getItemByIndex(openContainer.index); } if (item) { client->sendAddContainerItem(it.first, slot, item); } } } void Player::sendUpdateContainerItem(const Container* container, uint16_t slot, const Item* newItem) { if (!client) { return; } for (const auto& it : openContainers) { const OpenContainer& openContainer = it.second; if (openContainer.container != container) { continue; } if (slot < openContainer.index) { continue; } uint16_t pageEnd = openContainer.index + container->capacity(); if (slot >= pageEnd) { continue; } client->sendUpdateContainerItem(it.first, slot, newItem); } } void Player::sendRemoveContainerItem(const Container* container, uint16_t slot) { if (!client) { return; } for (auto& it : openContainers) { OpenContainer& openContainer = it.second; if (openContainer.container != container) { continue; } uint16_t& firstIndex = openContainer.index; if (firstIndex > 0 && firstIndex >= container->size() - 1) { firstIndex -= container->capacity(); sendContainer(it.first, container, false, firstIndex); } client->sendRemoveContainerItem(it.first, std::max<uint16_t>(slot, firstIndex), container->getItemByIndex(container->capacity() + firstIndex)); } } void Player::onUpdateTileItem(const Tile* tile, const Position& pos, const Item* oldItem, const ItemType& oldType, const Item* newItem, const ItemType& newType) { Creature::onUpdateTileItem(tile, pos, oldItem, oldType, newItem, newType); if (oldItem != newItem) { onRemoveTileItem(tile, pos, oldType, oldItem); } if (tradeState != TRADE_TRANSFER) { if (tradeItem && oldItem == tradeItem) { g_game.internalCloseTrade(this); } } } void Player::onRemoveTileItem(const Tile* tile, const Position& pos, const ItemType& iType, const Item* item) { Creature::onRemoveTileItem(tile, pos, iType, item); if (tradeState != TRADE_TRANSFER) { checkTradeState(item); if (tradeItem) { const Container* container = item->getContainer(); if (container && container->isHoldingItem(tradeItem)) { g_game.internalCloseTrade(this); } } } } void Player::onCreatureAppear(Creature* creature, bool isLogin) { Creature::onCreatureAppear(creature, isLogin); if (isLogin && creature == this) { sendItems(); for (int32_t slot = CONST_SLOT_FIRST; slot <= CONST_SLOT_LAST; ++slot) { Item* item = inventory[slot]; if (item) { item->startDecaying(); g_moveEvents->onPlayerEquip(this, item, static_cast<slots_t>(slot), false); } } for (Condition* condition : storedConditionList) { addCondition(condition); } storedConditionList.clear(); BedItem* bed = g_game.getBedBySleeper(guid); if (bed) { bed->wakeUp(this); } Account account = IOLoginData::loadAccount(accountNumber); std::cout << name << " has logged in." << std::endl; if (guild) { guild->addMember(this); } int32_t offlineTime; if (getLastLogout() != 0) { // Not counting more than 21 days to prevent overflow when multiplying with 1000 (for milliseconds). 
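// Worked example (added for clarity): 86400 * 21 = 1,814,400 seconds, and
// 1,814,400 * 1000 = 1,814,400,000 ms, which still fits in int32_t
// (max 2,147,483,647); roughly 25 days or more of offline time would
// overflow without this cap.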
offlineTime = std::min<int32_t>(time(nullptr) - getLastLogout(), 86400 * 21); } else { offlineTime = 0; } for (Condition* condition : getMuteConditions()) { condition->setTicks(condition->getTicks() - (offlineTime * 1000)); if (condition->getTicks() <= 0) { removeCondition(condition); } } g_game.checkPlayersRecord(); IOLoginData::updateOnlineStatus(guid, true); } } void Player::onAttackedCreatureDisappear(bool isLogout) { sendCancelTarget(); if (!isLogout) { sendTextMessage(MESSAGE_STATUS_SMALL, "Target lost."); } } void Player::onFollowCreatureDisappear(bool isLogout) { sendCancelTarget(); if (!isLogout) { sendTextMessage(MESSAGE_STATUS_SMALL, "Target lost."); } } void Player::onChangeZone(ZoneType_t zone) { if (zone == ZONE_PROTECTION) { if (attackedCreature && !hasFlag(PlayerFlag_IgnoreProtectionZone)) { setAttackedCreature(nullptr); onAttackedCreatureDisappear(false); } if (!group->access && isMounted()) { dismount(); g_game.internalCreatureChangeOutfit(this, defaultOutfit); wasMounted = true; } } else { if (wasMounted) { toggleMount(true); wasMounted = false; } } g_game.updateCreatureWalkthrough(this); sendIcons(); } void Player::onAttackedCreatureChangeZone(ZoneType_t zone) { if (zone == ZONE_PROTECTION) { if (!hasFlag(PlayerFlag_IgnoreProtectionZone)) { setAttackedCreature(nullptr); onAttackedCreatureDisappear(false); } } else if (zone == ZONE_NOPVP) { if (attackedCreature->getPlayer()) { if (!hasFlag(PlayerFlag_IgnoreProtectionZone)) { setAttackedCreature(nullptr); onAttackedCreatureDisappear(false); } } } else if (zone == ZONE_NORMAL) { //attackedCreature can leave a pvp zone if not pzlocked if (g_game.getWorldType() == WORLD_TYPE_NO_PVP) { if (attackedCreature->getPlayer()) { setAttackedCreature(nullptr); onAttackedCreatureDisappear(false); } } } } void Player::onRemoveCreature(Creature* creature, bool isLogout) { Creature::onRemoveCreature(creature, isLogout); if (creature == this) { if (isLogout) { loginPosition = getPosition(); } lastLogout = time(nullptr); if (eventWalk != 0) { setFollowCreature(nullptr); } if (tradePartner) { g_game.internalCloseTrade(this); } closeShopWindow(); clearPartyInvitations(); if (party) { party->leaveParty(this); } g_chat->removeUserFromAllChannels(*this); std::cout << getName() << " has logged out." 
<< std::endl; if (guild) { guild->removeMember(this); } IOLoginData::updateOnlineStatus(guid, false); bool saved = false; for (uint32_t tries = 0; tries < 3; ++tries) { if (IOLoginData::savePlayer(this)) { saved = true; break; } } if (!saved) { std::cout << "Error while saving player: " << getName() << std::endl; } } } void Player::openShopWindow(Npc* npc, const std::list<ShopInfo>& shop) { shopItemList = shop; sendShop(npc); sendSaleItemList(); } bool Player::closeShopWindow(bool sendCloseShopWindow /*= true*/) { //unreference callbacks int32_t onBuy; int32_t onSell; Npc* npc = getShopOwner(onBuy, onSell); if (!npc) { shopItemList.clear(); return false; } setShopOwner(nullptr, -1, -1); npc->onPlayerEndTrade(this, onBuy, onSell); if (sendCloseShopWindow) { sendCloseShop(); } shopItemList.clear(); return true; } void Player::onWalk(Direction& dir) { Creature::onWalk(dir); setNextActionTask(nullptr); setNextAction(OTSYS_TIME() + getStepDuration(dir)); } void Player::onCreatureMove(Creature* creature, const Tile* newTile, const Position& newPos, const Tile* oldTile, const Position& oldPos, bool teleport) { Creature::onCreatureMove(creature, newTile, newPos, oldTile, oldPos, teleport); if (hasFollowPath && (creature == followCreature || (creature == this && followCreature))) { isUpdatingPath = false; g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, &g_game, getID()))); } if (creature != this) { return; } if (tradeState != TRADE_TRANSFER) { //check if we should close trade if (tradeItem && !Position::areInRange<1, 1, 0>(tradeItem->getPosition(), getPosition())) { g_game.internalCloseTrade(this); } if (tradePartner && !Position::areInRange<2, 2, 0>(tradePartner->getPosition(), getPosition())) { g_game.internalCloseTrade(this); } } // close modal windows if (!modalWindows.empty()) { // TODO: This shouldn't be hard-coded for (uint32_t modalWindowId : modalWindows) { if (modalWindowId == std::numeric_limits<uint32_t>::max()) { sendTextMessage(MESSAGE_EVENT_ADVANCE, "Offline training aborted."); break; } } modalWindows.clear(); } // leave market if (inMarket) { inMarket = false; } if (party) { party->updateSharedExperience(); } if (teleport || oldPos.z != newPos.z) { int32_t ticks = g_config.getNumber(ConfigManager::STAIRHOP_DELAY); if (ticks > 0) { if (Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_PACIFIED, ticks, 0)) { addCondition(condition); } } } } //container void Player::onAddContainerItem(const Item* item) { checkTradeState(item); } void Player::onUpdateContainerItem(const Container* container, const Item* oldItem, const Item* newItem) { if (oldItem != newItem) { onRemoveContainerItem(container, oldItem); } if (tradeState != TRADE_TRANSFER) { checkTradeState(oldItem); } } void Player::onRemoveContainerItem(const Container* container, const Item* item) { if (tradeState != TRADE_TRANSFER) { checkTradeState(item); if (tradeItem) { if (tradeItem->getParent() != container && container->isHoldingItem(tradeItem)) { g_game.internalCloseTrade(this); } } } } void Player::onCloseContainer(const Container* container) { if (!client) { return; } for (const auto& it : openContainers) { if (it.second.container == container) { client->sendCloseContainer(it.first); } } } void Player::onSendContainer(const Container* container) { if (!client) { return; } bool hasParent = container->hasParent(); for (const auto& it : openContainers) { const OpenContainer& openContainer = it.second; if (openContainer.container == container) { client->sendContainer(it.first, 
container, hasParent, openContainer.index); } } } //inventory void Player::onUpdateInventoryItem(Item* oldItem, Item* newItem) { if (oldItem != newItem) { onRemoveInventoryItem(oldItem); } if (tradeState != TRADE_TRANSFER) { checkTradeState(oldItem); } } void Player::onRemoveInventoryItem(Item* item) { if (tradeState != TRADE_TRANSFER) { checkTradeState(item); if (tradeItem) { const Container* container = item->getContainer(); if (container && container->isHoldingItem(tradeItem)) { g_game.internalCloseTrade(this); } } } } void Player::checkTradeState(const Item* item) { if (!tradeItem || tradeState == TRADE_TRANSFER) { return; } if (tradeItem == item) { g_game.internalCloseTrade(this); } else { const Container* container = dynamic_cast<const Container*>(item->getParent()); while (container) { if (container == tradeItem) { g_game.internalCloseTrade(this); break; } container = dynamic_cast<const Container*>(container->getParent()); } } } void Player::setNextWalkActionTask(SchedulerTask* task) { if (walkTaskEvent != 0) { g_scheduler.stopEvent(walkTaskEvent); walkTaskEvent = 0; } delete walkTask; walkTask = task; } void Player::setNextWalkTask(SchedulerTask* task) { if (nextStepEvent != 0) { g_scheduler.stopEvent(nextStepEvent); nextStepEvent = 0; } if (task) { nextStepEvent = g_scheduler.addEvent(task); resetIdleTime(); } } void Player::setNextActionTask(SchedulerTask* task, bool resetIdleTime /*= true */) { if (actionTaskEvent != 0) { g_scheduler.stopEvent(actionTaskEvent); actionTaskEvent = 0; } if (task) { actionTaskEvent = g_scheduler.addEvent(task); if (resetIdleTime) { this->resetIdleTime(); } } } uint32_t Player::getNextActionTime() const { return std::max<int64_t>(SCHEDULER_MINTICKS, nextAction - OTSYS_TIME()); } void Player::onThink(uint32_t interval) { Creature::onThink(interval); sendPing(); MessageBufferTicks += interval; if (MessageBufferTicks >= 1500) { MessageBufferTicks = 0; addMessageBuffer(); } if (!getTile()->hasFlag(TILESTATE_NOLOGOUT) && !isAccessPlayer()) { idleTime += interval; const int32_t kickAfterMinutes = g_config.getNumber(ConfigManager::KICK_AFTER_MINUTES); if (idleTime > (kickAfterMinutes * 60000) + 60000) { kickPlayer(true); } else if (client && idleTime == 60000 * kickAfterMinutes) { std::ostringstream ss; ss << "There was no variation in your behaviour for " << kickAfterMinutes << " minutes. 
You will be disconnected in one minute if there is no change in your actions until then."; client->sendTextMessage(TextMessage(MESSAGE_STATUS_WARNING, ss.str())); } } if (g_game.getWorldType() != WORLD_TYPE_PVP_ENFORCED) { checkSkullTicks(interval / 1000); } addOfflineTrainingTime(interval); if (lastStatsTrainingTime != getOfflineTrainingTime() / 60 / 1000) { sendStats(); } } uint32_t Player::isMuted() const { if (hasFlag(PlayerFlag_CannotBeMuted)) { return 0; } int32_t muteTicks = 0; for (Condition* condition : conditions) { if (condition->getType() == CONDITION_MUTED && condition->getTicks() > muteTicks) { muteTicks = condition->getTicks(); } } return static_cast<uint32_t>(muteTicks) / 1000; } void Player::addMessageBuffer() { if (MessageBufferCount > 0 && g_config.getNumber(ConfigManager::MAX_MESSAGEBUFFER) != 0 && !hasFlag(PlayerFlag_CannotBeMuted)) { --MessageBufferCount; } } void Player::removeMessageBuffer() { if (hasFlag(PlayerFlag_CannotBeMuted)) { return; } const int32_t maxMessageBuffer = g_config.getNumber(ConfigManager::MAX_MESSAGEBUFFER); if (maxMessageBuffer != 0 && MessageBufferCount <= maxMessageBuffer + 1) { if (++MessageBufferCount > maxMessageBuffer) { uint32_t muteCount = 1; auto it = muteCountMap.find(guid); if (it != muteCountMap.end()) { muteCount = it->second; } uint32_t muteTime = 5 * muteCount * muteCount; muteCountMap[guid] = muteCount + 1; Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_MUTED, muteTime * 1000, 0); addCondition(condition); std::ostringstream ss; ss << "You are muted for " << muteTime << " seconds."; sendTextMessage(MESSAGE_STATUS_SMALL, ss.str()); } } } void Player::drainHealth(Creature* attacker, int32_t damage) { Creature::drainHealth(attacker, damage); sendStats(); } void Player::drainMana(Creature* attacker, int32_t manaLoss) { onAttacked(); changeMana(-manaLoss); if (attacker) { addDamagePoints(attacker, manaLoss); } sendStats(); } void Player::addManaSpent(uint64_t amount) { if (hasFlag(PlayerFlag_NotGainMana)) { return; } uint64_t currReqMana = vocation->getReqMana(magLevel); uint64_t nextReqMana = vocation->getReqMana(magLevel + 1); if (currReqMana >= nextReqMana) { //player has reached max magic level return; } g_events->eventPlayerOnGainSkillTries(this, SKILL_MAGLEVEL, amount); if (amount == 0) { return; } bool sendUpdateStats = false; while ((manaSpent + amount) >= nextReqMana) { amount -= nextReqMana - manaSpent; magLevel++; manaSpent = 0; std::ostringstream ss; ss << "You advanced to magic level " << magLevel << '.'; sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str()); g_creatureEvents->playerAdvance(this, SKILL_MAGLEVEL, magLevel - 1, magLevel); sendUpdateStats = true; currReqMana = nextReqMana; nextReqMana = vocation->getReqMana(magLevel + 1); if (currReqMana >= nextReqMana) { return; } } manaSpent += amount; uint8_t oldPercent = magLevelPercent; if (nextReqMana > currReqMana) { magLevelPercent = Player::getPercentLevel(manaSpent, nextReqMana); } else { magLevelPercent = 0; } if (oldPercent != magLevelPercent) { sendUpdateStats = true; } if (sendUpdateStats) { sendStats(); } } void Player::addExperience(Creature* source, uint64_t exp, bool sendText/* = false*/) { uint64_t currLevelExp = Player::getExpForLevel(level); uint64_t nextLevelExp = Player::getExpForLevel(level + 1); uint64_t rawExp = exp; if (currLevelExp >= nextLevelExp) { //player has reached max level levelPercent = 0; sendStats(); return; } g_events->eventPlayerOnGainExperience(this, source, exp, rawExp); if (exp == 0) { return; } 
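// From here on the (possibly rescaled) exp value is applied; the
// onGainExperience event above may adjust it, e.g. through scripted
// experience multipliers (the exact adjustments depend on the installed
// Lua scripts, so this note is descriptive, not normative).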
experience += exp; if (sendText) { std::string expString = std::to_string(exp) + (exp != 1 ? " experience points." : " experience point."); TextMessage message(MESSAGE_EXPERIENCE, "You gained " + expString); message.position = position; message.primary.value = exp; message.primary.color = TEXTCOLOR_WHITE_EXP; sendTextMessage(message); SpectatorVec spectators; g_game.map.getSpectators(spectators, position, false, true); spectators.erase(this); if (!spectators.empty()) { message.type = MESSAGE_EXPERIENCE_OTHERS; message.text = getName() + " gained " + expString; for (Creature* spectator : spectators) { spectator->getPlayer()->sendTextMessage(message); } } } uint32_t prevLevel = level; while (experience >= nextLevelExp) { ++level; healthMax += vocation->getHPGain(); health += vocation->getHPGain(); manaMax += vocation->getManaGain(); mana += vocation->getManaGain(); capacity += vocation->getCapGain(); currLevelExp = nextLevelExp; nextLevelExp = Player::getExpForLevel(level + 1); if (currLevelExp >= nextLevelExp) { //player has reached max level break; } } if (prevLevel != level) { health = getMaxHealth(); mana = getMaxMana(); updateBaseSpeed(); setBaseSpeed(getBaseSpeed()); g_game.changeSpeed(this, 0); g_game.addCreatureHealth(this); const uint32_t protectionLevel = static_cast<uint32_t>(g_config.getNumber(ConfigManager::PROTECTION_LEVEL)); if (prevLevel < protectionLevel && level >= protectionLevel) { g_game.updateCreatureWalkthrough(this); } if (party) { party->updateSharedExperience(); } g_creatureEvents->playerAdvance(this, SKILL_LEVEL, prevLevel, level); std::ostringstream ss; ss << "You advanced from Level " << prevLevel << " to Level " << level << '.'; sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str()); } if (nextLevelExp > currLevelExp) { levelPercent = Player::getPercentLevel(experience - currLevelExp, nextLevelExp - currLevelExp); } else { levelPercent = 0; } sendStats(); } void Player::removeExperience(uint64_t exp, bool sendText/* = false*/) { if (experience == 0 || exp == 0) { return; } g_events->eventPlayerOnLoseExperience(this, exp); if (exp == 0) { return; } uint64_t lostExp = experience; experience = std::max<int64_t>(0, experience - exp); if (sendText) { lostExp -= experience; std::string expString = std::to_string(lostExp) + (lostExp != 1 ? " experience points." 
: " experience point."); TextMessage message(MESSAGE_EXPERIENCE, "You lost " + expString); message.position = position; message.primary.value = lostExp; message.primary.color = TEXTCOLOR_RED; sendTextMessage(message); SpectatorVec spectators; g_game.map.getSpectators(spectators, position, false, true); spectators.erase(this); if (!spectators.empty()) { message.type = MESSAGE_EXPERIENCE_OTHERS; message.text = getName() + " lost " + expString; for (Creature* spectator : spectators) { spectator->getPlayer()->sendTextMessage(message); } } } uint32_t oldLevel = level; uint64_t currLevelExp = Player::getExpForLevel(level); while (level > 1 && experience < currLevelExp) { --level; healthMax = std::max<int32_t>(0, healthMax - vocation->getHPGain()); manaMax = std::max<int32_t>(0, manaMax - vocation->getManaGain()); capacity = std::max<int32_t>(0, capacity - vocation->getCapGain()); currLevelExp = Player::getExpForLevel(level); } if (oldLevel != level) { health = getMaxHealth(); mana = getMaxMana(); updateBaseSpeed(); setBaseSpeed(getBaseSpeed()); g_game.changeSpeed(this, 0); g_game.addCreatureHealth(this); const uint32_t protectionLevel = static_cast<uint32_t>(g_config.getNumber(ConfigManager::PROTECTION_LEVEL)); if (oldLevel >= protectionLevel && level < protectionLevel) { g_game.updateCreatureWalkthrough(this); } if (party) { party->updateSharedExperience(); } std::ostringstream ss; ss << "You were downgraded from Level " << oldLevel << " to Level " << level << '.'; sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str()); } uint64_t nextLevelExp = Player::getExpForLevel(level + 1); if (nextLevelExp > currLevelExp) { levelPercent = Player::getPercentLevel(experience - currLevelExp, nextLevelExp - currLevelExp); } else { levelPercent = 0; } sendStats(); } uint8_t Player::getPercentLevel(uint64_t count, uint64_t nextLevelCount) { if (nextLevelCount == 0) { return 0; } uint8_t result = (count * 100) / nextLevelCount; if (result > 100) { return 0; } return result; } void Player::onBlockHit() { if (shieldBlockCount > 0) { --shieldBlockCount; if (hasShield()) { addSkillAdvance(SKILL_SHIELD, 1); } } } void Player::onAttackedCreatureBlockHit(BlockType_t blockType) { lastAttackBlockType = blockType; switch (blockType) { case BLOCK_NONE: { addAttackSkillPoint = true; bloodHitCount = 30; shieldBlockCount = 30; break; } case BLOCK_DEFENSE: case BLOCK_ARMOR: { //need to draw blood every 30 hits if (bloodHitCount > 0) { addAttackSkillPoint = true; --bloodHitCount; } else { addAttackSkillPoint = false; } break; } default: { addAttackSkillPoint = false; break; } } } bool Player::hasShield() const { Item* item = inventory[CONST_SLOT_LEFT]; if (item && item->getWeaponType() == WEAPON_SHIELD) { return true; } item = inventory[CONST_SLOT_RIGHT]; if (item && item->getWeaponType() == WEAPON_SHIELD) { return true; } return false; } BlockType_t Player::blockHit(Creature* attacker, CombatType_t combatType, int32_t& damage, bool checkDefense /* = false*/, bool checkArmor /* = false*/, bool field /* = false*/, bool ignoreResistances /* = false*/) { BlockType_t blockType = Creature::blockHit(attacker, combatType, damage, checkDefense, checkArmor, field, ignoreResistances); if (attacker) { sendCreatureSquare(attacker, SQ_COLOR_BLACK); } if (blockType != BLOCK_NONE) { return blockType; } if (damage <= 0) { damage = 0; return BLOCK_ARMOR; } if (!ignoreResistances) { for (int32_t slot = CONST_SLOT_FIRST; slot <= CONST_SLOT_AMMO; ++slot) { if (!isItemAbilityEnabled(static_cast<slots_t>(slot))) { continue; } Item* item = 
inventory[slot]; if (!item) { continue; } const ItemType& it = Item::items[item->getID()]; if (!it.abilities) { if (damage <= 0) { damage = 0; return BLOCK_ARMOR; } continue; } const int16_t& absorbPercent = it.abilities->absorbPercent[combatTypeToIndex(combatType)]; if (absorbPercent != 0) { damage -= std::round(damage * (absorbPercent / 100.)); uint16_t charges = item->getCharges(); if (charges != 0) { g_game.transformItem(item, item->getID(), charges - 1); } } if (field) { const int16_t& fieldAbsorbPercent = it.abilities->fieldAbsorbPercent[combatTypeToIndex(combatType)]; if (fieldAbsorbPercent != 0) { damage -= std::round(damage * (fieldAbsorbPercent / 100.)); uint16_t charges = item->getCharges(); if (charges != 0) { g_game.transformItem(item, item->getID(), charges - 1); } } } } } if (damage <= 0) { damage = 0; blockType = BLOCK_ARMOR; } return blockType; } uint32_t Player::getIP() const { if (client) { return client->getIP(); } return 0; } void Player::death(Creature* lastHitCreature) { loginPosition = town->getTemplePosition(); if (skillLoss) { uint8_t unfairFightReduction = 100; bool lastHitPlayer = Player::lastHitIsPlayer(lastHitCreature); if (lastHitPlayer) { uint32_t sumLevels = 0; uint32_t inFightTicks = g_config.getNumber(ConfigManager::PZ_LOCKED); for (const auto& it : damageMap) { CountBlock_t cb = it.second; if ((OTSYS_TIME() - cb.ticks) <= inFightTicks) { Player* damageDealer = g_game.getPlayerByID(it.first); if (damageDealer) { sumLevels += damageDealer->getLevel(); } } } if (sumLevels > level) { double reduce = level / static_cast<double>(sumLevels); unfairFightReduction = std::max<uint8_t>(20, std::floor((reduce * 100) + 0.5)); } } //Magic level loss uint64_t sumMana = 0; uint64_t lostMana = 0; //sum up all the mana for (uint32_t i = 1; i <= magLevel; ++i) { sumMana += vocation->getReqMana(i); } sumMana += manaSpent; double deathLossPercent = getLostPercent() * (unfairFightReduction / 100.); lostMana = static_cast<uint64_t>(sumMana * deathLossPercent); while (lostMana > manaSpent && magLevel > 0) { lostMana -= manaSpent; manaSpent = vocation->getReqMana(magLevel); magLevel--; } manaSpent -= lostMana; uint64_t nextReqMana = vocation->getReqMana(magLevel + 1); if (nextReqMana > vocation->getReqMana(magLevel)) { magLevelPercent = Player::getPercentLevel(manaSpent, nextReqMana); } else { magLevelPercent = 0; } //Skill loss for (uint8_t i = SKILL_FIRST; i <= SKILL_LAST; ++i) { //for each skill uint64_t sumSkillTries = 0; for (uint16_t c = 11; c <= skills[i].level; ++c) { //sum up all required tries for all skill levels sumSkillTries += vocation->getReqSkillTries(i, c); } sumSkillTries += skills[i].tries; uint32_t lostSkillTries = static_cast<uint32_t>(sumSkillTries * deathLossPercent); while (lostSkillTries > skills[i].tries) { lostSkillTries -= skills[i].tries; if (skills[i].level <= 10) { skills[i].level = 10; skills[i].tries = 0; lostSkillTries = 0; break; } skills[i].tries = vocation->getReqSkillTries(i, skills[i].level); skills[i].level--; } skills[i].tries = std::max<int32_t>(0, skills[i].tries - lostSkillTries); skills[i].percent = Player::getPercentLevel(skills[i].tries, vocation->getReqSkillTries(i, skills[i].level)); } //Level loss uint64_t expLoss = static_cast<uint64_t>(experience * deathLossPercent); g_events->eventPlayerOnLoseExperience(this, expLoss); if (expLoss != 0) { uint32_t oldLevel = level; if (vocation->getId() == VOCATION_NONE || level > 7) { experience -= expLoss; } while (level > 1 && experience < Player::getExpForLevel(level)) { --level; healthMax 
= std::max<int32_t>(0, healthMax - vocation->getHPGain()); manaMax = std::max<int32_t>(0, manaMax - vocation->getManaGain()); capacity = std::max<int32_t>(0, capacity - vocation->getCapGain()); } if (oldLevel != level) { std::ostringstream ss; ss << "You were downgraded from Level " << oldLevel << " to Level " << level << '.'; sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str()); } uint64_t currLevelExp = Player::getExpForLevel(level); uint64_t nextLevelExp = Player::getExpForLevel(level + 1); if (nextLevelExp > currLevelExp) { levelPercent = Player::getPercentLevel(experience - currLevelExp, nextLevelExp - currLevelExp); } else { levelPercent = 0; } } std::bitset<6> bitset(blessings); if (bitset[5]) { if (lastHitPlayer) { bitset.reset(5); blessings = bitset.to_ulong(); } else { blessings = 32; } } else { blessings = 0; } sendStats(); sendSkills(); sendReLoginWindow(unfairFightReduction); if (getSkull() == SKULL_BLACK) { health = 40; mana = 0; } else { health = healthMax; mana = manaMax; } auto it = conditions.begin(), end = conditions.end(); while (it != end) { Condition* condition = *it; if (condition->isPersistent()) { it = conditions.erase(it); condition->endCondition(this); onEndCondition(condition->getType()); delete condition; } else { ++it; } } } else { setSkillLoss(true); auto it = conditions.begin(), end = conditions.end(); while (it != end) { Condition* condition = *it; if (condition->isPersistent()) { it = conditions.erase(it); condition->endCondition(this); onEndCondition(condition->getType()); delete condition; } else { ++it; } } health = healthMax; g_game.internalTeleport(this, getTemplePosition(), true); g_game.addCreatureHealth(this); onThink(EVENT_CREATURE_THINK_INTERVAL); onIdleStatus(); sendStats(); } } bool Player::dropCorpse(Creature* lastHitCreature, Creature* mostDamageCreature, bool lastHitUnjustified, bool mostDamageUnjustified) { if (getZone() != ZONE_PVP || !Player::lastHitIsPlayer(lastHitCreature)) { return Creature::dropCorpse(lastHitCreature, mostDamageCreature, lastHitUnjustified, mostDamageUnjustified); } setDropLoot(true); return false; } Item* Player::getCorpse(Creature* lastHitCreature, Creature* mostDamageCreature) { Item* corpse = Creature::getCorpse(lastHitCreature, mostDamageCreature); if (corpse && corpse->getContainer()) { std::ostringstream ss; if (lastHitCreature) { ss << "You recognize " << getNameDescription() << ". " << (getSex() == PLAYERSEX_FEMALE ? 
"She" : "He") << " was killed by " << lastHitCreature->getNameDescription() << '.'; } else { ss << "You recognize " << getNameDescription() << '.'; } corpse->setSpecialDescription(ss.str()); } return corpse; } void Player::addInFightTicks(bool pzlock /*= false*/) { if (hasFlag(PlayerFlag_NotGainInFight)) { return; } if (pzlock) { pzLocked = true; } Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_INFIGHT, g_config.getNumber(ConfigManager::PZ_LOCKED), 0); addCondition(condition); } void Player::removeList() { g_game.removePlayer(this); for (const auto& it : g_game.getPlayers()) { it.second->notifyStatusChange(this, VIPSTATUS_OFFLINE); } } void Player::addList() { for (const auto& it : g_game.getPlayers()) { it.second->notifyStatusChange(this, VIPSTATUS_ONLINE); } g_game.addPlayer(this); } void Player::kickPlayer(bool displayEffect) { g_creatureEvents->playerLogout(this); if (client) { client->logout(displayEffect, true); } else { g_game.removeCreature(this); } } void Player::notifyStatusChange(Player* loginPlayer, VipStatus_t status) { if (!client) { return; } auto it = VIPList.find(loginPlayer->guid); if (it == VIPList.end()) { return; } client->sendUpdatedVIPStatus(loginPlayer->guid, status); if (status == VIPSTATUS_ONLINE) { client->sendTextMessage(TextMessage(MESSAGE_STATUS_SMALL, loginPlayer->getName() + " has logged in.")); } else if (status == VIPSTATUS_OFFLINE) { client->sendTextMessage(TextMessage(MESSAGE_STATUS_SMALL, loginPlayer->getName() + " has logged out.")); } } bool Player::removeVIP(uint32_t vipGuid) { if (VIPList.erase(vipGuid) == 0) { return false; } IOLoginData::removeVIPEntry(accountNumber, vipGuid); return true; } bool Player::addVIP(uint32_t vipGuid, const std::string& vipName, VipStatus_t status) { if (VIPList.size() >= getMaxVIPEntries()) { sendTextMessage(MESSAGE_STATUS_SMALL, "You cannot add more buddies."); return false; } auto result = VIPList.insert(vipGuid); if (!result.second) { sendTextMessage(MESSAGE_STATUS_SMALL, "This player is already in your list."); return false; } IOLoginData::addVIPEntry(accountNumber, vipGuid, "", 0, false); if (client) { client->sendVIP(vipGuid, vipName, "", 0, false, status); } return true; } bool Player::addVIPInternal(uint32_t vipGuid) { if (VIPList.size() >= getMaxVIPEntries()) { return false; } return VIPList.insert(vipGuid).second; } bool Player::editVIP(uint32_t vipGuid, const std::string& description, uint32_t icon, bool notify) { auto it = VIPList.find(vipGuid); if (it == VIPList.end()) { return false; // player is not in VIP } IOLoginData::editVIPEntry(accountNumber, vipGuid, description, icon, notify); return true; } //close container and its child containers void Player::autoCloseContainers(const Container* container) { std::vector<uint32_t> closeList; for (const auto& it : openContainers) { Container* tmpContainer = it.second.container; while (tmpContainer) { if (tmpContainer->isRemoved() || tmpContainer == container) { closeList.push_back(it.first); break; } tmpContainer = dynamic_cast<Container*>(tmpContainer->getParent()); } } for (uint32_t containerId : closeList) { closeContainer(containerId); if (client) { client->sendCloseContainer(containerId); } } } bool Player::hasCapacity(const Item* item, uint32_t count) const { if (hasFlag(PlayerFlag_CannotPickupItem)) { return false; } if (hasFlag(PlayerFlag_HasInfiniteCapacity) || item->getTopParent() == this) { return true; } uint32_t itemWeight = item->getContainer() != nullptr ? 
item->getWeight() : item->getBaseWeight(); if (item->isStackable()) { itemWeight *= count; } return itemWeight <= getFreeCapacity(); } ReturnValue Player::queryAdd(int32_t index, const Thing& thing, uint32_t count, uint32_t flags, Creature*) const { const Item* item = thing.getItem(); if (item == nullptr) { return RETURNVALUE_NOTPOSSIBLE; } bool childIsOwner = hasBitSet(FLAG_CHILDISOWNER, flags); if (childIsOwner) { //a child container is querying the player, just check if enough capacity bool skipLimit = hasBitSet(FLAG_NOLIMIT, flags); if (skipLimit || hasCapacity(item, count)) { return RETURNVALUE_NOERROR; } return RETURNVALUE_NOTENOUGHCAPACITY; } if (!item->isPickupable()) { return RETURNVALUE_CANNOTPICKUP; } if (item->isStoreItem()) { return RETURNVALUE_ITEMCANNOTBEMOVEDTHERE; } ReturnValue ret = RETURNVALUE_NOERROR; const int32_t& slotPosition = item->getSlotPosition(); if ((slotPosition & SLOTP_HEAD) || (slotPosition & SLOTP_NECKLACE) || (slotPosition & SLOTP_BACKPACK) || (slotPosition & SLOTP_ARMOR) || (slotPosition & SLOTP_LEGS) || (slotPosition & SLOTP_FEET) || (slotPosition & SLOTP_RING)) { ret = RETURNVALUE_CANNOTBEDRESSED; } else if (slotPosition & SLOTP_TWO_HAND) { ret = RETURNVALUE_PUTTHISOBJECTINBOTHHANDS; } else if ((slotPosition & SLOTP_RIGHT) || (slotPosition & SLOTP_LEFT)) { if (!g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) { ret = RETURNVALUE_CANNOTBEDRESSED; } else { ret = RETURNVALUE_PUTTHISOBJECTINYOURHAND; } } switch (index) { case CONST_SLOT_HEAD: { if (slotPosition & SLOTP_HEAD) { ret = RETURNVALUE_NOERROR; } break; } case CONST_SLOT_NECKLACE: { if (slotPosition & SLOTP_NECKLACE) { ret = RETURNVALUE_NOERROR; } break; } case CONST_SLOT_BACKPACK: { if (slotPosition & SLOTP_BACKPACK) { ret = RETURNVALUE_NOERROR; } break; } case CONST_SLOT_ARMOR: { if (slotPosition & SLOTP_ARMOR) { ret = RETURNVALUE_NOERROR; } break; } case CONST_SLOT_RIGHT: { if (slotPosition & SLOTP_RIGHT) { if (!g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) { if (item->getWeaponType() != WEAPON_SHIELD) { ret = RETURNVALUE_CANNOTBEDRESSED; } else { const Item* leftItem = inventory[CONST_SLOT_LEFT]; if (leftItem) { if ((leftItem->getSlotPosition() | slotPosition) & SLOTP_TWO_HAND) { ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE; } else { ret = RETURNVALUE_NOERROR; } } else { ret = RETURNVALUE_NOERROR; } } } else if (slotPosition & SLOTP_TWO_HAND) { if (inventory[CONST_SLOT_LEFT] && inventory[CONST_SLOT_LEFT] != item) { ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE; } else { ret = RETURNVALUE_NOERROR; } } else if (inventory[CONST_SLOT_LEFT]) { const Item* leftItem = inventory[CONST_SLOT_LEFT]; WeaponType_t type = item->getWeaponType(), leftType = leftItem->getWeaponType(); if (leftItem->getSlotPosition() & SLOTP_TWO_HAND) { ret = RETURNVALUE_DROPTWOHANDEDITEM; } else if (item == leftItem && count == item->getItemCount()) { ret = RETURNVALUE_NOERROR; } else if (leftType == WEAPON_SHIELD && type == WEAPON_SHIELD) { ret = RETURNVALUE_CANONLYUSEONESHIELD; } else if (leftType == WEAPON_NONE || type == WEAPON_NONE || leftType == WEAPON_SHIELD || leftType == WEAPON_AMMO || type == WEAPON_SHIELD || type == WEAPON_AMMO) { ret = RETURNVALUE_NOERROR; } else { ret = RETURNVALUE_CANONLYUSEONEWEAPON; } } else { ret = RETURNVALUE_NOERROR; } } break; } case CONST_SLOT_LEFT: { if (slotPosition & SLOTP_LEFT) { if (!g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) { WeaponType_t type = item->getWeaponType(); if (type == WEAPON_NONE || type == WEAPON_SHIELD) { ret = 
RETURNVALUE_CANNOTBEDRESSED; } else if (inventory[CONST_SLOT_RIGHT] && (slotPosition & SLOTP_TWO_HAND)) { ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE; } else { ret = RETURNVALUE_NOERROR; } } else if (slotPosition & SLOTP_TWO_HAND) { if (inventory[CONST_SLOT_RIGHT] && inventory[CONST_SLOT_RIGHT] != item) { ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE; } else { ret = RETURNVALUE_NOERROR; } } else if (inventory[CONST_SLOT_RIGHT]) { const Item* rightItem = inventory[CONST_SLOT_RIGHT]; WeaponType_t type = item->getWeaponType(), rightType = rightItem->getWeaponType(); if (rightItem->getSlotPosition() & SLOTP_TWO_HAND) { ret = RETURNVALUE_DROPTWOHANDEDITEM; } else if (item == rightItem && count == item->getItemCount()) { ret = RETURNVALUE_NOERROR; } else if (rightType == WEAPON_SHIELD && type == WEAPON_SHIELD) { ret = RETURNVALUE_CANONLYUSEONESHIELD; } else if (rightType == WEAPON_NONE || type == WEAPON_NONE || rightType == WEAPON_SHIELD || rightType == WEAPON_AMMO || type == WEAPON_SHIELD || type == WEAPON_AMMO) { ret = RETURNVALUE_NOERROR; } else { ret = RETURNVALUE_CANONLYUSEONEWEAPON; } } else { ret = RETURNVALUE_NOERROR; } } break; } case CONST_SLOT_LEGS: { if (slotPosition & SLOTP_LEGS) { ret = RETURNVALUE_NOERROR; } break; } case CONST_SLOT_FEET: { if (slotPosition & SLOTP_FEET) { ret = RETURNVALUE_NOERROR; } break; } case CONST_SLOT_RING: { if (slotPosition & SLOTP_RING) { ret = RETURNVALUE_NOERROR; } break; } case CONST_SLOT_AMMO: { if ((slotPosition & SLOTP_AMMO) || g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) { ret = RETURNVALUE_NOERROR; } break; } case CONST_SLOT_WHEREEVER: case -1: ret = RETURNVALUE_NOTENOUGHROOM; break; default: ret = RETURNVALUE_NOTPOSSIBLE; break; } if (ret != RETURNVALUE_NOERROR && ret != RETURNVALUE_NOTENOUGHROOM) { return ret; } //check if enough capacity if (!hasCapacity(item, count)) { return RETURNVALUE_NOTENOUGHCAPACITY; } ret = g_moveEvents->onPlayerEquip(const_cast<Player*>(this), const_cast<Item*>(item), static_cast<slots_t>(index), true); if (ret != RETURNVALUE_NOERROR) { return ret; } //need an exchange with source? 
(destination item is swapped with currently moved item) const Item* inventoryItem = getInventoryItem(static_cast<slots_t>(index)); if (inventoryItem && (!inventoryItem->isStackable() || inventoryItem->getID() != item->getID())) { const Cylinder* cylinder = item->getTopParent(); if (cylinder && (dynamic_cast<const DepotChest*>(cylinder) || dynamic_cast<const Player*>(cylinder))) { return RETURNVALUE_NEEDEXCHANGE; } return RETURNVALUE_NOTENOUGHROOM; } return ret; } ReturnValue Player::queryMaxCount(int32_t index, const Thing& thing, uint32_t count, uint32_t& maxQueryCount, uint32_t flags) const { const Item* item = thing.getItem(); if (item == nullptr) { maxQueryCount = 0; return RETURNVALUE_NOTPOSSIBLE; } if (index == INDEX_WHEREEVER) { uint32_t n = 0; for (int32_t slotIndex = CONST_SLOT_FIRST; slotIndex <= CONST_SLOT_LAST; ++slotIndex) { Item* inventoryItem = inventory[slotIndex]; if (inventoryItem) { if (Container* subContainer = inventoryItem->getContainer()) { uint32_t queryCount = 0; subContainer->queryMaxCount(INDEX_WHEREEVER, *item, item->getItemCount(), queryCount, flags); n += queryCount; //iterate through all items, including sub-containers (deep search) for (ContainerIterator it = subContainer->iterator(); it.hasNext(); it.advance()) { if (Container* tmpContainer = (*it)->getContainer()) { queryCount = 0; tmpContainer->queryMaxCount(INDEX_WHEREEVER, *item, item->getItemCount(), queryCount, flags); n += queryCount; } } } else if (inventoryItem->isStackable() && item->equals(inventoryItem) && inventoryItem->getItemCount() < 100) { uint32_t remainder = (100 - inventoryItem->getItemCount()); if (queryAdd(slotIndex, *item, remainder, flags) == RETURNVALUE_NOERROR) { n += remainder; } } } else if (queryAdd(slotIndex, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) { //empty slot if (item->isStackable()) { n += 100; } else { ++n; } } } maxQueryCount = n; } else { const Item* destItem = nullptr; const Thing* destThing = getThing(index); if (destThing) { destItem = destThing->getItem(); } if (destItem) { if (destItem->isStackable() && item->equals(destItem) && destItem->getItemCount() < 100) { maxQueryCount = 100 - destItem->getItemCount(); } else { maxQueryCount = 0; } } else if (queryAdd(index, *item, count, flags) == RETURNVALUE_NOERROR) { //empty slot if (item->isStackable()) { maxQueryCount = 100; } else { maxQueryCount = 1; } return RETURNVALUE_NOERROR; } } if (maxQueryCount < count) { return RETURNVALUE_NOTENOUGHROOM; } else { return RETURNVALUE_NOERROR; } } ReturnValue Player::queryRemove(const Thing& thing, uint32_t count, uint32_t flags, Creature* /*= nullptr*/) const { int32_t index = getThingIndex(&thing); if (index == -1) { return RETURNVALUE_NOTPOSSIBLE; } const Item* item = thing.getItem(); if (item == nullptr) { return RETURNVALUE_NOTPOSSIBLE; } if (count == 0 || (item->isStackable() && count > item->getItemCount())) { return RETURNVALUE_NOTPOSSIBLE; } if (!item->isMoveable() && !hasBitSet(FLAG_IGNORENOTMOVEABLE, flags)) { return RETURNVALUE_NOTMOVEABLE; } return RETURNVALUE_NOERROR; } Cylinder* Player::queryDestination(int32_t& index, const Thing& thing, Item** destItem, uint32_t& flags) { if (index == 0 /*drop to capacity window*/ || index == INDEX_WHEREEVER) { *destItem = nullptr; const Item* item = thing.getItem(); if (item == nullptr) { return this; } bool autoStack = !((flags & FLAG_IGNOREAUTOSTACK) == FLAG_IGNOREAUTOSTACK); bool isStackable = item->isStackable(); std::vector<Container*> containers; for (uint32_t slotIndex = CONST_SLOT_FIRST; slotIndex <= 
CONST_SLOT_LAST; ++slotIndex) { Item* inventoryItem = inventory[slotIndex]; if (inventoryItem) { if (inventoryItem == tradeItem) { continue; } if (inventoryItem == item) { continue; } if (autoStack && isStackable) { //try find an already existing item to stack with if (queryAdd(slotIndex, *item, item->getItemCount(), 0) == RETURNVALUE_NOERROR) { if (inventoryItem->equals(item) && inventoryItem->getItemCount() < 100) { index = slotIndex; *destItem = inventoryItem; return this; } } if (Container* subContainer = inventoryItem->getContainer()) { containers.push_back(subContainer); } } else if (Container* subContainer = inventoryItem->getContainer()) { containers.push_back(subContainer); } } else if (queryAdd(slotIndex, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) { //empty slot index = slotIndex; *destItem = nullptr; return this; } } size_t i = 0; while (i < containers.size()) { Container* tmpContainer = containers[i++]; if (!autoStack || !isStackable) { //we need to find first empty container as fast as we can for non-stackable items uint32_t n = tmpContainer->capacity() - tmpContainer->size(); while (n) { if (tmpContainer->queryAdd(tmpContainer->capacity() - n, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) { index = tmpContainer->capacity() - n; *destItem = nullptr; return tmpContainer; } n--; } for (Item* tmpContainerItem : tmpContainer->getItemList()) { if (Container* subContainer = tmpContainerItem->getContainer()) { containers.push_back(subContainer); } } continue; } uint32_t n = 0; for (Item* tmpItem : tmpContainer->getItemList()) { if (tmpItem == tradeItem) { continue; } if (tmpItem == item) { continue; } //try find an already existing item to stack with if (tmpItem->equals(item) && tmpItem->getItemCount() < 100) { index = n; *destItem = tmpItem; return tmpContainer; } if (Container* subContainer = tmpItem->getContainer()) { containers.push_back(subContainer); } n++; } if (n < tmpContainer->capacity() && tmpContainer->queryAdd(n, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) { index = n; *destItem = nullptr; return tmpContainer; } } return this; } Thing* destThing = getThing(index); if (destThing) { *destItem = destThing->getItem(); } Cylinder* subCylinder = dynamic_cast<Cylinder*>(destThing); if (subCylinder) { index = INDEX_WHEREEVER; *destItem = nullptr; return subCylinder; } else { return this; } } void Player::addThing(int32_t index, Thing* thing) { if (index < CONST_SLOT_FIRST || index > CONST_SLOT_LAST) { return /*RETURNVALUE_NOTPOSSIBLE*/; } Item* item = thing->getItem(); if (!item) { return /*RETURNVALUE_NOTPOSSIBLE*/; } item->setParent(this); inventory[index] = item; //send to client sendInventoryItem(static_cast<slots_t>(index), item); } void Player::updateThing(Thing* thing, uint16_t itemId, uint32_t count) { int32_t index = getThingIndex(thing); if (index == -1) { return /*RETURNVALUE_NOTPOSSIBLE*/; } Item* item = thing->getItem(); if (!item) { return /*RETURNVALUE_NOTPOSSIBLE*/; } item->setID(itemId); item->setSubType(count); //send to client sendInventoryItem(static_cast<slots_t>(index), item); //event methods onUpdateInventoryItem(item, item); } void Player::replaceThing(uint32_t index, Thing* thing) { if (index > CONST_SLOT_LAST) { return /*RETURNVALUE_NOTPOSSIBLE*/; } Item* oldItem = getInventoryItem(static_cast<slots_t>(index)); if (!oldItem) { return /*RETURNVALUE_NOTPOSSIBLE*/; } Item* item = thing->getItem(); if (!item) { return /*RETURNVALUE_NOTPOSSIBLE*/; } //send to client sendInventoryItem(static_cast<slots_t>(index), 
item); //event methods onUpdateInventoryItem(oldItem, item); item->setParent(this); inventory[index] = item; } void Player::removeThing(Thing* thing, uint32_t count) { Item* item = thing->getItem(); if (!item) { return /*RETURNVALUE_NOTPOSSIBLE*/; } int32_t index = getThingIndex(thing); if (index == -1) { return /*RETURNVALUE_NOTPOSSIBLE*/; } if (item->isStackable()) { if (count == item->getItemCount()) { //send change to client sendInventoryItem(static_cast<slots_t>(index), nullptr); //event methods onRemoveInventoryItem(item); item->setParent(nullptr); inventory[index] = nullptr; } else { uint8_t newCount = static_cast<uint8_t>(std::max<int32_t>(0, item->getItemCount() - count)); item->setItemCount(newCount); //send change to client sendInventoryItem(static_cast<slots_t>(index), item); //event methods onUpdateInventoryItem(item, item); } } else { //send change to client sendInventoryItem(static_cast<slots_t>(index), nullptr); //event methods onRemoveInventoryItem(item); item->setParent(nullptr); inventory[index] = nullptr; } } int32_t Player::getThingIndex(const Thing* thing) const { for (int i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) { if (inventory[i] == thing) { return i; } } return -1; } size_t Player::getFirstIndex() const { return CONST_SLOT_FIRST; } size_t Player::getLastIndex() const { return CONST_SLOT_LAST + 1; } uint32_t Player::getItemTypeCount(uint16_t itemId, int32_t subType /*= -1*/) const { uint32_t count = 0; for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; i++) { Item* item = inventory[i]; if (!item) { continue; } if (item->getID() == itemId) { count += Item::countByType(item, subType); } if (Container* container = item->getContainer()) { for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) { if ((*it)->getID() == itemId) { count += Item::countByType(*it, subType); } } } } return count; } bool Player::removeItemOfType(uint16_t itemId, uint32_t amount, int32_t subType, bool ignoreEquipped/* = false*/) const { if (amount == 0) { return true; } std::vector<Item*> itemList; uint32_t count = 0; for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; i++) { Item* item = inventory[i]; if (!item) { continue; } if (!ignoreEquipped && item->getID() == itemId) { uint32_t itemCount = Item::countByType(item, subType); if (itemCount == 0) { continue; } itemList.push_back(item); count += itemCount; if (count >= amount) { g_game.internalRemoveItems(std::move(itemList), amount, Item::items[itemId].stackable); return true; } } else if (Container* container = item->getContainer()) { for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) { Item* containerItem = *it; if (containerItem->getID() == itemId) { uint32_t itemCount = Item::countByType(containerItem, subType); if (itemCount == 0) { continue; } itemList.push_back(containerItem); count += itemCount; if (count >= amount) { g_game.internalRemoveItems(std::move(itemList), amount, Item::items[itemId].stackable); return true; } } } } } return false; } std::map<uint32_t, uint32_t>& Player::getAllItemTypeCount(std::map<uint32_t, uint32_t>& countMap) const { for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; i++) { Item* item = inventory[i]; if (!item) { continue; } countMap[item->getID()] += Item::countByType(item, -1); if (Container* container = item->getContainer()) { for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) { countMap[(*it)->getID()] += Item::countByType(*it, -1); } } } return countMap; } Thing* Player::getThing(size_t index) 
const { if (index >= CONST_SLOT_FIRST && index <= CONST_SLOT_LAST) { return inventory[index]; } return nullptr; } void Player::postAddNotification(Thing* thing, const Cylinder* oldParent, int32_t index, cylinderlink_t link /*= LINK_OWNER*/) { if (link == LINK_OWNER) { //calling movement scripts g_moveEvents->onPlayerEquip(this, thing->getItem(), static_cast<slots_t>(index), false); } bool requireListUpdate = false; if (link == LINK_OWNER || link == LINK_TOPPARENT) { const Item* i = (oldParent ? oldParent->getItem() : nullptr); // Check if we owned the old container too, so we don't need to do anything, // as the list was updated in postRemoveNotification assert(i ? i->getContainer() != nullptr : true); if (i) { requireListUpdate = i->getContainer()->getHoldingPlayer() != this; } else { requireListUpdate = oldParent != this; } updateInventoryWeight(); updateItemsLight(); sendStats(); } if (const Item* item = thing->getItem()) { if (const Container* container = item->getContainer()) { onSendContainer(container); } if (shopOwner && requireListUpdate) { updateSaleShopList(item); } } else if (const Creature* creature = thing->getCreature()) { if (creature == this) { //check containers std::vector<Container*> containers; for (const auto& it : openContainers) { Container* container = it.second.container; if (!Position::areInRange<1, 1, 0>(container->getPosition(), getPosition())) { containers.push_back(container); } } for (const Container* container : containers) { autoCloseContainers(container); } } } } void Player::postRemoveNotification(Thing* thing, const Cylinder* newParent, int32_t index, cylinderlink_t link /*= LINK_OWNER*/) { if (link == LINK_OWNER) { //calling movement scripts g_moveEvents->onPlayerDeEquip(this, thing->getItem(), static_cast<slots_t>(index)); } bool requireListUpdate = false; if (link == LINK_OWNER || link == LINK_TOPPARENT) { const Item* i = (newParent ? newParent->getItem() : nullptr); // Check if we owned the old container too, so we don't need to do anything, // as the list was updated in postRemoveNotification assert(i ? 
i->getContainer() != nullptr : true); if (i) { requireListUpdate = i->getContainer()->getHoldingPlayer() != this; } else { requireListUpdate = newParent != this; } updateInventoryWeight(); updateItemsLight(); sendStats(); } if (const Item* item = thing->getItem()) { if (const Container* container = item->getContainer()) { if (container->isRemoved() || !Position::areInRange<1, 1, 0>(getPosition(), container->getPosition())) { autoCloseContainers(container); } else if (container->getTopParent() == this) { onSendContainer(container); } else if (const Container* topContainer = dynamic_cast<const Container*>(container->getTopParent())) { if (const DepotChest* depotChest = dynamic_cast<const DepotChest*>(topContainer)) { bool isOwner = false; for (const auto& it : depotChests) { if (it.second == depotChest) { isOwner = true; onSendContainer(container); } } if (!isOwner) { autoCloseContainers(container); } } else { onSendContainer(container); } } else { autoCloseContainers(container); } } if (shopOwner && requireListUpdate) { updateSaleShopList(item); } } } bool Player::updateSaleShopList(const Item* item) { uint16_t itemId = item->getID(); if (itemId != ITEM_GOLD_COIN && itemId != ITEM_PLATINUM_COIN && itemId != ITEM_CRYSTAL_COIN) { auto it = std::find_if(shopItemList.begin(), shopItemList.end(), [itemId](const ShopInfo& shopInfo) { return shopInfo.itemId == itemId && shopInfo.sellPrice != 0; }); if (it == shopItemList.end()) { const Container* container = item->getContainer(); if (!container) { return false; } const auto& items = container->getItemList(); return std::any_of(items.begin(), items.end(), [this](const Item* containerItem) { return updateSaleShopList(containerItem); }); } } if (client) { client->sendSaleItemList(shopItemList); } return true; } bool Player::hasShopItemForSale(uint32_t itemId, uint8_t subType) const { const ItemType& itemType = Item::items[itemId]; return std::any_of(shopItemList.begin(), shopItemList.end(), [&](const ShopInfo& shopInfo) { return shopInfo.itemId == itemId && shopInfo.buyPrice != 0 && (!itemType.isFluidContainer() || shopInfo.subType == subType); }); } void Player::internalAddThing(Thing* thing) { internalAddThing(0, thing); } void Player::internalAddThing(uint32_t index, Thing* thing) { Item* item = thing->getItem(); if (!item) { return; } //index == 0 means we should equip this item at the most appropriate slot (no action required here) if (index > CONST_SLOT_WHEREEVER && index <= CONST_SLOT_LAST) { if (inventory[index]) { return; } inventory[index] = item; item->setParent(this); } } bool Player::setFollowCreature(Creature* creature) { if (!Creature::setFollowCreature(creature)) { setFollowCreature(nullptr); setAttackedCreature(nullptr); sendCancelMessage(RETURNVALUE_THEREISNOWAY); sendCancelTarget(); stopWalk(); return false; } return true; } bool Player::setAttackedCreature(Creature* creature) { if (!Creature::setAttackedCreature(creature)) { sendCancelTarget(); return false; } if (chaseMode && creature) { if (followCreature != creature) { //chase opponent setFollowCreature(creature); } } else if (followCreature) { setFollowCreature(nullptr); } if (creature) { g_dispatcher.addTask(createTask(std::bind(&Game::checkCreatureAttack, &g_game, getID()))); } return true; } void Player::goToFollowCreature() { if (!walkTask) { if ((OTSYS_TIME() - lastFailedFollow) < 2000) { return; } Creature::goToFollowCreature(); if (followCreature && !hasFollowPath) { lastFailedFollow = OTSYS_TIME(); } } } void Player::getPathSearchParams(const Creature* creature, 
FindPathParams& fpp) const { Creature::getPathSearchParams(creature, fpp); fpp.fullPathSearch = true; } void Player::doAttacking(uint32_t) { if (lastAttack == 0) { lastAttack = OTSYS_TIME() - getAttackSpeed() - 1; } if (hasCondition(CONDITION_PACIFIED)) { return; } if ((OTSYS_TIME() - lastAttack) >= getAttackSpeed()) { bool result = false; Item* tool = getWeapon(); const Weapon* weapon = g_weapons->getWeapon(tool); uint32_t delay = getAttackSpeed(); bool classicSpeed = g_config.getBoolean(ConfigManager::CLASSIC_ATTACK_SPEED); if (weapon) { if (!weapon->interruptSwing()) { result = weapon->useWeapon(this, tool, attackedCreature); } else if (!classicSpeed && !canDoAction()) { delay = getNextActionTime(); } else { result = weapon->useWeapon(this, tool, attackedCreature); } } else { result = Weapon::useFist(this, attackedCreature); } SchedulerTask* task = createSchedulerTask(std::max<uint32_t>(SCHEDULER_MINTICKS, delay), std::bind(&Game::checkCreatureAttack, &g_game, getID())); if (!classicSpeed) { setNextActionTask(task, false); } else { g_scheduler.addEvent(task); } if (result) { lastAttack = OTSYS_TIME(); } } } uint64_t Player::getGainedExperience(Creature* attacker) const { if (g_config.getBoolean(ConfigManager::EXPERIENCE_FROM_PLAYERS)) { Player* attackerPlayer = attacker->getPlayer(); if (attackerPlayer && attackerPlayer != this && skillLoss && std::abs(static_cast<int32_t>(attackerPlayer->getLevel() - level)) <= g_config.getNumber(ConfigManager::EXP_FROM_PLAYERS_LEVEL_RANGE)) { return std::max<uint64_t>(0, std::floor(getLostExperience() * getDamageRatio(attacker) * 0.75)); } } return 0; } void Player::onFollowCreature(const Creature* creature) { if (!creature) { stopWalk(); } } void Player::setChaseMode(bool mode) { bool prevChaseMode = chaseMode; chaseMode = mode; if (prevChaseMode != chaseMode) { if (chaseMode) { if (!followCreature && attackedCreature) { //chase opponent setFollowCreature(attackedCreature); } } else if (attackedCreature) { setFollowCreature(nullptr); cancelNextWalk = true; } } } void Player::onWalkAborted() { setNextWalkActionTask(nullptr); sendCancelWalk(); } void Player::onWalkComplete() { if (walkTask) { walkTaskEvent = g_scheduler.addEvent(walkTask); walkTask = nullptr; } } void Player::stopWalk() { cancelNextWalk = true; } LightInfo Player::getCreatureLight() const { if (internalLight.level > itemsLight.level) { return internalLight; } return itemsLight; } void Player::updateItemsLight(bool internal /*=false*/) { LightInfo maxLight; for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) { Item* item = inventory[i]; if (item) { LightInfo curLight = item->getLightInfo(); if (curLight.level > maxLight.level) { maxLight = std::move(curLight); } } } if (itemsLight.level != maxLight.level || itemsLight.color != maxLight.color) { itemsLight = maxLight; if (!internal) { g_game.changeLight(this); } } } void Player::onAddCondition(ConditionType_t type) { Creature::onAddCondition(type); if (type == CONDITION_OUTFIT && isMounted()) { dismount(); } sendIcons(); } void Player::onAddCombatCondition(ConditionType_t type) { switch (type) { case CONDITION_POISON: sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are poisoned."); break; case CONDITION_DROWN: sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are drowning."); break; case CONDITION_PARALYZE: sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are paralyzed."); break; case CONDITION_DRUNK: sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are drunk."); break; case CONDITION_CURSED: sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are 
cursed."); break; case CONDITION_FREEZING: sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are freezing."); break; case CONDITION_DAZZLED: sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are dazzled."); break; case CONDITION_BLEEDING: sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are bleeding."); break; default: break; } } void Player::onEndCondition(ConditionType_t type) { Creature::onEndCondition(type); if (type == CONDITION_INFIGHT) { onIdleStatus(); pzLocked = false; clearAttacked(); if (getSkull() != SKULL_RED && getSkull() != SKULL_BLACK) { setSkull(SKULL_NONE); } } sendIcons(); } void Player::onCombatRemoveCondition(Condition* condition) { //Creature::onCombatRemoveCondition(condition); if (condition->getId() > 0) { //Means the condition is from an item, id == slot if (g_game.getWorldType() == WORLD_TYPE_PVP_ENFORCED) { Item* item = getInventoryItem(static_cast<slots_t>(condition->getId())); if (item) { //25% chance to destroy the item if (25 >= uniform_random(1, 100)) { g_game.internalRemoveItem(item); } } } } else { if (!canDoAction()) { const uint32_t delay = getNextActionTime(); const int32_t ticks = delay - (delay % EVENT_CREATURE_THINK_INTERVAL); if (ticks < 0) { removeCondition(condition); } else { condition->setTicks(ticks); } } else { removeCondition(condition); } } } void Player::onAttackedCreature(Creature* target, bool addFightTicks /* = true */) { Creature::onAttackedCreature(target); if (target->getZone() == ZONE_PVP) { return; } if (target == this) { if (addFightTicks) { addInFightTicks(); } return; } if (hasFlag(PlayerFlag_NotGainInFight)) { return; } Player* targetPlayer = target->getPlayer(); if (targetPlayer && !isPartner(targetPlayer) && !isGuildMate(targetPlayer)) { if (!pzLocked && g_game.getWorldType() == WORLD_TYPE_PVP_ENFORCED) { pzLocked = true; sendIcons(); } targetPlayer->addInFightTicks(); if (getSkull() == SKULL_NONE && getSkullClient(targetPlayer) == SKULL_YELLOW) { addAttacked(targetPlayer); targetPlayer->sendCreatureSkull(this); } else if (!targetPlayer->hasAttacked(this)) { if (!pzLocked) { pzLocked = true; sendIcons(); } if (!Combat::isInPvpZone(this, targetPlayer) && !isInWar(targetPlayer)) { addAttacked(targetPlayer); if (targetPlayer->getSkull() == SKULL_NONE && getSkull() == SKULL_NONE) { setSkull(SKULL_WHITE); } if (getSkull() == SKULL_NONE) { targetPlayer->sendCreatureSkull(this); } } } } if (addFightTicks) { addInFightTicks(); } } void Player::onAttacked() { Creature::onAttacked(); addInFightTicks(); } void Player::onIdleStatus() { Creature::onIdleStatus(); if (party) { party->clearPlayerPoints(this); } } void Player::onPlacedCreature() { //scripting event - onLogin if (!g_creatureEvents->playerLogin(this)) { kickPlayer(true); } } void Player::onAttackedCreatureDrainHealth(Creature* target, int32_t points) { Creature::onAttackedCreatureDrainHealth(target, points); if (target) { if (party && !Combat::isPlayerCombat(target)) { Monster* tmpMonster = target->getMonster(); if (tmpMonster && tmpMonster->isHostile()) { //We have fulfilled a requirement for shared experience party->updatePlayerTicks(this, points); } } } } void Player::onTargetCreatureGainHealth(Creature* target, int32_t points) { if (target && party) { Player* tmpPlayer = nullptr; if (target->getPlayer()) { tmpPlayer = target->getPlayer(); } else if (Creature* targetMaster = target->getMaster()) { if (Player* targetMasterPlayer = targetMaster->getPlayer()) { tmpPlayer = targetMasterPlayer; } } if (isPartner(tmpPlayer)) { party->updatePlayerTicks(this, points); } } } bool 
Player::onKilledCreature(Creature* target, bool lastHit/* = true*/) { bool unjustified = false; if (hasFlag(PlayerFlag_NotGenerateLoot)) { target->setDropLoot(false); } Creature::onKilledCreature(target, lastHit); Player* targetPlayer = target->getPlayer(); if (!targetPlayer) { return false; } if (targetPlayer->getZone() == ZONE_PVP) { targetPlayer->setDropLoot(false); targetPlayer->setSkillLoss(false); } else if (!hasFlag(PlayerFlag_NotGainInFight) && !isPartner(targetPlayer)) { if (!Combat::isInPvpZone(this, targetPlayer) && hasAttacked(targetPlayer) && !targetPlayer->hasAttacked(this) && !isGuildMate(targetPlayer) && targetPlayer != this) { if (targetPlayer->getSkull() == SKULL_NONE && !isInWar(targetPlayer)) { unjustified = true; addUnjustifiedDead(targetPlayer); } if (lastHit && hasCondition(CONDITION_INFIGHT)) { pzLocked = true; Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_INFIGHT, g_config.getNumber(ConfigManager::WHITE_SKULL_TIME), 0); addCondition(condition); } } } return unjustified; } void Player::gainExperience(uint64_t gainExp, Creature* source) { if (hasFlag(PlayerFlag_NotGainExperience) || gainExp == 0 || staminaMinutes == 0) { return; } addExperience(source, gainExp, true); } void Player::onGainExperience(uint64_t gainExp, Creature* target) { if (hasFlag(PlayerFlag_NotGainExperience)) { return; } if (target && !target->getPlayer() && party && party->isSharedExperienceActive() && party->isSharedExperienceEnabled()) { party->shareExperience(gainExp, target); //We will get a share of the experience through the sharing mechanism return; } Creature::onGainExperience(gainExp, target); gainExperience(gainExp, target); } void Player::onGainSharedExperience(uint64_t gainExp, Creature* source) { gainExperience(gainExp, source); } bool Player::isImmune(CombatType_t type) const { if (hasFlag(PlayerFlag_CannotBeAttacked)) { return true; } return Creature::isImmune(type); } bool Player::isImmune(ConditionType_t type) const { if (hasFlag(PlayerFlag_CannotBeAttacked)) { return true; } return Creature::isImmune(type); } bool Player::isAttackable() const { return !hasFlag(PlayerFlag_CannotBeAttacked); } bool Player::lastHitIsPlayer(Creature* lastHitCreature) { if (!lastHitCreature) { return false; } if (lastHitCreature->getPlayer()) { return true; } Creature* lastHitMaster = lastHitCreature->getMaster(); return lastHitMaster && lastHitMaster->getPlayer(); } void Player::changeHealth(int32_t healthChange, bool sendHealthChange/* = true*/) { Creature::changeHealth(healthChange, sendHealthChange); sendStats(); } void Player::changeMana(int32_t manaChange) { if (!hasFlag(PlayerFlag_HasInfiniteMana)) { if (manaChange > 0) { mana += std::min<int32_t>(manaChange, getMaxMana() - mana); } else { mana = std::max<int32_t>(0, mana + manaChange); } } sendStats(); } void Player::changeSoul(int32_t soulChange) { if (soulChange > 0) { soul += std::min<int32_t>(soulChange, vocation->getSoulMax() - soul); } else { soul = std::max<int32_t>(0, soul + soulChange); } sendStats(); } bool Player::canWear(uint32_t lookType, uint8_t addons) const { if (group->access) { return true; } const Outfit* outfit = Outfits::getInstance().getOutfitByLookType(sex, lookType); if (!outfit) { return false; } if (outfit->premium && !isPremium()) { return false; } if (outfit->unlocked && addons == 0) { return true; } for (const OutfitEntry& outfitEntry : outfits) { if (outfitEntry.lookType == lookType) { if (outfitEntry.addons == addons || outfitEntry.addons == 3 || addons == 0) { return true; } 
return false; //have lookType on list and addons don't match } } return false; } bool Player::hasOutfit(uint32_t lookType, uint8_t addons) { const Outfit* outfit = Outfits::getInstance().getOutfitByLookType(sex, lookType); if (!outfit) { return false; } if (outfit->unlocked && addons == 0) { return true; } for (const OutfitEntry& outfitEntry : outfits) { if (outfitEntry.lookType == lookType) { if (outfitEntry.addons == addons || outfitEntry.addons == 3 || addons == 0){ return true; } return false; //have lookType on list and addons don't match } } return false; } bool Player::canLogout() { if (isConnecting) { return false; } if (getTile()->hasFlag(TILESTATE_NOLOGOUT)) { return false; } if (getTile()->hasFlag(TILESTATE_PROTECTIONZONE)) { return true; } return !isPzLocked() && !hasCondition(CONDITION_INFIGHT); } void Player::genReservedStorageRange() { //generate outfits range uint32_t base_key = PSTRG_OUTFITS_RANGE_START; for (const OutfitEntry& entry : outfits) { storageMap[++base_key] = (entry.lookType << 16) | entry.addons; } } void Player::addOutfit(uint16_t lookType, uint8_t addons) { for (OutfitEntry& outfitEntry : outfits) { if (outfitEntry.lookType == lookType) { outfitEntry.addons |= addons; return; } } outfits.emplace_back(lookType, addons); } bool Player::removeOutfit(uint16_t lookType) { for (auto it = outfits.begin(), end = outfits.end(); it != end; ++it) { OutfitEntry& entry = *it; if (entry.lookType == lookType) { outfits.erase(it); return true; } } return false; } bool Player::removeOutfitAddon(uint16_t lookType, uint8_t addons) { for (OutfitEntry& outfitEntry : outfits) { if (outfitEntry.lookType == lookType) { outfitEntry.addons &= ~addons; return true; } } return false; } bool Player::getOutfitAddons(const Outfit& outfit, uint8_t& addons) const { if (group->access) { addons = 3; return true; } if (outfit.premium && !isPremium()) { return false; } for (const OutfitEntry& outfitEntry : outfits) { if (outfitEntry.lookType != outfit.lookType) { continue; } addons = outfitEntry.addons; return true; } if (!outfit.unlocked) { return false; } addons = 0; return true; } void Player::setSex(PlayerSex_t newSex) { sex = newSex; } Skulls_t Player::getSkull() const { if (hasFlag(PlayerFlag_NotGainInFight)) { return SKULL_NONE; } return skull; } Skulls_t Player::getSkullClient(const Creature* creature) const { if (!creature || g_game.getWorldType() != WORLD_TYPE_PVP) { return SKULL_NONE; } const Player* player = creature->getPlayer(); if (!player || player->getSkull() != SKULL_NONE) { return Creature::getSkullClient(creature); } if (player->hasAttacked(this)) { return SKULL_YELLOW; } if (isPartner(player)) { return SKULL_GREEN; } return Creature::getSkullClient(creature); } bool Player::hasAttacked(const Player* attacked) const { if (hasFlag(PlayerFlag_NotGainInFight) || !attacked) { return false; } return attackedSet.find(attacked->guid) != attackedSet.end(); } void Player::addAttacked(const Player* attacked) { if (hasFlag(PlayerFlag_NotGainInFight) || !attacked || attacked == this) { return; } attackedSet.insert(attacked->guid); } void Player::removeAttacked(const Player* attacked) { if (!attacked || attacked == this) { return; } auto it = attackedSet.find(attacked->guid); if (it != attackedSet.end()) { attackedSet.erase(it); } } void Player::clearAttacked() { attackedSet.clear(); } void Player::addUnjustifiedDead(const Player* attacked) { if (hasFlag(PlayerFlag_NotGainInFight) || attacked == this || g_game.getWorldType() == WORLD_TYPE_PVP_ENFORCED) { return; } 
sendTextMessage(MESSAGE_EVENT_ADVANCE, "Warning! The murder of " + attacked->getName() + " was not justified."); skullTicks += g_config.getNumber(ConfigManager::FRAG_TIME); if (getSkull() != SKULL_BLACK) { if (g_config.getNumber(ConfigManager::KILLS_TO_BLACK) != 0 && skullTicks > (g_config.getNumber(ConfigManager::KILLS_TO_BLACK) - 1) * static_cast<int64_t>(g_config.getNumber(ConfigManager::FRAG_TIME))) { setSkull(SKULL_BLACK); } else if (getSkull() != SKULL_RED && g_config.getNumber(ConfigManager::KILLS_TO_RED) != 0 && skullTicks > (g_config.getNumber(ConfigManager::KILLS_TO_RED) - 1) * static_cast<int64_t>(g_config.getNumber(ConfigManager::FRAG_TIME))) { setSkull(SKULL_RED); } } } void Player::checkSkullTicks(int64_t ticks) { int64_t newTicks = skullTicks - ticks; if (newTicks < 0) { skullTicks = 0; } else { skullTicks = newTicks; } if ((skull == SKULL_RED || skull == SKULL_BLACK) && skullTicks < 1 && !hasCondition(CONDITION_INFIGHT)) { setSkull(SKULL_NONE); } } bool Player::isPromoted() const { uint16_t promotedVocation = g_vocations.getPromotedVocation(vocation->getId()); return promotedVocation == VOCATION_NONE && vocation->getId() != promotedVocation; } double Player::getLostPercent() const { int32_t blessingCount = std::bitset<5>(blessings).count(); int32_t deathLosePercent = g_config.getNumber(ConfigManager::DEATH_LOSE_PERCENT); if (deathLosePercent != -1) { if (isPromoted()) { deathLosePercent -= 3; } deathLosePercent -= blessingCount; return std::max<int32_t>(0, deathLosePercent) / 100.; } double lossPercent; if (level >= 25) { double tmpLevel = level + (levelPercent / 100.); lossPercent = static_cast<double>((tmpLevel + 50) * 50 * ((tmpLevel * tmpLevel) - (5 * tmpLevel) + 8)) / experience; } else { lossPercent = 10; } double percentReduction = 0; if (isPromoted()) { percentReduction += 30; } percentReduction += blessingCount * 8; return lossPercent * (1 - (percentReduction / 100.)) / 100.; } void Player::learnInstantSpell(const std::string& spellName) { if (!hasLearnedInstantSpell(spellName)) { learnedInstantSpellList.push_front(spellName); } } void Player::forgetInstantSpell(const std::string& spellName) { learnedInstantSpellList.remove(spellName); } bool Player::hasLearnedInstantSpell(const std::string& spellName) const { if (hasFlag(PlayerFlag_CannotUseSpells)) { return false; } if (hasFlag(PlayerFlag_IgnoreSpellCheck)) { return true; } for (const auto& learnedSpellName : learnedInstantSpellList) { if (strcasecmp(learnedSpellName.c_str(), spellName.c_str()) == 0) { return true; } } return false; } bool Player::isInWar(const Player* player) const { if (!player || !guild) { return false; } const Guild* playerGuild = player->getGuild(); if (!playerGuild) { return false; } return isInWarList(playerGuild->getId()) && player->isInWarList(guild->getId()); } bool Player::isInWarList(uint32_t guildId) const { return std::find(guildWarVector.begin(), guildWarVector.end(), guildId) != guildWarVector.end(); } bool Player::isPremium() const { if (g_config.getBoolean(ConfigManager::FREE_PREMIUM) || hasFlag(PlayerFlag_IsAlwaysPremium)) { return true; } return premiumEndsAt > time(nullptr); } void Player::setPremiumTime(time_t premiumEndsAt) { this->premiumEndsAt = premiumEndsAt; sendBasicData(); } PartyShields_t Player::getPartyShield(const Player* player) const { if (!player) { return SHIELD_NONE; } if (party) { if (party->getLeader() == player) { if (party->isSharedExperienceActive()) { if (party->isSharedExperienceEnabled()) { return SHIELD_YELLOW_SHAREDEXP; } if 
(party->canUseSharedExperience(player)) { return SHIELD_YELLOW_NOSHAREDEXP; } return SHIELD_YELLOW_NOSHAREDEXP_BLINK; } return SHIELD_YELLOW; } if (player->party == party) { if (party->isSharedExperienceActive()) { if (party->isSharedExperienceEnabled()) { return SHIELD_BLUE_SHAREDEXP; } if (party->canUseSharedExperience(player)) { return SHIELD_BLUE_NOSHAREDEXP; } return SHIELD_BLUE_NOSHAREDEXP_BLINK; } return SHIELD_BLUE; } if (isInviting(player)) { return SHIELD_WHITEBLUE; } } if (player->isInviting(this)) { return SHIELD_WHITEYELLOW; } if (player->party) { return SHIELD_GRAY; } return SHIELD_NONE; } bool Player::isInviting(const Player* player) const { if (!player || !party || party->getLeader() != this) { return false; } return party->isPlayerInvited(player); } bool Player::isPartner(const Player* player) const { if (!player || !party || player == this) { return false; } return party == player->party; } bool Player::isGuildMate(const Player* player) const { if (!player || !guild) { return false; } return guild == player->guild; } void Player::sendPlayerPartyIcons(Player* player) { sendCreatureShield(player); sendCreatureSkull(player); } bool Player::addPartyInvitation(Party* party) { auto it = std::find(invitePartyList.begin(), invitePartyList.end(), party); if (it != invitePartyList.end()) { return false; } invitePartyList.push_front(party); return true; } void Player::removePartyInvitation(Party* party) { invitePartyList.remove(party); } void Player::clearPartyInvitations() { for (Party* invitingParty : invitePartyList) { invitingParty->removeInvite(*this, false); } invitePartyList.clear(); } GuildEmblems_t Player::getGuildEmblem(const Player* player) const { if (!player) { return GUILDEMBLEM_NONE; } const Guild* playerGuild = player->getGuild(); if (!playerGuild) { return GUILDEMBLEM_NONE; } if (player->getGuildWarVector().empty()) { if (guild == playerGuild) { return GUILDEMBLEM_MEMBER; } else { return GUILDEMBLEM_OTHER; } } else if (guild == playerGuild) { return GUILDEMBLEM_ALLY; } else if (isInWar(player)) { return GUILDEMBLEM_ENEMY; } return GUILDEMBLEM_NEUTRAL; } uint8_t Player::getCurrentMount() const { int32_t value; if (getStorageValue(PSTRG_MOUNTS_CURRENTMOUNT, value)) { return value; } return 0; } void Player::setCurrentMount(uint8_t mountId) { addStorageValue(PSTRG_MOUNTS_CURRENTMOUNT, mountId); } bool Player::toggleMount(bool mount) { if ((OTSYS_TIME() - lastToggleMount) < 3000 && !wasMounted) { sendCancelMessage(RETURNVALUE_YOUAREEXHAUSTED); return false; } if (mount) { if (isMounted()) { return false; } if (!group->access && tile->hasFlag(TILESTATE_PROTECTIONZONE)) { sendCancelMessage(RETURNVALUE_ACTIONNOTPERMITTEDINPROTECTIONZONE); return false; } const Outfit* playerOutfit = Outfits::getInstance().getOutfitByLookType(getSex(), defaultOutfit.lookType); if (!playerOutfit) { return false; } uint8_t currentMountId = getCurrentMount(); if (currentMountId == 0) { sendOutfitWindow(); return false; } Mount* currentMount = g_game.mounts.getMountByID(currentMountId); if (!currentMount) { return false; } if (!hasMount(currentMount)) { setCurrentMount(0); sendOutfitWindow(); return false; } if (currentMount->premium && !isPremium()) { sendCancelMessage(RETURNVALUE_YOUNEEDPREMIUMACCOUNT); return false; } if (hasCondition(CONDITION_OUTFIT)) { sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return false; } defaultOutfit.lookMount = currentMount->clientId; if (currentMount->speed != 0) { g_game.changeSpeed(this, currentMount->speed); } } else { if (!isMounted()) { return false; } 
dismount(); } g_game.internalCreatureChangeOutfit(this, defaultOutfit); lastToggleMount = OTSYS_TIME(); return true; } bool Player::tameMount(uint8_t mountId) { if (!g_game.mounts.getMountByID(mountId)) { return false; } const uint8_t tmpMountId = mountId - 1; const uint32_t key = PSTRG_MOUNTS_RANGE_START + (tmpMountId / 31); int32_t value; if (getStorageValue(key, value)) { value |= (1 << (tmpMountId % 31)); } else { value = (1 << (tmpMountId % 31)); } addStorageValue(key, value); return true; } bool Player::untameMount(uint8_t mountId) { if (!g_game.mounts.getMountByID(mountId)) { return false; } const uint8_t tmpMountId = mountId - 1; const uint32_t key = PSTRG_MOUNTS_RANGE_START + (tmpMountId / 31); int32_t value; if (!getStorageValue(key, value)) { return true; } value &= ~(1 << (tmpMountId % 31)); addStorageValue(key, value); if (getCurrentMount() == mountId) { if (isMounted()) { dismount(); g_game.internalCreatureChangeOutfit(this, defaultOutfit); } setCurrentMount(0); } return true; } bool Player::hasMount(const Mount* mount) const { if (isAccessPlayer()) { return true; } if (mount->premium && !isPremium()) { return false; } const uint8_t tmpMountId = mount->id - 1; int32_t value; if (!getStorageValue(PSTRG_MOUNTS_RANGE_START + (tmpMountId / 31), value)) { return false; } return ((1 << (tmpMountId % 31)) & value) != 0; } void Player::dismount() { Mount* mount = g_game.mounts.getMountByID(getCurrentMount()); if (mount && mount->speed > 0) { g_game.changeSpeed(this, -mount->speed); } defaultOutfit.lookMount = 0; } bool Player::addOfflineTrainingTries(skills_t skill, uint64_t tries) { if (tries == 0 || skill == SKILL_LEVEL) { return false; } bool sendUpdate = false; uint32_t oldSkillValue, newSkillValue; long double oldPercentToNextLevel, newPercentToNextLevel; if (skill == SKILL_MAGLEVEL) { uint64_t currReqMana = vocation->getReqMana(magLevel); uint64_t nextReqMana = vocation->getReqMana(magLevel + 1); if (currReqMana >= nextReqMana) { return false; } oldSkillValue = magLevel; oldPercentToNextLevel = static_cast<long double>(manaSpent * 100) / nextReqMana; g_events->eventPlayerOnGainSkillTries(this, SKILL_MAGLEVEL, tries); uint32_t currMagLevel = magLevel; while ((manaSpent + tries) >= nextReqMana) { tries -= nextReqMana - manaSpent; magLevel++; manaSpent = 0; g_creatureEvents->playerAdvance(this, SKILL_MAGLEVEL, magLevel - 1, magLevel); sendUpdate = true; currReqMana = nextReqMana; nextReqMana = vocation->getReqMana(magLevel + 1); if (currReqMana >= nextReqMana) { tries = 0; break; } } manaSpent += tries; if (magLevel != currMagLevel) { std::ostringstream ss; ss << "You advanced to magic level " << magLevel << '.'; sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str()); } uint8_t newPercent; if (nextReqMana > currReqMana) { newPercent = Player::getPercentLevel(manaSpent, nextReqMana); newPercentToNextLevel = static_cast<long double>(manaSpent * 100) / nextReqMana; } else { newPercent = 0; newPercentToNextLevel = 0; } if (newPercent != magLevelPercent) { magLevelPercent = newPercent; sendUpdate = true; } newSkillValue = magLevel; } else { uint64_t currReqTries = vocation->getReqSkillTries(skill, skills[skill].level); uint64_t nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1); if (currReqTries >= nextReqTries) { return false; } oldSkillValue = skills[skill].level; oldPercentToNextLevel = static_cast<long double>(skills[skill].tries * 100) / nextReqTries; g_events->eventPlayerOnGainSkillTries(this, skill, tries); uint32_t currSkillLevel = skills[skill].level; while 
((skills[skill].tries + tries) >= nextReqTries) { tries -= nextReqTries - skills[skill].tries; skills[skill].level++; skills[skill].tries = 0; skills[skill].percent = 0; g_creatureEvents->playerAdvance(this, skill, (skills[skill].level - 1), skills[skill].level); sendUpdate = true; currReqTries = nextReqTries; nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1); if (currReqTries >= nextReqTries) { tries = 0; break; } } skills[skill].tries += tries; if (currSkillLevel != skills[skill].level) { std::ostringstream ss; ss << "You advanced to " << getSkillName(skill) << " level " << skills[skill].level << '.'; sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str()); } uint8_t newPercent; if (nextReqTries > currReqTries) { newPercent = Player::getPercentLevel(skills[skill].tries, nextReqTries); newPercentToNextLevel = static_cast<long double>(skills[skill].tries * 100) / nextReqTries; } else { newPercent = 0; newPercentToNextLevel = 0; } if (skills[skill].percent != newPercent) { skills[skill].percent = newPercent; sendUpdate = true; } newSkillValue = skills[skill].level; } if (sendUpdate) { sendSkills(); } std::ostringstream ss; ss << std::fixed << std::setprecision(2) << "Your " << ucwords(getSkillName(skill)) << " skill changed from level " << oldSkillValue << " (with " << oldPercentToNextLevel << "% progress towards level " << (oldSkillValue + 1) << ") to level " << newSkillValue << " (with " << newPercentToNextLevel << "% progress towards level " << (newSkillValue + 1) << ')'; sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str()); return sendUpdate; } bool Player::hasModalWindowOpen(uint32_t modalWindowId) const { return find(modalWindows.begin(), modalWindows.end(), modalWindowId) != modalWindows.end(); } void Player::onModalWindowHandled(uint32_t modalWindowId) { modalWindows.remove(modalWindowId); } void Player::sendModalWindow(const ModalWindow& modalWindow) { if (!client) { return; } modalWindows.push_front(modalWindow.id); client->sendModalWindow(modalWindow); } void Player::clearModalWindows() { modalWindows.clear(); } uint16_t Player::getHelpers() const { uint16_t helpers; if (guild && party) { std::unordered_set<Player*> helperSet; const auto& guildMembers = guild->getMembersOnline(); helperSet.insert(guildMembers.begin(), guildMembers.end()); const auto& partyMembers = party->getMembers(); helperSet.insert(partyMembers.begin(), partyMembers.end()); const auto& partyInvitees = party->getInvitees(); helperSet.insert(partyInvitees.begin(), partyInvitees.end()); helperSet.insert(party->getLeader()); helpers = helperSet.size(); } else if (guild) { helpers = guild->getMembersOnline().size(); } else if (party) { helpers = party->getMemberCount() + party->getInvitationCount() + 1; } else { helpers = 0; } return helpers; } void Player::sendClosePrivate(uint16_t channelId) { if (channelId == CHANNEL_GUILD || channelId == CHANNEL_PARTY) { g_chat->removeUserFromChannel(*this, channelId); } if (client) { client->sendClosePrivate(channelId); } } uint64_t Player::getMoney() const { std::vector<const Container*> containers; uint64_t moneyCount = 0; for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) { Item* item = inventory[i]; if (!item) { continue; } const Container* container = item->getContainer(); if (container) { containers.push_back(container); } else { moneyCount += item->getWorth(); } } size_t i = 0; while (i < containers.size()) { const Container* container = containers[i++]; for (const Item* item : container->getItemList()) { const Container* tmpContainer = 
item->getContainer(); if (tmpContainer) { containers.push_back(tmpContainer); } else { moneyCount += item->getWorth(); } } } return moneyCount; } size_t Player::getMaxVIPEntries() const { if (group->maxVipEntries != 0) { return group->maxVipEntries; } return g_config.getNumber(isPremium() ? ConfigManager::VIP_PREMIUM_LIMIT : ConfigManager::VIP_FREE_LIMIT ); } size_t Player::getMaxDepotItems() const { if (group->maxDepotItems != 0) { return group->maxDepotItems; } else if (isPremium()) { return 2000; } return 1000; } std::forward_list<Condition*> Player::getMuteConditions() const { std::forward_list<Condition*> muteConditions; for (Condition* condition : conditions) { if (condition->getTicks() <= 0) { continue; } ConditionType_t type = condition->getType(); if (type != CONDITION_MUTED && type != CONDITION_CHANNELMUTEDTICKS && type != CONDITION_YELLTICKS) { continue; } muteConditions.push_front(condition); } return muteConditions; } void Player::setGuild(Guild* guild) { if (guild == this->guild) { return; } Guild* oldGuild = this->guild; this->guildNick.clear(); this->guild = nullptr; this->guildRank = nullptr; if (guild) { GuildRank_ptr rank = guild->getRankByLevel(1); if (!rank) { return; } this->guild = guild; this->guildRank = rank; guild->addMember(this); } if (oldGuild) { oldGuild->removeMember(this); } }
1
18,546
if (StoreInbox* storeInbox = getStoreInbox()) {
otland-forgottenserver
cpp
@@ -459,9 +459,14 @@ API.prototype._import = function(cb) {
   });
 };
 
+
+API.prototype.importAllFromMnemonic = function(words, opts, cb) {
+}
+
 /**
  * Import from Mnemonics (language autodetected)
  * Can throw an error if mnemonic is invalid
+ * Will try compliant and non-compliant derivation
  *
  * @param {String} BIP39 words
  * @param {Object} opts
1
'use strict'; var _ = require('lodash'); var $ = require('preconditions').singleton(); var util = require('util'); var async = require('async'); var events = require('events'); var Bitcore = require('bitcore-lib'); var Bitcore_ = { btc: Bitcore, bch: require('bitcore-lib-cash'), }; var Mnemonic = require('bitcore-mnemonic'); var sjcl = require('sjcl'); var url = require('url'); var querystring = require('querystring'); var Common = require('./common'); var Constants = Common.Constants; var Defaults = Common.Defaults; var Utils = Common.Utils; var PayPro = require('./paypro'); var log = require('./log'); var Credentials = require('./credentials'); var Verifier = require('./verifier'); var Errors = require('./errors'); const Request = require('./request'); var BASE_URL = 'http://localhost:3232/bws/api'; /** * @desc ClientAPI constructor. * * @param {Object} opts * @constructor */ function API(opts) { opts = opts || {}; this.doNotVerifyPayPro = opts.doNotVerifyPayPro; this.timeout = opts.timeout || 50000; this.logLevel = opts.logLevel || 'silent'; this.supportStaffWalletId = opts.supportStaffWalletId; this.request = new Request(opts.baseUrl || BASE_URL, {r: opts.request}); log.setLevel(this.logLevel); }; util.inherits(API, events.EventEmitter); API.privateKeyEncryptionOpts = { iter: 10000 }; API.prototype.initNotifications = function(cb) { log.warn('DEPRECATED: use initialize() instead.'); this.initialize({}, cb); }; API.prototype.initialize = function(opts, cb) { $.checkState(this.credentials); var self = this; self.notificationIncludeOwn = !!opts.notificationIncludeOwn; self._initNotifications(opts); return cb(); }; API.prototype.dispose = function(cb) { var self = this; self._disposeNotifications(); self.request.logout(cb); }; API.prototype._fetchLatestNotifications = function(interval, cb) { var self = this; cb = cb || function() {}; var opts = { lastNotificationId: self.lastNotificationId, includeOwn: self.notificationIncludeOwn, }; if (!self.lastNotificationId) { opts.timeSpan = interval + 1; } self.getNotifications(opts, function(err, notifications) { if (err) { log.warn('Error receiving notifications.'); log.debug(err); return cb(err); } if (notifications.length > 0) { self.lastNotificationId = _.last(notifications).id; } _.each(notifications, function(notification) { self.emit('notification', notification); }); return cb(); }); }; API.prototype._initNotifications = function(opts) { var self = this; opts = opts || {}; var interval = opts.notificationIntervalSeconds || 5; self.notificationsIntervalId = setInterval(function() { self._fetchLatestNotifications(interval, function(err) { if (err) { if (err instanceof Errors.NOT_FOUND || err instanceof Errors.NOT_AUTHORIZED) { self._disposeNotifications(); } } }); }, interval * 1000); }; API.prototype._disposeNotifications = function() { var self = this; if (self.notificationsIntervalId) { clearInterval(self.notificationsIntervalId); self.notificationsIntervalId = null; } }; /** * Reset notification polling with new interval * @param {Numeric} notificationIntervalSeconds - use 0 to pause notifications */ API.prototype.setNotificationsInterval = function(notificationIntervalSeconds) { var self = this; self._disposeNotifications(); if (notificationIntervalSeconds > 0) { self._initNotifications({ notificationIntervalSeconds: notificationIntervalSeconds }); } }; /** * Encrypt a message * @private * @static * @memberof Client.API * @param {String} message * @param {String} encryptingKey */ API._encryptMessage = function(message, encryptingKey) { 
if (!message) return null; return Utils.encryptMessage(message, encryptingKey); }; API.prototype._processTxNotes = function(notes) { var self = this; if (!notes) return; var encryptingKey = self.credentials.sharedEncryptingKey; _.each([].concat(notes), function(note) { note.encryptedBody = note.body; note.body = Utils.decryptMessageNoThrow(note.body, encryptingKey); note.encryptedEditedByName = note.editedByName; note.editedByName = Utils.decryptMessageNoThrow(note.editedByName, encryptingKey); }); }; /** * Decrypt text fields in transaction proposals * @private * @static * @memberof Client.API * @param {Array} txps * @param {String} encryptingKey */ API.prototype._processTxps = function(txps) { var self = this; if (!txps) return; var encryptingKey = self.credentials.sharedEncryptingKey; _.each([].concat(txps), function(txp) { txp.encryptedMessage = txp.message; txp.message = Utils.decryptMessageNoThrow(txp.message, encryptingKey) || null; txp.creatorName = Utils.decryptMessageNoThrow(txp.creatorName, encryptingKey); _.each(txp.actions, function(action) { // CopayerName encryption is optional (not available in older wallets) action.copayerName = Utils.decryptMessageNoThrow(action.copayerName, encryptingKey); action.comment = Utils.decryptMessageNoThrow(action.comment, encryptingKey); // TODO get copayerName from Credentials -> copayerId to copayerName // action.copayerName = null; }); _.each(txp.outputs, function(output) { output.encryptedMessage = output.message; output.message = Utils.decryptMessageNoThrow(output.message, encryptingKey) || null; }); txp.hasUnconfirmedInputs = _.some(txp.inputs, function(input) { return input.confirmations == 0; }); self._processTxNotes(txp.note); }); }; /** * Seed from random * * @param {Object} opts * @param {String} opts.coin - default 'btc' * @param {String} opts.network - default 'livenet' */ API.prototype.seedFromRandom = function(opts) { $.checkArgument(arguments.length <= 1, 'DEPRECATED: only 1 argument accepted.'); $.checkArgument(_.isUndefined(opts) || _.isObject(opts), 'DEPRECATED: argument should be an options object.'); opts = opts || {}; this.credentials = Credentials.create(opts.coin || 'btc', opts.network || 'livenet'); this.request.setCredentials(this.credentials); }; var _deviceValidated; /** * Seed from random * * @param {Object} opts * @param {String} opts.passphrase * @param {Boolean} opts.skipDeviceValidation */ API.prototype.validateKeyDerivation = function(opts, cb) { var self = this; opts = opts || {}; var c = self.credentials; function testMessageSigning(xpriv, xpub) { var nonHardenedPath = 'm/0/0'; var message = 'Lorem ipsum dolor sit amet, ne amet urbanitas percipitur vim, libris disputando his ne, et facer suavitate qui. Ei quidam laoreet sea. Cu pro dico aliquip gubergren, in mundi postea usu. 
Ad labitur posidonium interesset duo, est et doctus molestie adipiscing.'; var priv = xpriv.deriveChild(nonHardenedPath).privateKey; var signature = Utils.signMessage(message, priv); var pub = xpub.deriveChild(nonHardenedPath).publicKey; return Utils.verifyMessage(message, signature, pub); }; function testHardcodedKeys() { var words = "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about"; var xpriv = Mnemonic(words).toHDPrivateKey(); if (xpriv.toString() != 'xprv9s21ZrQH143K3GJpoapnV8SFfukcVBSfeCficPSGfubmSFDxo1kuHnLisriDvSnRRuL2Qrg5ggqHKNVpxR86QEC8w35uxmGoggxtQTPvfUu') return false; xpriv = xpriv.deriveChild("m/44'/0'/0'"); if (xpriv.toString() != 'xprv9xpXFhFpqdQK3TmytPBqXtGSwS3DLjojFhTGht8gwAAii8py5X6pxeBnQ6ehJiyJ6nDjWGJfZ95WxByFXVkDxHXrqu53WCRGypk2ttuqncb') return false; var xpub = Bitcore.HDPublicKey.fromString('xpub6BosfCnifzxcFwrSzQiqu2DBVTshkCXacvNsWGYJVVhhawA7d4R5WSWGFNbi8Aw6ZRc1brxMyWMzG3DSSSSoekkudhUd9yLb6qx39T9nMdj'); return testMessageSigning(xpriv, xpub); }; function testLiveKeys() { var words; try { words = c.getMnemonic(); } catch (ex) {} var xpriv; if (words && (!c.mnemonicHasPassphrase || opts.passphrase)) { var m = new Mnemonic(words); xpriv = m.toHDPrivateKey(opts.passphrase, c.network); } if (!xpriv) { xpriv = new Bitcore.HDPrivateKey(c.xPrivKey); } xpriv = xpriv.deriveChild(c.getBaseAddressDerivationPath()); var xpub = new Bitcore.HDPublicKey(c.xPubKey); return testMessageSigning(xpriv, xpub); }; var hardcodedOk = true; if (!_deviceValidated && !opts.skipDeviceValidation) { hardcodedOk = testHardcodedKeys(); _deviceValidated = true; } var liveOk = (c.canSign() && !c.isPrivKeyEncrypted()) ? testLiveKeys() : true; self.keyDerivationOk = hardcodedOk && liveOk; return cb(null, self.keyDerivationOk); }; /** * Seed from random with mnemonic * * @param {Object} opts * @param {String} opts.coin - default 'btc' * @param {String} opts.network - default 'livenet' * @param {String} opts.passphrase * @param {Number} opts.language - default 'en' * @param {Number} opts.account - default 0 */ API.prototype.seedFromRandomWithMnemonic = function(opts) { $.checkArgument(arguments.length <= 1, 'DEPRECATED: only 1 argument accepted.'); $.checkArgument(_.isUndefined(opts) || _.isObject(opts), 'DEPRECATED: argument should be an options object.'); opts = opts || {}; this.credentials = Credentials.createWithMnemonic(opts.coin || 'btc', opts.network || 'livenet', opts.passphrase, opts.language || 'en', opts.account || 0); this.request.setCredentials(this.credentials); }; API.prototype.getMnemonic = function() { return this.credentials.getMnemonic(); }; API.prototype.mnemonicHasPassphrase = function() { return this.credentials.mnemonicHasPassphrase; }; API.prototype.clearMnemonic = function() { return this.credentials.clearMnemonic(); }; /** * Seed from extended private key * * @param {String} xPrivKey * @param {String} opts.coin - default 'btc' * @param {Number} opts.account - default 0 * @param {String} opts.derivationStrategy - default 'BIP44' */ API.prototype.seedFromExtendedPrivateKey = function(xPrivKey, opts) { opts = opts || {}; this.credentials = Credentials.fromExtendedPrivateKey(opts.coin || 'btc', xPrivKey, opts.account || 0, opts.derivationStrategy || Constants.DERIVATION_STRATEGIES.BIP44, opts); this.request.setCredentials(this.credentials); }; /** * Seed from Mnemonics (language autodetected) * Can throw an error if mnemonic is invalid * * @param {String} BIP39 words * @param {Object} opts * @param {String} opts.coin - default 'btc' * 
@param {String} opts.network - default 'livenet' * @param {String} opts.passphrase * @param {Number} opts.account - default 0 * @param {String} opts.derivationStrategy - default 'BIP44' */ API.prototype.seedFromMnemonic = function(words, opts) { $.checkArgument(_.isUndefined(opts) || _.isObject(opts), 'DEPRECATED: second argument should be an options object.'); opts = opts || {}; this.credentials = Credentials.fromMnemonic(opts.coin || 'btc', opts.network || 'livenet', words, opts.passphrase, opts.account || 0, opts.derivationStrategy || Constants.DERIVATION_STRATEGIES.BIP44, opts); this.request.setCredentials(this.credentials); }; /** * Seed from external wallet public key * * @param {String} xPubKey * @param {String} source - A name identifying the source of the xPrivKey (e.g. ledger, TREZOR, ...) * @param {String} entropySourceHex - A HEX string containing pseudo-random data, that can be deterministically derived from the xPrivKey, and should not be derived from xPubKey. * @param {Object} opts * @param {String} opts.coin - default 'btc' * @param {Number} opts.account - default 0 * @param {String} opts.derivationStrategy - default 'BIP44' */ API.prototype.seedFromExtendedPublicKey = function(xPubKey, source, entropySourceHex, opts) { $.checkArgument(_.isUndefined(opts) || _.isObject(opts)); opts = opts || {}; this.credentials = Credentials.fromExtendedPublicKey(opts.coin || 'btc', xPubKey, source, entropySourceHex, opts.account || 0, opts.derivationStrategy || Constants.DERIVATION_STRATEGIES.BIP44); this.request.setCredentials(this.credentials); }; /** * Export wallet * * @param {Object} opts * @param {Boolean} opts.password * @param {Boolean} opts.noSign */ API.prototype.export = function(opts) { $.checkState(this.credentials); opts = opts || {}; var output; var c = Credentials.fromObj(this.credentials); if (opts.noSign) { c.setNoSign(); } else if (opts.password) { c.decryptPrivateKey(opts.password); } output = JSON.stringify(c.toObj()); return output; }; /** * Import wallet * * @param {Object} str - The serialized JSON created with #export */ API.prototype.import = function(str) { try { var credentials = Credentials.fromObj(JSON.parse(str)); this.credentials = credentials; } catch (ex) { throw new Errors.INVALID_BACKUP; } this.request.setCredentials(this.credentials); }; API.prototype._import = function(cb) { $.checkState(this.credentials); var self = this; // First option, grab wallet info from BWS. self.openWallet(function(err, ret) { // it worked? if (!err) return cb(null, ret); // Is the error other than "copayer was not found"? || or no priv key. 
if (err instanceof Errors.NOT_AUTHORIZED || self.isPrivKeyExternal()) return cb(err); //Second option, lets try to add an access log.info('Copayer not found, trying to add access'); self.addAccess({}, function(err) { if (err) { return cb(new Errors.WALLET_DOES_NOT_EXIST); } self.openWallet(cb); }); }); }; /** * Import from Mnemonics (language autodetected) * Can throw an error if mnemonic is invalid * * @param {String} BIP39 words * @param {Object} opts * @param {String} opts.coin - default 'btc' * @param {String} opts.network - default 'livenet' * @param {String} opts.passphrase * @param {Number} opts.account - default 0 * @param {String} opts.derivationStrategy - default 'BIP44' * @param {String} opts.entropySourcePath - Only used if the wallet was created on a HW wallet, in which that private keys was not available for all the needed derivations * @param {String} opts.walletPrivKey - if available, walletPrivKey for encrypting metadata */ API.prototype.importFromMnemonic = function(words, opts, cb) { log.debug('Importing from Mnemonic'); var self = this; opts = opts || {}; function derive(nonCompliantDerivation) { return Credentials.fromMnemonic(opts.coin || 'btc', opts.network || 'livenet', words, opts.passphrase, opts.account || 0, opts.derivationStrategy || Constants.DERIVATION_STRATEGIES.BIP44, { nonCompliantDerivation: nonCompliantDerivation, entropySourcePath: opts.entropySourcePath, walletPrivKey: opts.walletPrivKey, }); }; try { self.credentials = derive(false); } catch (e) { log.info('Mnemonic error:', e); return cb(new Errors.INVALID_BACKUP); } this.request.setCredentials(this.credentials); self._import(function(err, ret) { if (!err) return cb(null, ret); if (err instanceof Errors.INVALID_BACKUP) return cb(err); if (err instanceof Errors.NOT_AUTHORIZED || err instanceof Errors.WALLET_DOES_NOT_EXIST) { var altCredentials = derive(true); if (altCredentials.xPubKey.toString() == self.credentials.xPubKey.toString()) return cb(err); self.credentials = altCredentials; self.request.setCredentials(self.credentials); return self._import(cb); } return cb(err); }); }; /* * Import from extended private key * * @param {String} xPrivKey * @param {String} opts.coin - default 'btc' * @param {Number} opts.account - default 0 * @param {String} opts.derivationStrategy - default 'BIP44' * @param {String} opts.compliantDerivation - default 'true' * @param {String} opts.walletPrivKey - if available, walletPrivKey for encrypting metadata * @param {Callback} cb - The callback that handles the response. It returns a flag indicating that the wallet is imported. */ API.prototype.importFromExtendedPrivateKey = function(xPrivKey, opts, cb) { log.debug('Importing from Extended Private Key'); if (!cb) { cb = opts; opts = {}; log.warn('DEPRECATED WARN: importFromExtendedPrivateKey should receive 3 parameters.'); } try { this.credentials = Credentials.fromExtendedPrivateKey(opts.coin || 'btc', xPrivKey, opts.account || 0, opts.derivationStrategy || Constants.DERIVATION_STRATEGIES.BIP44, opts); } catch (e) { log.info('xPriv error:', e); return cb(new Errors.INVALID_BACKUP); }; this.request.setCredentials(this.credentials); this._import(cb); }; /** * Import from Extended Public Key * * @param {String} xPubKey * @param {String} source - A name identifying the source of the xPrivKey * @param {String} entropySourceHex - A HEX string containing pseudo-random data, that can be deterministically derived from the xPrivKey, and should not be derived from xPubKey. 
* @param {Object} opts * @param {String} opts.coin - default 'btc' * @param {Number} opts.account - default 0 * @param {String} opts.derivationStrategy - default 'BIP44' * @param {String} opts.compliantDerivation - default 'true' */ API.prototype.importFromExtendedPublicKey = function(xPubKey, source, entropySourceHex, opts, cb) { $.checkArgument(arguments.length == 5, "DEPRECATED: should receive 5 arguments"); $.checkArgument(_.isUndefined(opts) || _.isObject(opts)); $.shouldBeFunction(cb); opts = opts || {}; log.debug('Importing from Extended Private Key'); try { this.credentials = Credentials.fromExtendedPublicKey(opts.coin || 'btc', xPubKey, source, entropySourceHex, opts.account || 0, opts.derivationStrategy || Constants.DERIVATION_STRATEGIES.BIP44, opts); } catch (e) { log.info('xPriv error:', e); return cb(new Errors.INVALID_BACKUP); }; this.request.setCredentials(this.credentials); this._import(cb); }; API.prototype.decryptBIP38PrivateKey = function(encryptedPrivateKeyBase58, passphrase, opts, cb) { var Bip38 = require('bip38'); var bip38 = new Bip38(); var privateKeyWif; try { privateKeyWif = bip38.decrypt(encryptedPrivateKeyBase58, passphrase); } catch (ex) { return cb(new Error('Could not decrypt BIP38 private key', ex)); } var privateKey = new Bitcore.PrivateKey(privateKeyWif); var address = privateKey.publicKey.toAddress().toString(); var addrBuff = new Buffer(address, 'ascii'); var actualChecksum = Bitcore.crypto.Hash.sha256sha256(addrBuff).toString('hex').substring(0, 8); var expectedChecksum = Bitcore.encoding.Base58Check.decode(encryptedPrivateKeyBase58).toString('hex').substring(6, 14); if (actualChecksum != expectedChecksum) return cb(new Error('Incorrect passphrase')); return cb(null, privateKeyWif); }; API.prototype.getBalanceFromPrivateKey = function(privateKey, coin, cb) { var self = this; if (_.isFunction(coin)) { cb = coin; coin = 'btc'; } var B = Bitcore_[coin]; var privateKey = new B.PrivateKey(privateKey); var address = privateKey.publicKey.toAddress(); self.getUtxos({ addresses: coin == 'bch' ? address.toLegacyAddress() : address.toString(), }, function(err, utxos) { if (err) return cb(err); return cb(null, _.sumBy(utxos, 'satoshis')); }); }; API.prototype.buildTxFromPrivateKey = function(privateKey, destinationAddress, opts, cb) { var self = this; opts = opts || {}; var coin = opts.coin || 'btc'; var B = Bitcore_[coin]; var privateKey = B.PrivateKey(privateKey); var address = privateKey.publicKey.toAddress(); async.waterfall([ function(next) { self.getUtxos({ addresses: coin == 'bch' ? address.toLegacyAddress() : address.toString(), }, function(err, utxos) { return next(err, utxos); }); }, function(utxos, next) { if (!_.isArray(utxos) || utxos.length == 0) return next(new Error('No utxos found')); var fee = opts.fee || 10000; var amount = _.sumBy(utxos, 'satoshis') - fee; if (amount <= 0) return next(new Errors.INSUFFICIENT_FUNDS); var tx; try { var toAddress = B.Address.fromString(destinationAddress); tx = new B.Transaction() .from(utxos) .to(toAddress, amount) .fee(fee) .sign(privateKey); // Make sure the tx can be serialized tx.serialize(); } catch (ex) { log.error('Could not build transaction from private key', ex); return next(new Errors.COULD_NOT_BUILD_TRANSACTION); } return next(null, tx); } ], cb); }; /** * Open a wallet and try to complete the public key ring. * * @param {Callback} cb - The callback that handles the response. It returns a flag indicating that the wallet is complete. 
* @fires API#walletCompleted */ API.prototype.openWallet = function(cb) { $.checkState(this.credentials); var self = this; if (self.credentials.isComplete() && self.credentials.hasWalletInfo()) return cb(null, true); var qs = []; qs.push('includeExtendedInfo=1'); qs.push('serverMessageArray=1'); self.request.get('/v3/wallets/?' + qs.join('&'), function(err, ret) { if (err) return cb(err); var wallet = ret.wallet; self._processStatus(ret); if (!self.credentials.hasWalletInfo()) { var me = _.find(wallet.copayers, { id: self.credentials.copayerId }); self.credentials.addWalletInfo(wallet.id, wallet.name, wallet.m, wallet.n, me.name); } if (wallet.status != 'complete') return cb(); if (self.credentials.walletPrivKey) { if (!Verifier.checkCopayers(self.credentials, wallet.copayers)) { return cb(new Errors.SERVER_COMPROMISED); } } else { // this should only happen in AIR-GAPPED flows log.warn('Could not verify copayers key (missing wallet Private Key)'); } self.credentials.addPublicKeyRing(API._extractPublicKeyRing(wallet.copayers)); self.emit('walletCompleted', wallet); return cb(null, ret); }); }; API._buildSecret = function(walletId, walletPrivKey, coin, network) { if (_.isString(walletPrivKey)) { walletPrivKey = Bitcore.PrivateKey.fromString(walletPrivKey); } var widHex = new Buffer(walletId.replace(/-/g, ''), 'hex'); var widBase58 = new Bitcore.encoding.Base58(widHex).toString(); return _.padEnd(widBase58, 22, '0') + walletPrivKey.toWIF() + (network == 'testnet' ? 'T' : 'L') + coin; }; API.parseSecret = function(secret) { $.checkArgument(secret); function split(str, indexes) { var parts = []; indexes.push(str.length); var i = 0; while (i < indexes.length) { parts.push(str.substring(i == 0 ? 0 : indexes[i - 1], indexes[i])); i++; }; return parts; }; try { var secretSplit = split(secret, [22, 74, 75]); var widBase58 = secretSplit[0].replace(/0/g, ''); var widHex = Bitcore.encoding.Base58.decode(widBase58).toString('hex'); var walletId = split(widHex, [8, 12, 16, 20]).join('-'); var walletPrivKey = Bitcore.PrivateKey.fromString(secretSplit[1]); var networkChar = secretSplit[2]; var coin = secretSplit[3] || 'btc'; return { walletId: walletId, walletPrivKey: walletPrivKey, coin: coin, network: networkChar == 'T' ? 
'testnet' : 'livenet', }; } catch (ex) { throw new Error('Invalid secret'); } }; API.getRawTx = function(txp) { var t = Utils.buildTx(txp); return t.uncheckedSerialize(); }; API.signTxp = function(txp, derivedXPrivKey) { //Derive proper key to sign, for each input var privs = []; var derived = {}; var xpriv = new Bitcore.HDPrivateKey(derivedXPrivKey); _.each(txp.inputs, function(i) { $.checkState(i.path, "Input derivation path not available (signing transaction)") if (!derived[i.path]) { derived[i.path] = xpriv.deriveChild(i.path).privateKey; privs.push(derived[i.path]); } }); var t = Utils.buildTx(txp); var signatures = _.map(privs, function(priv, i) { return t.getSignatures(priv); }); signatures = _.map(_.sortBy(_.flatten(signatures), 'inputIndex'), function(s) { return s.signature.toDER().toString('hex'); }); return signatures; }; API.prototype._signTxp = function(txp, password) { var derived = this.credentials.getDerivedXPrivKey(password); return API.signTxp(txp, derived); }; API.prototype._getCurrentSignatures = function(txp) { var acceptedActions = _.filter(txp.actions, { type: 'accept' }); return _.map(acceptedActions, function(x) { return { signatures: x.signatures, xpub: x.xpub, }; }); }; API.prototype._addSignaturesToBitcoreTx = function(txp, t, signatures, xpub) { if (signatures.length != txp.inputs.length) throw new Error('Number of signatures does not match number of inputs'); $.checkState(txp.coin); var bitcore = Bitcore_[txp.coin]; var i = 0, x = new bitcore.HDPublicKey(xpub); _.each(signatures, function(signatureHex) { var input = txp.inputs[i]; try { var signature = bitcore.crypto.Signature.fromString(signatureHex); var pub = x.deriveChild(txp.inputPaths[i]).publicKey; var s = { inputIndex: i, signature: signature, sigtype: bitcore.crypto.Signature.SIGHASH_ALL | bitcore.crypto.Signature.SIGHASH_FORKID, publicKey: pub, } ; t.inputs[i].addSignature(t, s); i++; } catch (e) {} ; }); if (i != txp.inputs.length) throw new Error('Wrong signatures'); }; API.prototype._applyAllSignatures = function(txp, t) { var self = this; $.checkState(txp.status == 'accepted'); var sigs = self._getCurrentSignatures(txp); _.each(sigs, function(x) { self._addSignaturesToBitcoreTx(txp, t, x.signatures, x.xpub); }); }; /** * Join * @private * * @param {String} walletId * @param {String} walletPrivKey * @param {String} xPubKey * @param {String} requestPubKey * @param {String} copayerName * @param {Object} Optional args * @param {String} opts.customData * @param {String} opts.coin * @param {Callback} cb */ API.prototype._doJoinWallet = function(walletId, walletPrivKey, xPubKey, requestPubKey, copayerName, opts, cb) { $.shouldBeFunction(cb); var self = this; opts = opts || {}; // Adds encrypted walletPrivateKey to CustomData opts.customData = opts.customData || {}; opts.customData.walletPrivKey = walletPrivKey.toString(); var encCustomData = Utils.encryptMessage(JSON.stringify(opts.customData), this.credentials.personalEncryptingKey); var encCopayerName = Utils.encryptMessage(copayerName, this.credentials.sharedEncryptingKey); var args = { walletId: walletId, coin: opts.coin, name: encCopayerName, xPubKey: xPubKey, requestPubKey: requestPubKey, customData: encCustomData, }; if (opts.dryRun) args.dryRun = true; if (_.isBoolean(opts.supportBIP44AndP2PKH)) args.supportBIP44AndP2PKH = opts.supportBIP44AndP2PKH; var hash = Utils.getCopayerHash(args.name, args.xPubKey, args.requestPubKey); args.copayerSignature = Utils.signMessage(hash, walletPrivKey); var url = '/v2/wallets/' + walletId + '/copayers'; 
this.request.post(url, args, function(err, body) { if (err) return cb(err); self._processWallet(body.wallet); return cb(null, body.wallet); }); }; /** * Return if wallet is complete */ API.prototype.isComplete = function() { return this.credentials && this.credentials.isComplete(); }; /** * Is private key currently encrypted? * * @return {Boolean} */ API.prototype.isPrivKeyEncrypted = function() { return this.credentials && this.credentials.isPrivKeyEncrypted(); }; /** * Is private key external? * * @return {Boolean} */ API.prototype.isPrivKeyExternal = function() { return this.credentials && this.credentials.hasExternalSource(); }; /** * Get external wallet source name * * @return {String} */ API.prototype.getPrivKeyExternalSourceName = function() { return this.credentials ? this.credentials.getExternalSourceName() : null; }; /** * Returns unencrypted extended private key and mnemonics * * @param password */ API.prototype.getKeys = function(password) { return this.credentials.getKeys(password); }; /** * Checks is password is valid * Returns null (keys not encrypted), true or false. * * @param password */ API.prototype.checkPassword = function(password) { if (!this.isPrivKeyEncrypted()) return; try { var keys = this.getKeys(password); return !!keys.xPrivKey; } catch (e) { return false; }; }; /** * Can this credentials sign a transaction? * (Only returns fail on a 'proxy' setup for airgapped operation) * * @return {undefined} */ API.prototype.canSign = function() { return this.credentials && this.credentials.canSign(); }; API._extractPublicKeyRing = function(copayers) { return _.map(copayers, function(copayer) { var pkr = _.pick(copayer, ['xPubKey', 'requestPubKey']); pkr.copayerName = copayer.name; return pkr; }); }; /** * sets up encryption for the extended private key * * @param {String} password Password used to encrypt * @param {Object} opts optional: SJCL options to encrypt (.iter, .salt, etc). * @return {undefined} */ API.prototype.encryptPrivateKey = function(password, opts) { this.credentials.encryptPrivateKey(password, opts || API.privateKeyEncryptionOpts); }; /** * disables encryption for private key. * * @param {String} password Password used to encrypt */ API.prototype.decryptPrivateKey = function(password) { return this.credentials.decryptPrivateKey(password); }; /** * Get current fee levels for the specified network * * @param {string} coin - 'btc' (default) or 'bch' * @param {string} network - 'livenet' (default) or 'testnet' * @param {Callback} cb * @returns {Callback} cb - Returns error or an object with status information */ API.prototype.getFeeLevels = function(coin, network, cb) { var self = this; $.checkArgument(coin || _.includes(['btc', 'bch'], coin)); $.checkArgument(network || _.includes(['livenet', 'testnet'], network)); self.request.get('/v2/feelevels/?coin=' + (coin || 'btc') + '&network=' + (network || 'livenet'), function(err, result) { if (err) return cb(err); return cb(err, result); }); }; /** * Get service version * * @param {Callback} cb */ API.prototype.getVersion = function(cb) { this.request.get('/v1/version/', cb); }; API.prototype._checkKeyDerivation = function() { var isInvalid = (this.keyDerivationOk === false); if (isInvalid) { log.error('Key derivation for this device is not working as expected'); } return !isInvalid; }; /** * * Create a wallet. 
* @param {String} walletName * @param {String} copayerName * @param {Number} m * @param {Number} n * @param {object} opts (optional: advanced options) * @param {string} opts.coin[='btc'] - The coin for this wallet (btc, bch). * @param {string} opts.network[='livenet'] * @param {string} opts.singleAddress[=false] - The wallet will only ever have one address. * @param {String} opts.walletPrivKey - set a walletPrivKey (instead of random) * @param {String} opts.id - set a id for wallet (instead of server given) * @param cb * @return {undefined} */ API.prototype.createWallet = function(walletName, copayerName, m, n, opts, cb) { var self = this; if (!self._checkKeyDerivation()) return cb(new Error('Cannot create new wallet')); if (opts) $.shouldBeObject(opts); opts = opts || {}; var coin = opts.coin || 'btc'; if (!_.includes(['btc', 'bch'], coin)) return cb(new Error('Invalid coin')); var network = opts.network || 'livenet'; if (!_.includes(['testnet', 'livenet'], network)) return cb(new Error('Invalid network')); if (!self.credentials) { log.info('Generating new keys'); self.seedFromRandom({ coin: coin, network: network }); } else { log.info('Using existing keys'); } if (coin != self.credentials.coin) { return cb(new Error('Existing keys were created for a different coin')); } if (network != self.credentials.network) { return cb(new Error('Existing keys were created for a different network')); } var walletPrivKey = opts.walletPrivKey || new Bitcore.PrivateKey(); var c = self.credentials; c.addWalletPrivateKey(walletPrivKey.toString()); var encWalletName = Utils.encryptMessage(walletName, c.sharedEncryptingKey); var args = { name: encWalletName, m: m, n: n, pubKey: (new Bitcore.PrivateKey(walletPrivKey)).toPublicKey().toString(), coin: coin, network: network, singleAddress: !!opts.singleAddress, id: opts.id, }; self.request.post('/v2/wallets/', args, function(err, res) { if (err) return cb(err); var walletId = res.walletId; c.addWalletInfo(walletId, walletName, m, n, copayerName); var secret = API._buildSecret(c.walletId, c.walletPrivKey, c.coin, c.network); self._doJoinWallet(walletId, walletPrivKey, c.xPubKey, c.requestPubKey, copayerName, { coin: coin }, function(err, wallet) { if (err) return cb(err); return cb(null, n > 1 ? secret : null); }); }); }; /** * Join an existent wallet * * @param {String} secret * @param {String} copayerName * @param {Object} opts * @param {string} opts.coin[='btc'] - The expected coin for this wallet (btc, bch). 
* @param {Boolean} opts.dryRun[=false] - Simulate wallet join * @param {Callback} cb * @returns {Callback} cb - Returns the wallet */ API.prototype.joinWallet = function(secret, copayerName, opts, cb) { var self = this; if (!cb) { cb = opts; opts = {}; log.warn('DEPRECATED WARN: joinWallet should receive 4 parameters.'); } if (!self._checkKeyDerivation()) return cb(new Error('Cannot join wallet')); opts = opts || {}; var coin = opts.coin || 'btc'; if (!_.includes(['btc', 'bch'], coin)) return cb(new Error('Invalid coin')); try { var secretData = API.parseSecret(secret); } catch (ex) { return cb(ex); } if (!self.credentials) { self.seedFromRandom({ coin: coin, network: secretData.network }); } self.credentials.addWalletPrivateKey(secretData.walletPrivKey.toString()); self._doJoinWallet(secretData.walletId, secretData.walletPrivKey, self.credentials.xPubKey, self.credentials.requestPubKey, copayerName, { coin: coin, dryRun: !!opts.dryRun, }, function(err, wallet) { if (err) return cb(err); if (!opts.dryRun) { self.credentials.addWalletInfo(wallet.id, wallet.name, wallet.m, wallet.n, copayerName); } return cb(null, wallet); }); }; /** * Recreates a wallet, given credentials (with wallet id) * * @returns {Callback} cb - Returns the wallet */ API.prototype.recreateWallet = function(cb) { $.checkState(this.credentials); $.checkState(this.credentials.isComplete()); $.checkState(this.credentials.walletPrivKey); //$.checkState(this.credentials.hasWalletInfo()); var self = this; // First: Try to get the wallet with current credentials this.getStatus({ includeExtendedInfo: true }, function(err) { // No error? -> Wallet is ready. if (!err) { log.info('Wallet is already created'); return cb(); }; var c = self.credentials; var walletPrivKey = Bitcore.PrivateKey.fromString(c.walletPrivKey); var walletId = c.walletId; var supportBIP44AndP2PKH = c.derivationStrategy != Constants.DERIVATION_STRATEGIES.BIP45; var encWalletName = Utils.encryptMessage(c.walletName || 'recovered wallet', c.sharedEncryptingKey); var coin = c.coin; var args = { name: encWalletName, m: c.m, n: c.n, pubKey: walletPrivKey.toPublicKey().toString(), coin: c.coin, network: c.network, id: walletId, supportBIP44AndP2PKH: supportBIP44AndP2PKH, }; self.request.post('/v2/wallets/', args, function(err, body) { if (err) { if (!(err instanceof Errors.WALLET_ALREADY_EXISTS)) return cb(err); return self.addAccess({}, function(err) { if (err) return cb(err); self.openWallet(function(err) { return cb(err); }); }); } if (!walletId) { walletId = body.walletId; } var i = 1; async.each(self.credentials.publicKeyRing, function(item, next) { var name = item.copayerName || ('copayer ' + i++); self._doJoinWallet(walletId, walletPrivKey, item.xPubKey, item.requestPubKey, name, { coin: c.coin, supportBIP44AndP2PKH: supportBIP44AndP2PKH, }, function(err) { //Ignore error is copayer already in wallet if (err && err instanceof Errors.COPAYER_IN_WALLET) return next(); return next(err); }); }, cb); }); }); }; API.prototype._processWallet = function(wallet) { var self = this; var encryptingKey = self.credentials.sharedEncryptingKey; var name = Utils.decryptMessageNoThrow(wallet.name, encryptingKey); if (name != wallet.name) { wallet.encryptedName = wallet.name; } wallet.name = name; _.each(wallet.copayers, function(copayer) { var name = Utils.decryptMessageNoThrow(copayer.name, encryptingKey); if (name != copayer.name) { copayer.encryptedName = copayer.name; } copayer.name = name; _.each(copayer.requestPubKeys, function(access) { if (!access.name) return; var 
name = Utils.decryptMessageNoThrow(access.name, encryptingKey); if (name != access.name) { access.encryptedName = access.name; } access.name = name; }); }); }; API.prototype._processStatus = function(status) { var self = this; function processCustomData(data) { var copayers = data.wallet.copayers; if (!copayers) return; var me = _.find(copayers, { 'id': self.credentials.copayerId }); if (!me || !me.customData) return; var customData; try { customData = JSON.parse(Utils.decryptMessage(me.customData, self.credentials.personalEncryptingKey)); } catch (e) { log.warn('Could not decrypt customData:', me.customData); } if (!customData) return; // Add it to result data.customData = customData; // Update walletPrivateKey if (!self.credentials.walletPrivKey && customData.walletPrivKey) self.credentials.addWalletPrivateKey(customData.walletPrivKey); }; processCustomData(status); self._processWallet(status.wallet); self._processTxps(status.pendingTxps); } /** * Get latest notifications * * @param {object} opts * @param {String} opts.lastNotificationId (optional) - The ID of the last received notification * @param {String} opts.timeSpan (optional) - A time window on which to look for notifications (in seconds) * @param {String} opts.includeOwn[=false] (optional) - Do not ignore notifications generated by the current copayer * @returns {Callback} cb - Returns error or an array of notifications */ API.prototype.getNotifications = function(opts, cb) { $.checkState(this.credentials); var self = this; opts = opts || {}; var url = '/v1/notifications/'; if (opts.lastNotificationId) { url += '?notificationId=' + opts.lastNotificationId; } else if (opts.timeSpan) { url += '?timeSpan=' + opts.timeSpan; } self.request.getWithLogin(url, function(err, result) { if (err) return cb(err); var notifications = _.filter(result, function(notification) { return opts.includeOwn || (notification.creatorId != self.credentials.copayerId); }); return cb(null, notifications); }); }; /** * Get status of the wallet * * @param {Boolean} opts.twoStep[=false] - Optional: use 2-step balance computation for improved performance * @param {Boolean} opts.includeExtendedInfo (optional: query extended status) * @returns {Callback} cb - Returns error or an object with status information */ API.prototype.getStatus = function(opts, cb) { $.checkState(this.credentials); if (!cb) { cb = opts; opts = {}; log.warn('DEPRECATED WARN: getStatus should receive 2 parameters.') } var self = this; opts = opts || {}; var qs = []; qs.push('includeExtendedInfo=' + (opts.includeExtendedInfo ? '1' : '0')); qs.push('twoStep=' + (opts.twoStep ? '1' : '0')); qs.push('serverMessageArray=1'); self.request.get('/v3/wallets/?' 
+ qs.join('&'), function(err, result) { if (err) return cb(err); if (result.wallet.status == 'pending') { var c = self.credentials; result.wallet.secret = API._buildSecret(c.walletId, c.walletPrivKey, c.coin, c.network); } self._processStatus(result); return cb(err, result); }); }; /** * Get copayer preferences * * @param {Callback} cb * @return {Callback} cb - Return error or object */ API.prototype.getPreferences = function(cb) { $.checkState(this.credentials); $.checkArgument(cb); var self = this; self.request.get('/v1/preferences/', function(err, preferences) { if (err) return cb(err); return cb(null, preferences); }); }; /** * Save copayer preferences * * @param {Object} preferences * @param {Callback} cb * @return {Callback} cb - Return error or object */ API.prototype.savePreferences = function(preferences, cb) { $.checkState(this.credentials); $.checkArgument(cb); var self = this; self.request.put('/v1/preferences/', preferences, cb); }; /** * fetchPayPro * * @param opts.payProUrl URL for paypro request * @returns {Callback} cb - Return error or the parsed payment protocol request * Returns (err,paypro) * paypro.amount * paypro.toAddress * paypro.memo */ API.prototype.fetchPayPro = function(opts, cb) { $.checkArgument(opts) .checkArgument(opts.payProUrl); PayPro.get({ url: opts.payProUrl, coin: this.credentials.coin || 'btc', network: this.credentials.network || 'livenet', // for testing request: this.request, }, function(err, paypro) { if (err) return cb(err); return cb(null, paypro); }); }; /** * Gets list of utxos * * @param {Function} cb * @param {Object} opts * @param {Array} opts.addresses (optional) - List of addresses from where to fetch UTXOs. * @returns {Callback} cb - Return error or the list of utxos */ API.prototype.getUtxos = function(opts, cb) { $.checkState(this.credentials && this.credentials.isComplete()); opts = opts || {}; var url = '/v1/utxos/'; if (opts.addresses) { url += '?' + querystring.stringify({ addresses: [].concat(opts.addresses).join(',') }); } this.request.get(url, cb); }; API.prototype._getCreateTxProposalArgs = function(opts) { var self = this; var args = _.cloneDeep(opts); args.message = API._encryptMessage(opts.message, this.credentials.sharedEncryptingKey) || null; args.payProUrl = opts.payProUrl || null; _.each(args.outputs, function(o) { o.message = API._encryptMessage(o.message, self.credentials.sharedEncryptingKey) || null; }); return args; }; /** * Create a transaction proposal * * @param {Object} opts * @param {string} opts.txProposalId - Optional. If provided it will be used as this TX proposal ID. Should be unique in the scope of the wallet. * @param {Array} opts.outputs - List of outputs. * @param {string} opts.outputs[].toAddress - Destination address. * @param {number} opts.outputs[].amount - Amount to transfer in satoshi. * @param {string} opts.outputs[].message - A message to attach to this output. * @param {string} opts.message - A message to attach to this transaction. * @param {number} opts.feeLevel[='normal'] - Optional. Specify the fee level for this TX ('priority', 'normal', 'economy', 'superEconomy'). * @param {number} opts.feePerKb - Optional. Specify the fee per KB for this TX (in satoshi). * @param {string} opts.changeAddress - Optional. Use this address as the change address for the tx. The address should belong to the wallet. In the case of singleAddress wallets, the first main address will be used. * @param {Boolean} opts.sendMax - Optional. 
Send maximum amount of funds that make sense under the specified fee/feePerKb conditions. (defaults to false). * @param {string} opts.payProUrl - Optional. Paypro URL for peers to verify TX * @param {Boolean} opts.excludeUnconfirmedUtxos[=false] - Optional. Do not use UTXOs of unconfirmed transactions as inputs * @param {Boolean} opts.validateOutputs[=true] - Optional. Perform validation on outputs. * @param {Boolean} opts.dryRun[=false] - Optional. Simulate the action but do not change server state. * @param {Array} opts.inputs - Optional. Inputs for this TX * @param {number} opts.fee - Optional. Use an fixed fee for this TX (only when opts.inputs is specified) * @param {Boolean} opts.noShuffleOutputs - Optional. If set, TX outputs won't be shuffled. Defaults to false * @returns {Callback} cb - Return error or the transaction proposal */ API.prototype.createTxProposal = function(opts, cb) { $.checkState(this.credentials && this.credentials.isComplete()); $.checkState(this.credentials.sharedEncryptingKey); $.checkArgument(opts); var self = this; var args = self._getCreateTxProposalArgs(opts); self.request.post('/v2/txproposals/', args, function(err, txp) { if (err) return cb(err); self._processTxps(txp); if (!Verifier.checkProposalCreation(args, txp, self.credentials.sharedEncryptingKey)) { return cb(new Errors.SERVER_COMPROMISED); } return cb(null, txp); }); }; /** * Publish a transaction proposal * * @param {Object} opts * @param {Object} opts.txp - The transaction proposal object returned by the API#createTxProposal method * @returns {Callback} cb - Return error or null */ API.prototype.publishTxProposal = function(opts, cb) { $.checkState(this.credentials && this.credentials.isComplete()); $.checkArgument(opts) .checkArgument(opts.txp); $.checkState(parseInt(opts.txp.version) >= 3); var self = this; var t = Utils.buildTx(opts.txp); var hash = t.uncheckedSerialize(); var args = { proposalSignature: Utils.signMessage(hash, self.credentials.requestPrivKey) }; var url = '/v1/txproposals/' + opts.txp.id + '/publish/'; self.request.post(url, args, function(err, txp) { if (err) return cb(err); self._processTxps(txp); return cb(null, txp); }); }; /** * Create a new address * * @param {Object} opts * @param {Boolean} opts.ignoreMaxGap[=false] * @param {Callback} cb * @returns {Callback} cb - Return error or the address */ API.prototype.createAddress = function(opts, cb) { $.checkState(this.credentials && this.credentials.isComplete()); var self = this; if (!cb) { cb = opts; opts = {}; log.warn('DEPRECATED WARN: createAddress should receive 2 parameters.') } if (!self._checkKeyDerivation()) return cb(new Error('Cannot create new address for this wallet')); opts = opts || {}; self.request.post('/v3/addresses/', opts, function(err, address) { if (err) return cb(err); if (!Verifier.checkAddress(self.credentials, address)) { return cb(new Errors.SERVER_COMPROMISED); } return cb(null, address); }); }; /** * Get your main addresses * * @param {Object} opts * @param {Boolean} opts.doNotVerify * @param {Numeric} opts.limit (optional) - Limit the resultset. Return all addresses by default. * @param {Boolean} [opts.reverse=false] (optional) - Reverse the order of returned addresses. 
* @param {Callback} cb * @returns {Callback} cb - Return error or the array of addresses */ API.prototype.getMainAddresses = function(opts, cb) { $.checkState(this.credentials && this.credentials.isComplete()); var self = this; opts = opts || {}; var args = []; if (opts.limit) args.push('limit=' + opts.limit); if (opts.reverse) args.push('reverse=1'); var qs = ''; if (args.length > 0) { qs = '?' + args.join('&'); } var url = '/v1/addresses/' + qs; self.request.get(url, function(err, addresses) { if (err) return cb(err); if (!opts.doNotVerify) { var fake = _.some(addresses, function(address) { return !Verifier.checkAddress(self.credentials, address); }); if (fake) return cb(new Errors.SERVER_COMPROMISED); } return cb(null, addresses); }); }; /** * Update wallet balance * * @param {String} opts.coin - Optional: defaults to current wallet coin * @param {Callback} cb */ API.prototype.getBalance = function(opts, cb) { if (!cb) { cb = opts; opts = {}; log.warn('DEPRECATED WARN: getBalance should receive 2 parameters.') } var self = this; opts = opts || {}; $.checkState(this.credentials && this.credentials.isComplete()); var args = []; if (opts.coin) { if (!_.includes(['btc', 'bch'], opts.coin)) return cb(new Error('Invalid coin')); args.push('coin=' + opts.coin); } var qs = ''; if (args.length > 0) { qs = '?' + args.join('&'); } var url = '/v1/balance/' + qs; this.request.get(url, cb); }; /** * Get list of transactions proposals * * @param {Object} opts * @param {Boolean} opts.doNotVerify * @param {Boolean} opts.forAirGapped * @param {Boolean} opts.doNotEncryptPkr * @return {Callback} cb - Return error or array of transactions proposals */ API.prototype.getTxProposals = function(opts, cb) { $.checkState(this.credentials && this.credentials.isComplete()); var self = this; self.request.get('/v1/txproposals/', function(err, txps) { if (err) return cb(err); self._processTxps(txps); async.every(txps, function(txp, acb) { if (opts.doNotVerify) return acb(true); self.getPayPro(txp, function(err, paypro) { var isLegit = Verifier.checkTxProposal(self.credentials, txp, { paypro: paypro, }); return acb(isLegit); }); }, function(isLegit) { if (!isLegit) return cb(new Errors.SERVER_COMPROMISED); var result; if (opts.forAirGapped) { result = { txps: JSON.parse(JSON.stringify(txps)), encryptedPkr: opts.doNotEncryptPkr ? null : Utils.encryptMessage(JSON.stringify(self.credentials.publicKeyRing), self.credentials.personalEncryptingKey), unencryptedPkr: opts.doNotEncryptPkr ? JSON.stringify(self.credentials.publicKeyRing) : null, m: self.credentials.m, n: self.credentials.n, }; } else { result = txps; } return cb(null, result); }); }); }; //private? API.prototype.getPayPro = function(txp, cb) { var self = this; if (!txp.payProUrl || this.doNotVerifyPayPro) return cb(); PayPro.get({ url: txp.payProUrl, coin: txp.coin || 'btc', network: txp.network || 'livenet', // for testing request: self.request, }, function(err, paypro) { if (err) return cb(new Error('Could not fetch invoice:' + (err.message? err.message : err))); return cb(null, paypro); }); }; /** * Sign a transaction proposal * * @param {Object} txp * @param {String} password - (optional) A password to decrypt the encrypted private key (if encryption is set). 
* @param {Callback} cb * @return {Callback} cb - Return error or object */ API.prototype.signTxProposal = function(txp, password, cb) { $.checkState(this.credentials && this.credentials.isComplete()); $.checkArgument(txp.creatorId); if (_.isFunction(password)) { cb = password; password = null; } var self = this; if (!txp.signatures) { if (!self.canSign()) return cb(new Errors.MISSING_PRIVATE_KEY); if (self.isPrivKeyEncrypted() && !password) return cb(new Errors.ENCRYPTED_PRIVATE_KEY); } self.getPayPro(txp, function(err, paypro) { if (err) return cb(err); var isLegit = Verifier.checkTxProposal(self.credentials, txp, { paypro: paypro, }); if (!isLegit) return cb(new Errors.SERVER_COMPROMISED); var signatures = txp.signatures; if (_.isEmpty(signatures)) { try { signatures = self._signTxp(txp, password); } catch (ex) { log.error('Error signing tx', ex); return cb(ex); } } var url = '/v1/txproposals/' + txp.id + '/signatures/'; var args = { signatures: signatures }; self.request.post(url, args, function(err, txp) { if (err) return cb(err); self._processTxps(txp); return cb(null, txp); }); }); }; /** * Sign transaction proposal from AirGapped * * @param {Object} txp * @param {String} encryptedPkr * @param {Number} m * @param {Number} n * @param {String} password - (optional) A password to decrypt the encrypted private key (if encryption is set). * @return {Object} txp - Return transaction */ API.prototype.signTxProposalFromAirGapped = function(txp, encryptedPkr, m, n, password) { $.checkState(this.credentials); var self = this; if (!self.canSign()) throw new Errors.MISSING_PRIVATE_KEY; if (self.isPrivKeyEncrypted() && !password) throw new Errors.ENCRYPTED_PRIVATE_KEY; var publicKeyRing; try { publicKeyRing = JSON.parse(Utils.decryptMessage(encryptedPkr, self.credentials.personalEncryptingKey)); } catch (ex) { throw new Error('Could not decrypt public key ring'); } if (!_.isArray(publicKeyRing) || publicKeyRing.length != n) { throw new Error('Invalid public key ring'); } self.credentials.m = m; self.credentials.n = n; self.credentials.addressType = txp.addressType; self.credentials.addPublicKeyRing(publicKeyRing); if (!Verifier.checkTxProposalSignature(self.credentials, txp)) throw new Error('Fake transaction proposal'); return self._signTxp(txp, password); }; /** * Sign transaction proposal from AirGapped * * @param {String} key - A mnemonic phrase or an xprv HD private key * @param {Object} txp * @param {String} unencryptedPkr * @param {Number} m * @param {Number} n * @param {Object} opts * @param {String} opts.coin (default 'btc') * @param {String} opts.passphrase * @param {Number} opts.account - default 0 * @param {String} opts.derivationStrategy - default 'BIP44' * @return {Object} txp - Return transaction */ API.signTxProposalFromAirGapped = function(key, txp, unencryptedPkr, m, n, opts) { var self = this; opts = opts || {} var coin = opts.coin || 'btc'; if (!_.includes(['btc', 'bch'], coin)) return cb(new Error('Invalid coin')); var publicKeyRing = JSON.parse(unencryptedPkr); if (!_.isArray(publicKeyRing) || publicKeyRing.length != n) { throw new Error('Invalid public key ring'); } var newClient = new API({ baseUrl: 'https://bws.example.com/bws/api' }); if (key.slice(0, 4) === 'xprv' || key.slice(0, 4) === 'tprv') { if (key.slice(0, 4) === 'xprv' && txp.network == 'testnet') throw new Error("testnet HD keys must start with tprv"); if (key.slice(0, 4) === 'tprv' && txp.network == 'livenet') throw new Error("livenet HD keys must start with xprv"); newClient.seedFromExtendedPrivateKey(key, { 
'coin': coin, 'account': opts.account, 'derivationStrategy': opts.derivationStrategy }); } else { newClient.seedFromMnemonic(key, { 'coin': coin, 'network': txp.network, 'passphrase': opts.passphrase, 'account': opts.account, 'derivationStrategy': opts.derivationStrategy }) } newClient.credentials.m = m; newClient.credentials.n = n; newClient.credentials.addressType = txp.addressType; newClient.credentials.addPublicKeyRing(publicKeyRing); if (!Verifier.checkTxProposalSignature(newClient.credentials, txp)) throw new Error('Fake transaction proposal'); return newClient._signTxp(txp); }; /** * Reject a transaction proposal * * @param {Object} txp * @param {String} reason * @param {Callback} cb * @return {Callback} cb - Return error or object */ API.prototype.rejectTxProposal = function(txp, reason, cb) { $.checkState(this.credentials && this.credentials.isComplete()); $.checkArgument(cb); var self = this; var url = '/v1/txproposals/' + txp.id + '/rejections/'; var args = { reason: API._encryptMessage(reason, self.credentials.sharedEncryptingKey) || '', }; self.request.post(url, args, function(err, txp) { if (err) return cb(err); self._processTxps(txp); return cb(null, txp); }); }; /** * Broadcast raw transaction * * @param {Object} opts * @param {String} opts.network * @param {String} opts.rawTx * @param {Callback} cb * @return {Callback} cb - Return error or txid */ API.prototype.broadcastRawTx = function(opts, cb) { $.checkState(this.credentials); $.checkArgument(cb); var self = this; opts = opts || {}; var url = '/v1/broadcast_raw/'; self.request.post(url, opts, function(err, txid) { if (err) return cb(err); return cb(null, txid); }); }; API.prototype._doBroadcast = function(txp, cb) { var self = this; var url = '/v1/txproposals/' + txp.id + '/broadcast/'; self.request.post(url, {}, function(err, txp) { if (err) return cb(err); self._processTxps(txp); return cb(null, txp); }); }; /** * Broadcast a transaction proposal * * @param {Object} txp * @param {Callback} cb * @return {Callback} cb - Return error or object */ API.prototype.broadcastTxProposal = function(txp, cb) { $.checkState(this.credentials && this.credentials.isComplete()); var self = this; self.getPayPro(txp, function(err, paypro) { if (err) return cb(err); if (paypro) { var t_unsigned = Utils.buildTx(txp); var t = Utils.buildTx(txp); self._applyAllSignatures(txp, t); PayPro.send({ url: txp.payProUrl, amountSat: txp.amount, rawTxUnsigned: t_unsigned.uncheckedSerialize(), rawTx: t.serialize({ disableSmallFees: true, disableLargeFees: true, disableDustOutputs: true }), coin: txp.coin || 'btc', network: txp.network || 'livenet', // for testing request: self.request, }, function(err, ack, memo) { if (err) { return cb(err); } if (memo) { log.debug('Merchant memo:', memo); } self._doBroadcast(txp, function(err2, txp) { return cb(err2, txp, memo, err); }); }); } else { self._doBroadcast(txp, cb); } }); }; /** * Remove a transaction proposal * * @param {Object} txp * @param {Callback} cb * @return {Callback} cb - Return error or empty */ API.prototype.removeTxProposal = function(txp, cb) { $.checkState(this.credentials && this.credentials.isComplete()); var self = this; var url = '/v1/txproposals/' + txp.id; self.request.delete(url, function(err) { return cb(err); }); }; /** * Get transaction history * * @param {Object} opts * @param {Number} opts.skip (defaults to 0) * @param {Number} opts.limit * @param {Boolean} opts.includeExtendedInfo * @param {Callback} cb * @return {Callback} cb - Return error or array of transactions */ 
API.prototype.getTxHistory = function(opts, cb) { $.checkState(this.credentials && this.credentials.isComplete()); var self = this; var args = []; if (opts) { if (opts.skip) args.push('skip=' + opts.skip); if (opts.limit) args.push('limit=' + opts.limit); if (opts.includeExtendedInfo) args.push('includeExtendedInfo=1'); } var qs = ''; if (args.length > 0) { qs = '?' + args.join('&'); } var url = '/v1/txhistory/' + qs; self.request.get(url, function(err, txs) { if (err) return cb(err); self._processTxps(txs); return cb(null, txs); }); }; /** * getTx * * @param {String} TransactionId * @return {Callback} cb - Return error or transaction */ API.prototype.getTx = function(id, cb) { $.checkState(this.credentials && this.credentials.isComplete()); var self = this; var url = '/v1/txproposals/' + id; this.request.get(url, function(err, txp) { if (err) return cb(err); self._processTxps(txp); return cb(null, txp); }); }; /** * Start an address scanning process. * When finished, the scanning process will send a notification 'ScanFinished' to all copayers. * * @param {Object} opts * @param {Boolean} opts.includeCopayerBranches (defaults to false) * @param {Callback} cb */ API.prototype.startScan = function(opts, cb) { $.checkState(this.credentials && this.credentials.isComplete()); var self = this; var args = { includeCopayerBranches: opts.includeCopayerBranches, }; self.request.post('/v1/addresses/scan', args, function(err) { return cb(err); }); }; /** * Adds access to the current copayer * @param {Object} opts * @param {bool} opts.generateNewKey Optional: generate a new key for the new access * @param {string} opts.restrictions * - cannotProposeTXs * - cannotXXX TODO * @param {string} opts.name (name for the new access) * * return the accesses Wallet and the requestPrivateKey */ API.prototype.addAccess = function(opts, cb) { $.checkState(this.credentials && this.credentials.canSign()); opts = opts || {}; var reqPrivKey = new Bitcore.PrivateKey(opts.generateNewKey ? null : this.credentials.requestPrivKey); var requestPubKey = reqPrivKey.toPublicKey().toString(); var xPriv = new Bitcore.HDPrivateKey(this.credentials.xPrivKey) .deriveChild(this.credentials.getBaseAddressDerivationPath()); var sig = Utils.signRequestPubKey(requestPubKey, xPriv); var copayerId = this.credentials.copayerId; var encCopayerName = opts.name ? 
Utils.encryptMessage(opts.name, this.credentials.sharedEncryptingKey) : null; var opts = { copayerId: copayerId, requestPubKey: requestPubKey, signature: sig, name: encCopayerName, restrictions: opts.restrictions, }; this.request.put('/v1/copayers/' + copayerId + '/', opts, function(err, res) { if (err) return cb(err); return cb(null, res.wallet, reqPrivKey); }); }; /** * Get a note associated with the specified txid * @param {Object} opts * @param {string} opts.txid - The txid to associate this note with */ API.prototype.getTxNote = function(opts, cb) { $.checkState(this.credentials); var self = this; opts = opts || {}; self.request.get('/v1/txnotes/' + opts.txid + '/', function(err, note) { if (err) return cb(err); self._processTxNotes(note); return cb(null, note); }); }; /** * Edit a note associated with the specified txid * @param {Object} opts * @param {string} opts.txid - The txid to associate this note with * @param {string} opts.body - The contents of the note */ API.prototype.editTxNote = function(opts, cb) { $.checkState(this.credentials); var self = this; opts = opts || {}; if (opts.body) { opts.body = API._encryptMessage(opts.body, this.credentials.sharedEncryptingKey); } self.request.put('/v1/txnotes/' + opts.txid + '/', opts, function(err, note) { if (err) return cb(err); self._processTxNotes(note); return cb(null, note); }); }; /** * Get all notes edited after the specified date * @param {Object} opts * @param {string} opts.minTs - The starting timestamp */ API.prototype.getTxNotes = function(opts, cb) { $.checkState(this.credentials); var self = this; opts = opts || {}; var args = []; if (_.isNumber(opts.minTs)) { args.push('minTs=' + opts.minTs); } var qs = ''; if (args.length > 0) { qs = '?' + args.join('&'); } self.request.get('/v1/txnotes/' + qs, function(err, notes) { if (err) return cb(err); self._processTxNotes(notes); return cb(null, notes); }); }; /** * Returns exchange rate for the specified currency & timestamp. * @param {Object} opts * @param {string} opts.code - Currency ISO code. * @param {Date} [opts.ts] - A timestamp to base the rate on (default Date.now()). * @param {String} [opts.provider] - A provider of exchange rates (default 'BitPay'). * @returns {Object} rates - The exchange rate. */ API.prototype.getFiatRate = function(opts, cb) { $.checkArgument(cb); var self = this; var opts = opts || {}; var args = []; if (opts.ts) args.push('ts=' + opts.ts); if (opts.provider) args.push('provider=' + opts.provider); var qs = ''; if (args.length > 0) { qs = '?' + args.join('&'); } self.request.get('/v1/fiatrates/' + opts.code + '/' + qs, function(err, rates) { if (err) return cb(err); return cb(null, rates); }); } /** * Subscribe to push notifications. * @param {Object} opts * @param {String} opts.type - Device type (ios or android). * @param {String} opts.token - Device token. * @returns {Object} response - Status of subscription. */ API.prototype.pushNotificationsSubscribe = function(opts, cb) { var url = '/v1/pushnotifications/subscriptions/'; this.request.post(url, opts, function(err, response) { if (err) return cb(err); return cb(null, response); }); }; /** * Unsubscribe from push notifications. * @param {String} token - Device token * @return {Callback} cb - Return error if exists */ API.prototype.pushNotificationsUnsubscribe = function(token, cb) { var url = '/v2/pushnotifications/subscriptions/' + token; this.request.delete(url, cb); }; /** * Listen to a tx for its first confirmation. 
* @param {Object} opts * @param {String} opts.txid - The txid to subscribe to. * @returns {Object} response - Status of subscription. */ API.prototype.txConfirmationSubscribe = function(opts, cb) { var url = '/v1/txconfirmations/'; this.request.post(url, opts, function(err, response) { if (err) return cb(err); return cb(null, response); }); }; /** * Stop listening for a tx confirmation. * @param {String} txid - The txid to unsubscribe from. * @return {Callback} cb - Return error if exists */ API.prototype.txConfirmationUnsubscribe = function(txid, cb) { var url = '/v1/txconfirmations/' + txid; this.request.delete(url, cb); }; /** * Returns send max information. * @param {String} opts * @param {number} opts.feeLevel[='normal'] - Optional. Specify the fee level ('priority', 'normal', 'economy', 'superEconomy'). * @param {number} opts.feePerKb - Optional. Specify the fee per KB (in satoshi). * @param {Boolean} opts.excludeUnconfirmedUtxos - Indicates it if should use (or not) the unconfirmed utxos * @param {Boolean} opts.returnInputs - Indicates it if should return (or not) the inputs * @return {Callback} cb - Return error (if exists) and object result */ API.prototype.getSendMaxInfo = function(opts, cb) { var self = this; var args = []; opts = opts || {}; if (opts.feeLevel) args.push('feeLevel=' + opts.feeLevel); if (opts.feePerKb != null) args.push('feePerKb=' + opts.feePerKb); if (opts.excludeUnconfirmedUtxos) args.push('excludeUnconfirmedUtxos=1'); if (opts.returnInputs) args.push('returnInputs=1'); var qs = ''; if (args.length > 0) qs = '?' + args.join('&'); var url = '/v1/sendmaxinfo/' + qs; self.request.get(url, function(err, result) { if (err) return cb(err); return cb(null, result); }); }; /** * Get wallet status based on a string identifier (one of: walletId, address, txid) * * @param {string} opts.identifier - The identifier * @param {Boolean} opts.includeExtendedInfo (optional: query extended status) * @param {Boolean} opts.walletCheck (optional: run v8 walletCheck if wallet found) * @returns {Callback} cb - Returns error or an object with status information */ API.prototype.getStatusByIdentifier = function(opts, cb) { $.checkState(this.credentials); var self = this; opts = opts || {}; var qs = []; qs.push('includeExtendedInfo=' + (opts.includeExtendedInfo ? '1' : '0')); qs.push('walletCheck=' + (opts.walletCheck ? '1' : '0')); self.request.get('/v1/wallets/' + opts.identifier + '?' + qs.join('&'), function(err, result) { if (err || !result || !result.wallet) return cb(err); if (result.wallet.status == 'pending') { var c = self.credentials; result.wallet.secret = API._buildSecret(c.walletId, c.walletPrivKey, c.coin, c.network); } self._processStatus(result); return cb(err, result); }); }; /* * * Compatibility Functions * */ API.prototype._oldCopayDecrypt = function(username, password, blob) { var SEP1 = '@#$'; var SEP2 = '%^#@'; var decrypted; try { var passphrase = username + SEP1 + password; decrypted = sjcl.decrypt(passphrase, blob); } catch (e) { passphrase = username + SEP2 + password; try { decrypted = sjcl.decrypt(passphrase, blob); } catch (e) { log.debug(e); }; } if (!decrypted) return null; var ret; try { ret = JSON.parse(decrypted); } catch (e) {}; return ret; }; API.prototype.getWalletIdsFromOldCopay = function(username, password, blob) { var p = this._oldCopayDecrypt(username, password, blob); if (!p) return null; var ids = p.walletIds.concat(_.keys(p.focusedTimestamps)); return _.uniq(ids); }; API.PayPro = PayPro; module.exports = API;
1
15,125
What is this? Is it going to be used later?
bitpay-bitcore
js
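A note on the record above: the old file defines a secret round-trip pair, `API._buildSecret` and `API.parseSecret`. A minimal sketch of that round-trip, assuming `API` is this module's export (the file ends with `module.exports = API;`), `Bitcore` is the bitcore-lib dependency the client requires elsewhere, and the wallet id is an illustrative UUID rather than a value from the record:

// Hypothetical round-trip through the secret helpers shown in the old file above.
var Bitcore = require('bitcore-lib');                     // assumption: the lib aliased as Bitcore
var walletId = '6c6e3d02-8b4a-4f11-a4c1-1a0b8c1d2e3f';    // illustrative UUID, not from the record
var walletPrivKey = new Bitcore.PrivateKey();             // fresh livenet key

var secret = API._buildSecret(walletId, walletPrivKey, 'btc', 'livenet');
var parsed = API.parseSecret(secret);
// parsed.walletId === walletId; parsed.coin === 'btc'; parsed.network === 'livenet'

The fixed split offsets in parseSecret ([22, 74, 75]) work because the base58-encoded wallet id is zero-padded to 22 characters and a compressed livenet WIF is always 52 characters.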
@@ -66,8 +66,7 @@ module Beaker name = host.name Dir.stub( :chdir ).and_yield() - out = double( 'stdout' ) - out.stub( :read ).and_return("Host #{host.name} + vagrant.should_receive(:`).and_return("Host #{host.name} HostName 127.0.0.1 User vagrant Port 2222
1
require 'spec_helper' module Beaker describe Vagrant do let( :vagrant ) { Beaker::Vagrant.new( @hosts, make_opts ) } before :each do @hosts = make_hosts() end it "stores the vagrant file in $WORKINGDIR/.vagrant/beaker_vagrant_files/sample.cfg" do FakeFS.activate! vagrant.stub( :randmac ).and_return( "0123456789" ) path = vagrant.instance_variable_get( :@vagrant_path ) expect( path ).to be === File.join(Dir.pwd, '.vagrant', 'beaker_vagrant_files', 'sample.cfg') end it "can make a Vagranfile for a set of hosts" do FakeFS.activate! path = vagrant.instance_variable_get( :@vagrant_path ) vagrant.stub( :randmac ).and_return( "0123456789" ) vagrant.make_vfile( @hosts ) expect( File.read( File.expand_path( File.join( path, "Vagrantfile") ) ) ).to be === "Vagrant.configure(\"2\") do |c|\n c.vm.define 'vm1' do |v|\n v.vm.hostname = 'vm1'\n v.vm.box = 'vm1_of_my_box'\n v.vm.box_url = 'http://address.for.my.box.vm1'\n v.vm.base_mac = '0123456789'\n v.vm.network :private_network, ip: \"ip.address.for.vm1\", :netmask => \"255.255.0.0\"\n end\n c.vm.define 'vm2' do |v|\n v.vm.hostname = 'vm2'\n v.vm.box = 'vm2_of_my_box'\n v.vm.box_url = 'http://address.for.my.box.vm2'\n v.vm.base_mac = '0123456789'\n v.vm.network :private_network, ip: \"ip.address.for.vm2\", :netmask => \"255.255.0.0\"\n end\n c.vm.define 'vm3' do |v|\n v.vm.hostname = 'vm3'\n v.vm.box = 'vm3_of_my_box'\n v.vm.box_url = 'http://address.for.my.box.vm3'\n v.vm.base_mac = '0123456789'\n v.vm.network :private_network, ip: \"ip.address.for.vm3\", :netmask => \"255.255.0.0\"\n end\n c.vm.provider :virtualbox do |vb|\n vb.customize [\"modifyvm\", :id, \"--memory\", \"1024\"]\n end\nend\n" end it "can generate a new /etc/hosts file referencing each host" do @hosts.each do |host| vagrant.should_receive( :set_etc_hosts ).with( host, "127.0.0.1\tlocalhost localhost.localdomain\nip.address.for.vm1\tvm1\nip.address.for.vm2\tvm2\nip.address.for.vm3\tvm3\n" ).once end vagrant.hack_etc_hosts( @hosts ) end context "can copy vagrant's key to root .ssh on each host" do it "can copy to root on unix" do host = @hosts[0] host[:platform] = 'unix' Command.should_receive( :new ).with("sudo su -c \"cp -r .ssh /root/.\"").once vagrant.copy_ssh_to_root( host ) end it "can copy to Administrator on windows" do host = @hosts[0] host[:platform] = 'windows' Command.should_receive( :new ).with("sudo su -c \"cp -r .ssh /home/Administrator/.\"").once vagrant.copy_ssh_to_root( host ) end end it "can generate a ssh-config file" do host = @hosts[0] name = host.name Dir.stub( :chdir ).and_yield() out = double( 'stdout' ) out.stub( :read ).and_return("Host #{host.name} HostName 127.0.0.1 User vagrant Port 2222 UserKnownHostsFile /dev/null StrictHostKeyChecking no PasswordAuthentication no IdentityFile /home/root/.vagrant.d/insecure_private_key IdentitiesOnly yes") wait_thr = OpenStruct.new state = mock( 'state' ) state.stub( :success? 
).and_return( true ) wait_thr.value = state Open3.stub( :popen3 ).with( 'vagrant', 'ssh-config', host.name ).and_return( [ "", out, "", wait_thr ]) file = double( 'file' ) file.stub( :path ).and_return( '/path/sshconfig' ) file.stub( :rewind ).and_return( true ) Tempfile.should_receive( :new ).with( "#{host.name}").and_return( file ) file.should_receive( :write ).with("Host ip.address.for.#{name}\n HostName 127.0.0.1\n User root\n Port 2222\n UserKnownHostsFile /dev/null\n StrictHostKeyChecking no\n PasswordAuthentication no\n IdentityFile /home/root/.vagrant.d/insecure_private_key\n IdentitiesOnly yes") vagrant.set_ssh_config( host, 'root' ) expect( host['ssh'] ).to be === { :config => file.path } expect( host['user']).to be === 'root' end describe "get_ip_from_vagrant_file" do before :each do FakeFS.activate! vagrant.stub( :randmac ).and_return( "0123456789" ) vagrant.make_vfile( @hosts ) end it "can find the correct ip for the provided hostname" do @hosts.each do |host| expect( vagrant.get_ip_from_vagrant_file(host.name) ).to be === host[:ip] end end it "raises an error if it is unable to find an ip" do expect{ vagrant.get_ip_from_vagrant_file("unknown") }.to raise_error end it "raises an error if no Vagrantfile is present" do File.delete( vagrant.instance_variable_get( :@vagrant_file ) ) @hosts.each do |host| expect{ vagrant.get_ip_from_vagrant_file(host.name) }.to raise_error end end end describe "provisioning and cleanup" do before :each do FakeFS.activate! vagrant.should_receive( :vagrant_cmd ).with( "up" ).once @hosts.each do |host| host_prev_name = host['user'] vagrant.should_receive( :set_ssh_config ).with( host, 'vagrant' ).once vagrant.should_receive( :copy_ssh_to_root ).with( host ).once vagrant.should_receive( :set_ssh_config ).with( host, host_prev_name ).once end vagrant.should_receive( :hack_etc_hosts ).with( @hosts ).once end it "can provision a set of hosts" do vagrant.should_receive( :make_vfile ).with( @hosts ).once vagrant.should_receive( :vagrant_cmd ).with( "destroy --force" ).never vagrant.provision end it "destroys an existing set of hosts before provisioning" do vagrant.make_vfile(@hosts) vagrant.should_receive(:vagrant_cmd).with("destroy --force").once vagrant.provision end it "can cleanup" do vagrant.should_receive( :vagrant_cmd ).with( "destroy --force" ).once FileUtils.should_receive( :rm_rf ).once vagrant.provision vagrant.cleanup end end end end
1
5,122
@anodelman the test failure is probably due to this; it needs to be fixed to `("Host #{host.name}")`
voxpupuli-beaker
rb
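Following the review comment above, a minimal sketch of the corrected stub with the string argument properly closed. The multi-line body mirrors the `vagrant ssh-config` output already stubbed in the old spec; exactly which config lines the test needs to keep is an assumption:

# Hypothetical corrected stub: the and_return argument is now a closed string.
vagrant.should_receive(:`).and_return("Host #{host.name}
  HostName 127.0.0.1
  User vagrant
  Port 2222
  UserKnownHostsFile /dev/null
  StrictHostKeyChecking no
  PasswordAuthentication no
  IdentityFile /home/root/.vagrant.d/insecure_private_key
  IdentitiesOnly yes")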
@@ -75,10 +75,10 @@ -<div class="cart-comments-container"> - <div id="cart-comments"> - <h3>Comments on this purchase request</h3> - <%- if @show_comments %> +<%- if @include_comments_files %> + <div class="cart-comments-container proposal-submodel-container"> + <div id="cart-comments"> + <h3>Comments on this purchase request</h3> <%= form_for [cart, Comment.new] do |f| %> <%= f.text_area :comment_text, rows: 5 %>
1
<div class="inset"> <div class="row"> <div class="col-md-12 col-xs-12"> <h1 class="communicart_header"> <%= cart.proposal.name %> </h1> <div class="communicart_description"> <p> Purchase Request: <strong>#<%= cart.id %></strong> </p> <p> Requested by: <strong><%= cart.requester.full_name %></strong> </p> <%= client_partial(proposal.client, 'external_id', locals: {cart: cart}) %> </div> </div> <%= render partial: "carts/approval_status" %> </div> <div class="row"> <%= client_partial(proposal.client, 'proposal_properties', locals: {proposal: proposal }) %> </div> </div> <% if policy(proposal).can_edit? %> <%= client_partial(proposal.client, 'restart_link', locals: {proposal: proposal, cart: cart}) %> <% end %> <%- if cart.flow == 'parallel' %> <%- if cart.approvals.approved.any? %> <div class="approval-status-container"> <div id="approval-status"> <h3>Request approved by</h3> <ul> <%- cart.approvals.approved.each do |approval| %> <li class='icon-approved'> <%= approval.user_email_address %> <span class='timestamp'>on <%= l approval.updated_at %></span> </li> <%- end %> </ul> </div> </div> <%- end %> <%- if cart.approvals.pending.any? %> <div class="approval-status-container"> <div id="approval-status"> <h3>Waiting for approval from</h3> <ul class="left"> <%- cart.approvals.pending.each do |approval| %> <li class='icon-pending'> <%= approval.user_email_address %> </li> <%- end %> </ul> <ul class="right"> <%- cart.approvals.approved.each do |approval| %> <li class='icon-approved'> <%= approval.user_email_address %> </li> <%- end %> </ul> </div> </div> <%- end %> <%- end %> <div class="cart-comments-container"> <div id="cart-comments"> <h3>Comments on this purchase request</h3> <%- if @show_comments %> <%= form_for [cart, Comment.new] do |f| %> <%= f.text_area :comment_text, rows: 5 %> <div class='row text-area-info-container'> <div class='col-xs-7 col-sm-6 text-area-info-web'> <p> These comments will be sent to your requester through email </p> </div> <p class='col-xs-5 col-sm-6 text-area-button'> <%= submit_tag "Send a Comment", id: :add_a_comment %> </p> </div> <%- end %> <% if cart.comments.any? %> <% cart.comments.each do |c| %> <div class='comment-item'> <div class='row'> <% unless c.user.nil? %> <p class='comment-sender col-sm-6 col-xs-12'> <strong><%= c.user_full_name %></strong> </p> <% end %> <p class='comment-date col-sm-6 col-xs-12'> <%= date_with_tooltip(c.created_at) %> </p> </div> <div class='row'> <p class='comment-text col-sm-6 col-xs-12'> <%= c.comment_text %> </p> </div> </div> <% end %> <% else %> <p class='empty-list-label'> No comments have been added yet </p> <% end %> <%- end %> </div> </div> <% if policy(cart.proposal).can_approve_or_reject? %> <%= render partial: 'approval_actions', locals: { current_user: @current_user, cart: cart} %> <% end %>
1
12,887
I'm not sure that we need this anymore, but that can be a separate discussion.
18F-C2
rb
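For context on the patch above, a hypothetical controller-side counterpart that would set the new `@include_comments_files` flag the view now checks. The controller name, action, and the exact policy predicate are assumptions; the record shows only that Pundit-style `policy(...)` checks are already used in this view:

# Hypothetical: where a controller might set the flag the patched view checks.
class CartsController < ApplicationController
  def show
    @cart = Cart.find(params[:id])
    # Only render the comments/files submodels when the viewer may interact with them.
    @include_comments_files = policy(@cart.proposal).can_edit?
  end
end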
@@ -34,10 +34,14 @@ public interface HttpClient { * @throws IOException if an I/O error occurs. */ HttpResponse execute(HttpRequest request, boolean followRedirects) throws IOException; - + /** - * Creates HttpClient instances. - */ + * Closes the connections associated with this client. + * + * @throws IOException if an I/O error occurs. + */ + void close() throws IOException; + interface Factory { /**
1
// Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package org.openqa.selenium.remote.http;

import java.io.IOException;
import java.net.URL;

/**
 * Defines a simple client for making HTTP requests.
 */
public interface HttpClient {

  /**
   * Executes the given request.
   *
   * @param request the request to execute.
   * @param followRedirects whether to automatically follow redirects.
   * @return the final response.
   * @throws IOException if an I/O error occurs.
   */
  HttpResponse execute(HttpRequest request, boolean followRedirects) throws IOException;

  /**
   * Creates HttpClient instances.
   */
  interface Factory {

    /**
     * Creates a HTTP client that will send requests to the given URL.
     *
     * @param url URL
     * @return HttpClient
     */
    HttpClient createClient(URL url);
  }
}
1
12,899
The formatting seems different from the rest of the code.
SeleniumHQ-selenium
java
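A hypothetical usage sketch for the `close()` method added in the patch above. The `factory` and `request` variables are assumed to exist in the calling code, the URL is illustrative, and checked exceptions are elided for brevity:

// Hypothetical caller: create a client, use it, then close it to free its connections.
HttpClient client = factory.createClient(new URL("http://localhost:4444/wd/hub"));
try {
  HttpResponse response = client.execute(request, true);
  // ... consume the response ...
} finally {
  client.close();  // releases the connections associated with this client
}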
@@ -709,6 +709,9 @@ func (cg *ConfigGenerator) generateProbeConfig( if m.Spec.ProberSpec.Scheme != "" { cfg = append(cfg, yaml.MapItem{Key: "scheme", Value: m.Spec.ProberSpec.Scheme}) } + if m.Spec.ProberSpec.ProxyURL != "" { + cfg = append(cfg, yaml.MapItem{Key: "proxy_url", Value: m.Spec.ProberSpec.ProxyURL}) + } if m.Spec.Module != "" { cfg = append(cfg, yaml.MapItem{Key: "params", Value: yaml.MapSlice{
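The patch above makes the generator emit a `proxy_url` key when `ProberSpec.ProxyURL` is set, right after the optional `scheme` key. A hypothetical fragment of the resulting probe scrape config; the scheme and proxy values are illustrative, not from the record:

# Hypothetical generated fragment for a Probe with both Scheme and ProxyURL set.
scheme: http
proxy_url: http://proxy.example.com:3128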
1
// Copyright 2016 The prometheus-operator Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "fmt" "path" "regexp" "sort" "strings" "github.com/blang/semver/v4" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" yaml "gopkg.in/yaml.v2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "github.com/prometheus-operator/prometheus-operator/pkg/assets" "github.com/prometheus-operator/prometheus-operator/pkg/operator" ) const ( kubernetesSDRoleEndpoint = "endpoints" kubernetesSDRolePod = "pod" kubernetesSDRoleIngress = "ingress" ) var ( invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) ) // ConfigGenerator is used to create Prometheus configurations from operator resources. type ConfigGenerator struct { logger log.Logger } // NewConfigGenerator creates a ConfigGenerator instance using the provided Logger. func NewConfigGenerator(logger log.Logger) *ConfigGenerator { cg := &ConfigGenerator{ logger: logger, } return cg } func sanitizeLabelName(name string) string { return invalidLabelCharRE.ReplaceAllString(name, "_") } func stringMapToMapSlice(m map[string]string) yaml.MapSlice { res := yaml.MapSlice{} ks := make([]string, 0) for k := range m { ks = append(ks, k) } sort.Strings(ks) for _, k := range ks { res = append(res, yaml.MapItem{Key: k, Value: m[k]}) } return res } func addSafeTLStoYaml(cfg yaml.MapSlice, namespace string, tls v1.SafeTLSConfig) yaml.MapSlice { pathForSelector := func(sel v1.SecretOrConfigMap) string { return path.Join(tlsAssetsDir, assets.TLSAssetKeyFromSelector(namespace, sel).String()) } tlsConfig := yaml.MapSlice{ {Key: "insecure_skip_verify", Value: tls.InsecureSkipVerify}, } if tls.CA.Secret != nil || tls.CA.ConfigMap != nil { tlsConfig = append(tlsConfig, yaml.MapItem{Key: "ca_file", Value: pathForSelector(tls.CA)}) } if tls.Cert.Secret != nil || tls.Cert.ConfigMap != nil { tlsConfig = append(tlsConfig, yaml.MapItem{Key: "cert_file", Value: pathForSelector(tls.Cert)}) } if tls.KeySecret != nil { tlsConfig = append(tlsConfig, yaml.MapItem{Key: "key_file", Value: pathForSelector(v1.SecretOrConfigMap{Secret: tls.KeySecret})}) } if tls.ServerName != "" { tlsConfig = append(tlsConfig, yaml.MapItem{Key: "server_name", Value: tls.ServerName}) } cfg = append(cfg, yaml.MapItem{Key: "tls_config", Value: tlsConfig}) return cfg } func addTLStoYaml(cfg yaml.MapSlice, namespace string, tls *v1.TLSConfig) yaml.MapSlice { if tls != nil { tlsConfig := addSafeTLStoYaml(yaml.MapSlice{}, namespace, tls.SafeTLSConfig)[0].Value.(yaml.MapSlice) if tls.CAFile != "" { tlsConfig = append(tlsConfig, yaml.MapItem{Key: "ca_file", Value: tls.CAFile}) } if tls.CertFile != "" { tlsConfig = append(tlsConfig, yaml.MapItem{Key: "cert_file", Value: tls.CertFile}) } if tls.KeyFile != "" { tlsConfig = append(tlsConfig, yaml.MapItem{Key: "key_file", Value: tls.KeyFile}) } cfg = append(cfg, yaml.MapItem{Key: "tls_config", Value: tlsConfig}) } return cfg } func 
buildExternalLabels(p *v1.Prometheus) yaml.MapSlice { m := map[string]string{} // Use "prometheus" external label name by default if field is missing. // Do not add external label if field is set to empty string. prometheusExternalLabelName := "prometheus" if p.Spec.PrometheusExternalLabelName != nil { if *p.Spec.PrometheusExternalLabelName != "" { prometheusExternalLabelName = *p.Spec.PrometheusExternalLabelName } else { prometheusExternalLabelName = "" } } // Use defaultReplicaExternalLabelName constant by default if field is missing. // Do not add external label if field is set to empty string. replicaExternalLabelName := defaultReplicaExternalLabelName if p.Spec.ReplicaExternalLabelName != nil { if *p.Spec.ReplicaExternalLabelName != "" { replicaExternalLabelName = *p.Spec.ReplicaExternalLabelName } else { replicaExternalLabelName = "" } } if prometheusExternalLabelName != "" { m[prometheusExternalLabelName] = fmt.Sprintf("%s/%s", p.Namespace, p.Name) } if replicaExternalLabelName != "" { m[replicaExternalLabelName] = "$(POD_NAME)" } for n, v := range p.Spec.ExternalLabels { m[n] = v } return stringMapToMapSlice(m) } // GenerateConfig creates a serialized YAML representation of a Prometheus configuration using the provided resources. func (cg *ConfigGenerator) GenerateConfig( p *v1.Prometheus, sMons map[string]*v1.ServiceMonitor, pMons map[string]*v1.PodMonitor, probes map[string]*v1.Probe, basicAuthSecrets map[string]assets.BasicAuthCredentials, bearerTokens map[string]assets.BearerToken, additionalScrapeConfigs []byte, additionalAlertRelabelConfigs []byte, additionalAlertManagerConfigs []byte, ruleConfigMapNames []string, ) ([]byte, error) { versionStr := p.Spec.Version if versionStr == "" { versionStr = operator.DefaultPrometheusVersion } version, err := semver.ParseTolerant(versionStr) if err != nil { return nil, errors.Wrap(err, "parse version") } cfg := yaml.MapSlice{} scrapeInterval := "30s" if p.Spec.ScrapeInterval != "" { scrapeInterval = p.Spec.ScrapeInterval } evaluationInterval := "30s" if p.Spec.EvaluationInterval != "" { evaluationInterval = p.Spec.EvaluationInterval } globalItems := yaml.MapSlice{ {Key: "evaluation_interval", Value: evaluationInterval}, {Key: "scrape_interval", Value: scrapeInterval}, {Key: "external_labels", Value: buildExternalLabels(p)}, } if p.Spec.ScrapeTimeout != "" { globalItems = append(globalItems, yaml.MapItem{ Key: "scrape_timeout", Value: p.Spec.ScrapeTimeout, }) } if version.GTE(semver.MustParse("2.16.0")) && p.Spec.QueryLogFile != "" { globalItems = append(globalItems, yaml.MapItem{ Key: "query_log_file", Value: p.Spec.QueryLogFile, }) } cfg = append(cfg, yaml.MapItem{Key: "global", Value: globalItems}) ruleFilePaths := []string{} for _, name := range ruleConfigMapNames { ruleFilePaths = append(ruleFilePaths, rulesDir+"/"+name+"/*.yaml") } cfg = append(cfg, yaml.MapItem{ Key: "rule_files", Value: ruleFilePaths, }) sMonIdentifiers := make([]string, len(sMons)) i := 0 for k := range sMons { sMonIdentifiers[i] = k i++ } // Sorting ensures, that we always generate the config in the same order. sort.Strings(sMonIdentifiers) pMonIdentifiers := make([]string, len(pMons)) i = 0 for k := range pMons { pMonIdentifiers[i] = k i++ } // Sorting ensures, that we always generate the config in the same order. sort.Strings(pMonIdentifiers) probeIdentifiers := make([]string, len(probes)) i = 0 for k := range probes { probeIdentifiers[i] = k i++ } // Sorting ensures, that we always generate the config in the same order. 
sort.Strings(probeIdentifiers) apiserverConfig := p.Spec.APIServerConfig shards := int32(1) if p.Spec.Shards != nil && *p.Spec.Shards > 1 { shards = *p.Spec.Shards } var scrapeConfigs []yaml.MapSlice for _, identifier := range sMonIdentifiers { for i, ep := range sMons[identifier].Spec.Endpoints { scrapeConfigs = append(scrapeConfigs, cg.generateServiceMonitorConfig( version, sMons[identifier], ep, i, apiserverConfig, basicAuthSecrets, bearerTokens, p.Spec.OverrideHonorLabels, p.Spec.OverrideHonorTimestamps, p.Spec.IgnoreNamespaceSelectors, p.Spec.EnforcedNamespaceLabel, p.Spec.EnforcedSampleLimit, p.Spec.EnforcedTargetLimit, shards, ), ) } } for _, identifier := range pMonIdentifiers { for i, ep := range pMons[identifier].Spec.PodMetricsEndpoints { scrapeConfigs = append(scrapeConfigs, cg.generatePodMonitorConfig( version, pMons[identifier], ep, i, apiserverConfig, basicAuthSecrets, bearerTokens, p.Spec.OverrideHonorLabels, p.Spec.OverrideHonorTimestamps, p.Spec.IgnoreNamespaceSelectors, p.Spec.EnforcedNamespaceLabel, p.Spec.EnforcedSampleLimit, p.Spec.EnforcedTargetLimit, shards, ), ) } } for _, identifier := range probeIdentifiers { scrapeConfigs = append(scrapeConfigs, cg.generateProbeConfig( version, probes[identifier], apiserverConfig, basicAuthSecrets, bearerTokens, p.Spec.OverrideHonorLabels, p.Spec.OverrideHonorTimestamps, p.Spec.IgnoreNamespaceSelectors, p.Spec.EnforcedNamespaceLabel, ), ) } var alertmanagerConfigs []yaml.MapSlice if p.Spec.Alerting != nil { for _, am := range p.Spec.Alerting.Alertmanagers { alertmanagerConfigs = append(alertmanagerConfigs, cg.generateAlertmanagerConfig(version, am, apiserverConfig, basicAuthSecrets)) } } var additionalScrapeConfigsYaml []yaml.MapSlice err = yaml.Unmarshal([]byte(additionalScrapeConfigs), &additionalScrapeConfigsYaml) if err != nil { return nil, errors.Wrap(err, "unmarshalling additional scrape configs failed") } cfg = append(cfg, yaml.MapItem{ Key: "scrape_configs", Value: append(scrapeConfigs, additionalScrapeConfigsYaml...), }) var additionalAlertManagerConfigsYaml []yaml.MapSlice err = yaml.Unmarshal([]byte(additionalAlertManagerConfigs), &additionalAlertManagerConfigsYaml) if err != nil { return nil, errors.Wrap(err, "unmarshalling additional alert manager configs failed") } alertmanagerConfigs = append(alertmanagerConfigs, additionalAlertManagerConfigsYaml...) var alertRelabelConfigs []yaml.MapSlice // Use defaultReplicaExternalLabelName constant by default if field is missing. // Do not add external label if field is set to empty string. 
replicaExternalLabelName := defaultReplicaExternalLabelName if p.Spec.ReplicaExternalLabelName != nil { if *p.Spec.ReplicaExternalLabelName != "" { replicaExternalLabelName = *p.Spec.ReplicaExternalLabelName } else { replicaExternalLabelName = "" } } // action 'labeldrop' is not supported <= v1.4.1 if replicaExternalLabelName != "" && version.GT(semver.MustParse("1.4.1")) { // Drop replica label, to make alerts from multiple Prometheus replicas alike alertRelabelConfigs = append(alertRelabelConfigs, yaml.MapSlice{ {Key: "action", Value: "labeldrop"}, {Key: "regex", Value: regexp.QuoteMeta(replicaExternalLabelName)}, }) } var additionalAlertRelabelConfigsYaml []yaml.MapSlice err = yaml.Unmarshal([]byte(additionalAlertRelabelConfigs), &additionalAlertRelabelConfigsYaml) if err != nil { return nil, errors.Wrap(err, "unmarshalling additional alerting relabel configs failed") } cfg = append(cfg, yaml.MapItem{ Key: "alerting", Value: yaml.MapSlice{ { Key: "alert_relabel_configs", Value: append(alertRelabelConfigs, additionalAlertRelabelConfigsYaml...), }, { Key: "alertmanagers", Value: alertmanagerConfigs, }, }, }) if len(p.Spec.RemoteWrite) > 0 { cfg = append(cfg, cg.generateRemoteWriteConfig(version, p, basicAuthSecrets)) } if len(p.Spec.RemoteRead) > 0 { cfg = append(cfg, cg.generateRemoteReadConfig(version, p, basicAuthSecrets)) } return yaml.Marshal(cfg) } // honorLabels determines the value of honor_labels. // if overrideHonorLabels is true and user tries to set the // value to true, we want to set honor_labels to false. func honorLabels(userHonorLabels, overrideHonorLabels bool) bool { if userHonorLabels && overrideHonorLabels { return false } return userHonorLabels } // honorTimestamps adds option to enforce honor_timestamps option in scrape_config. // We want to disable honoring timestamps when user specified it or when global // override is set. 
For backwards compatibility with prometheus <2.9.0 we don't // set honor_timestamps when that option wasn't specified anywhere func honorTimestamps(cfg yaml.MapSlice, userHonorTimestamps *bool, overrideHonorTimestamps bool) yaml.MapSlice { // Ensuring backwards compatibility by checking if user set any option if userHonorTimestamps == nil && !overrideHonorTimestamps { return cfg } honor := false if userHonorTimestamps != nil { honor = *userHonorTimestamps } return append(cfg, yaml.MapItem{Key: "honor_timestamps", Value: honor && !overrideHonorTimestamps}) } func initRelabelings() []yaml.MapSlice { // Relabel prometheus job name into a meta label return []yaml.MapSlice{ { {Key: "source_labels", Value: []string{"job"}}, {Key: "target_label", Value: "__tmp_prometheus_job_name"}, }, } } func (cg *ConfigGenerator) generatePodMonitorConfig( version semver.Version, m *v1.PodMonitor, ep v1.PodMetricsEndpoint, i int, apiserverConfig *v1.APIServerConfig, basicAuthSecrets map[string]assets.BasicAuthCredentials, bearerTokens map[string]assets.BearerToken, ignoreHonorLabels bool, overrideHonorTimestamps bool, ignoreNamespaceSelectors bool, enforcedNamespaceLabel string, enforcedSampleLimit *uint64, enforcedTargetLimit *uint64, shards int32, ) yaml.MapSlice { hl := honorLabels(ep.HonorLabels, ignoreHonorLabels) cfg := yaml.MapSlice{ { Key: "job_name", Value: fmt.Sprintf("podMonitor/%s/%s/%d", m.Namespace, m.Name, i), }, { Key: "honor_labels", Value: hl, }, } if version.Major == 2 && version.Minor >= 9 { cfg = honorTimestamps(cfg, ep.HonorTimestamps, overrideHonorTimestamps) } selectedNamespaces := getNamespacesFromNamespaceSelector(&m.Spec.NamespaceSelector, m.Namespace, ignoreNamespaceSelectors) cfg = append(cfg, cg.generateK8SSDConfig(selectedNamespaces, apiserverConfig, basicAuthSecrets, kubernetesSDRolePod)) if ep.Interval != "" { cfg = append(cfg, yaml.MapItem{Key: "scrape_interval", Value: ep.Interval}) } if ep.ScrapeTimeout != "" { cfg = append(cfg, yaml.MapItem{Key: "scrape_timeout", Value: ep.ScrapeTimeout}) } if ep.Path != "" { cfg = append(cfg, yaml.MapItem{Key: "metrics_path", Value: ep.Path}) } if ep.ProxyURL != nil { cfg = append(cfg, yaml.MapItem{Key: "proxy_url", Value: ep.ProxyURL}) } if ep.Params != nil { cfg = append(cfg, yaml.MapItem{Key: "params", Value: ep.Params}) } if ep.Scheme != "" { cfg = append(cfg, yaml.MapItem{Key: "scheme", Value: ep.Scheme}) } if ep.TLSConfig != nil { cfg = addSafeTLStoYaml(cfg, m.Namespace, ep.TLSConfig.SafeTLSConfig) } if ep.BearerTokenSecret.Name != "" { if s, ok := bearerTokens[fmt.Sprintf("podMonitor/%s/%s/%d", m.Namespace, m.Name, i)]; ok { cfg = append(cfg, yaml.MapItem{Key: "bearer_token", Value: s}) } } if ep.BasicAuth != nil { if s, ok := basicAuthSecrets[fmt.Sprintf("podMonitor/%s/%s/%d", m.Namespace, m.Name, i)]; ok { cfg = append(cfg, yaml.MapItem{ Key: "basic_auth", Value: yaml.MapSlice{ {Key: "username", Value: s.Username}, {Key: "password", Value: s.Password}, }, }) } } relabelings := initRelabelings() var labelKeys []string // Filter targets by pods selected by the monitor. // Exact label matches. for k := range m.Spec.Selector.MatchLabels { labelKeys = append(labelKeys, k) } sort.Strings(labelKeys) for _, k := range labelKeys { relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "keep"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_pod_label_" + sanitizeLabelName(k)}}, {Key: "regex", Value: m.Spec.Selector.MatchLabels[k]}, }) } // Set based label matching. 
We have to map the valid relations // `In`, `NotIn`, `Exists`, and `DoesNotExist`, into relabeling rules. for _, exp := range m.Spec.Selector.MatchExpressions { switch exp.Operator { case metav1.LabelSelectorOpIn: relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "keep"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_pod_label_" + sanitizeLabelName(exp.Key)}}, {Key: "regex", Value: strings.Join(exp.Values, "|")}, }) case metav1.LabelSelectorOpNotIn: relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "drop"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_pod_label_" + sanitizeLabelName(exp.Key)}}, {Key: "regex", Value: strings.Join(exp.Values, "|")}, }) case metav1.LabelSelectorOpExists: relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "keep"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_pod_labelpresent_" + sanitizeLabelName(exp.Key)}}, {Key: "regex", Value: "true"}, }) case metav1.LabelSelectorOpDoesNotExist: relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "drop"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_pod_labelpresent_" + sanitizeLabelName(exp.Key)}}, {Key: "regex", Value: "true"}, }) } } // Filter targets based on correct port for the endpoint. if ep.Port != "" { relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "keep"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_pod_container_port_name"}}, {Key: "regex", Value: ep.Port}, }) } else if ep.TargetPort != nil { //nolint:staticcheck // Ignore SA1019 this field is marked as deprecated. level.Warn(cg.logger).Log("msg", "PodMonitor 'targetPort' is deprecated, use 'port' instead.", "podMonitor", m.Name) //nolint:staticcheck // Ignore SA1019 this field is marked as deprecated. if ep.TargetPort.StrVal != "" { relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "keep"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_pod_container_port_name"}}, {Key: "regex", Value: ep.TargetPort.String()}, }) } else if ep.TargetPort.IntVal != 0 { //nolint:staticcheck // Ignore SA1019 this field is marked as deprecated. relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "keep"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_pod_container_port_number"}}, {Key: "regex", Value: ep.TargetPort.String()}, }) } } // Relabel namespace and pod and service labels into proper labels. relabelings = append(relabelings, []yaml.MapSlice{ { {Key: "source_labels", Value: []string{"__meta_kubernetes_namespace"}}, {Key: "target_label", Value: "namespace"}, }, { {Key: "source_labels", Value: []string{"__meta_kubernetes_pod_container_name"}}, {Key: "target_label", Value: "container"}, }, { {Key: "source_labels", Value: []string{"__meta_kubernetes_pod_name"}}, {Key: "target_label", Value: "pod"}, }, }...) // Relabel targetLabels from Pod onto target. for _, l := range m.Spec.PodTargetLabels { relabelings = append(relabelings, yaml.MapSlice{ {Key: "source_labels", Value: []string{"__meta_kubernetes_pod_label_" + sanitizeLabelName(l)}}, {Key: "target_label", Value: sanitizeLabelName(l)}, {Key: "regex", Value: "(.+)"}, {Key: "replacement", Value: "${1}"}, }) } // By default, generate a safe job name from the PodMonitor. We also keep // this around if a jobLabel is set in case the targets don't actually have a // value for it. 
A single pod may potentially have multiple metrics // endpoints, therefore the endpoints labels is filled with the ports name or // as a fallback the port number. relabelings = append(relabelings, yaml.MapSlice{ {Key: "target_label", Value: "job"}, {Key: "replacement", Value: fmt.Sprintf("%s/%s", m.GetNamespace(), m.GetName())}, }) if m.Spec.JobLabel != "" { relabelings = append(relabelings, yaml.MapSlice{ {Key: "source_labels", Value: []string{"__meta_kubernetes_pod_label_" + sanitizeLabelName(m.Spec.JobLabel)}}, {Key: "target_label", Value: "job"}, {Key: "regex", Value: "(.+)"}, {Key: "replacement", Value: "${1}"}, }) } if ep.Port != "" { relabelings = append(relabelings, yaml.MapSlice{ {Key: "target_label", Value: "endpoint"}, {Key: "replacement", Value: ep.Port}, }) } else if ep.TargetPort != nil && ep.TargetPort.String() != "" { //nolint:staticcheck // Ignore SA1019 this field is marked as deprecated. relabelings = append(relabelings, yaml.MapSlice{ {Key: "target_label", Value: "endpoint"}, {Key: "replacement", Value: ep.TargetPort.String()}, //nolint:staticcheck // Ignore SA1019 this field is marked as deprecated. }) } if ep.RelabelConfigs != nil { for _, c := range ep.RelabelConfigs { relabelings = append(relabelings, generateRelabelConfig(c)) } } // Because of security risks, whenever enforcedNamespaceLabel is set, we want to append it to the // relabel_configs as the last relabeling, to ensure it overrides any other relabelings. relabelings = enforceNamespaceLabel(relabelings, m.Namespace, enforcedNamespaceLabel) relabelings = generateAddressShardingRelabelingRules(relabelings, shards) cfg = append(cfg, yaml.MapItem{Key: "relabel_configs", Value: relabelings}) if m.Spec.SampleLimit > 0 || enforcedSampleLimit != nil { cfg = append(cfg, yaml.MapItem{Key: "sample_limit", Value: getLimit(m.Spec.SampleLimit, enforcedSampleLimit)}) } if version.Major == 2 && version.Minor >= 21 && (m.Spec.TargetLimit > 0 || enforcedTargetLimit != nil) { cfg = append(cfg, yaml.MapItem{Key: "target_limit", Value: getLimit(m.Spec.TargetLimit, enforcedTargetLimit)}) } if ep.MetricRelabelConfigs != nil { var metricRelabelings []yaml.MapSlice for _, c := range ep.MetricRelabelConfigs { if c.TargetLabel != "" && enforcedNamespaceLabel != "" && c.TargetLabel == enforcedNamespaceLabel { continue } relabeling := generateRelabelConfig(c) metricRelabelings = append(metricRelabelings, relabeling) } cfg = append(cfg, yaml.MapItem{Key: "metric_relabel_configs", Value: metricRelabelings}) } return cfg } func (cg *ConfigGenerator) generateProbeConfig( version semver.Version, m *v1.Probe, apiserverConfig *v1.APIServerConfig, basicAuthSecrets map[string]assets.BasicAuthCredentials, bearerTokens map[string]assets.BearerToken, ignoreHonorLabels bool, overrideHonorTimestamps bool, ignoreNamespaceSelectors bool, enforcedNamespaceLabel string) yaml.MapSlice { jobName := fmt.Sprintf("probe/%s/%s", m.Namespace, m.Name) cfg := yaml.MapSlice{ { Key: "job_name", Value: jobName, }, } hTs := true cfg = honorTimestamps(cfg, &hTs, overrideHonorTimestamps) path := "/probe" if m.Spec.ProberSpec.Path != "" { path = m.Spec.ProberSpec.Path } cfg = append(cfg, yaml.MapItem{Key: "metrics_path", Value: path}) if m.Spec.Interval != "" { cfg = append(cfg, yaml.MapItem{Key: "scrape_interval", Value: m.Spec.Interval}) } if m.Spec.ScrapeTimeout != "" { cfg = append(cfg, yaml.MapItem{Key: "scrape_timeout", Value: m.Spec.ScrapeTimeout}) } if m.Spec.ProberSpec.Scheme != "" { cfg = append(cfg, yaml.MapItem{Key: "scheme", Value: 
m.Spec.ProberSpec.Scheme}) } if m.Spec.Module != "" { cfg = append(cfg, yaml.MapItem{Key: "params", Value: yaml.MapSlice{ {Key: "module", Value: []string{m.Spec.Module}}, }}) } relabelings := initRelabelings() if m.Spec.JobName != "" { relabelings = append(relabelings, []yaml.MapSlice{ { {Key: "target_label", Value: "job"}, {Key: "replacement", Value: m.Spec.JobName}, }, }...) } // Generate static_config section. if m.Spec.Targets.StaticConfig != nil { staticConfig := yaml.MapSlice{ {Key: "targets", Value: m.Spec.Targets.StaticConfig.Targets}, } if m.Spec.Targets.StaticConfig.Labels != nil { if _, ok := m.Spec.Targets.StaticConfig.Labels["namespace"]; !ok { m.Spec.Targets.StaticConfig.Labels["namespace"] = m.Namespace } } else { m.Spec.Targets.StaticConfig.Labels = map[string]string{"namespace": m.Namespace} } staticConfig = append(staticConfig, yaml.MapSlice{ {Key: "labels", Value: m.Spec.Targets.StaticConfig.Labels}, }...) cfg = append(cfg, yaml.MapItem{ Key: "static_configs", Value: []yaml.MapSlice{staticConfig}, }) // Relabelings for prober. relabelings = append(relabelings, []yaml.MapSlice{ { {Key: "source_labels", Value: []string{"__address__"}}, {Key: "target_label", Value: "__param_target"}, }, { {Key: "source_labels", Value: []string{"__param_target"}}, {Key: "target_label", Value: "instance"}, }, { {Key: "target_label", Value: "__address__"}, {Key: "replacement", Value: m.Spec.ProberSpec.URL}, }, }...) // Add configured relabelings. if m.Spec.Targets.StaticConfig.RelabelConfigs != nil { for _, r := range m.Spec.Targets.StaticConfig.RelabelConfigs { relabelings = append(relabelings, generateRelabelConfig(r)) } } cfg = append(cfg, yaml.MapItem{Key: "relabel_configs", Value: enforceNamespaceLabel(relabelings, m.Namespace, enforcedNamespaceLabel)}) } // Generate kubernetes_sd_config section for ingress resources. if m.Spec.Targets.StaticConfig == nil { labelKeys := make([]string, 0, len(m.Spec.Targets.Ingress.Selector.MatchLabels)) // Filter targets by ingresses selected by the monitor. // Exact label matches. for k := range m.Spec.Targets.Ingress.Selector.MatchLabels { labelKeys = append(labelKeys, k) } sort.Strings(labelKeys) for _, k := range labelKeys { relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "keep"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_ingress_label_" + sanitizeLabelName(k)}}, {Key: "regex", Value: m.Spec.Targets.Ingress.Selector.MatchLabels[k]}, }) } // Set based label matching. We have to map the valid relations // `In`, `NotIn`, `Exists`, and `DoesNotExist`, into relabeling rules. 
for _, exp := range m.Spec.Targets.Ingress.Selector.MatchExpressions { switch exp.Operator { case metav1.LabelSelectorOpIn: relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "keep"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_ingress_label_" + sanitizeLabelName(exp.Key)}}, {Key: "regex", Value: strings.Join(exp.Values, "|")}, }) case metav1.LabelSelectorOpNotIn: relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "drop"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_ingress_label_" + sanitizeLabelName(exp.Key)}}, {Key: "regex", Value: strings.Join(exp.Values, "|")}, }) case metav1.LabelSelectorOpExists: relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "keep"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_ingress_labelpresent_" + sanitizeLabelName(exp.Key)}}, {Key: "regex", Value: "true"}, }) case metav1.LabelSelectorOpDoesNotExist: relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "drop"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_ingress_labelpresent_" + sanitizeLabelName(exp.Key)}}, {Key: "regex", Value: "true"}, }) } } selectedNamespaces := getNamespacesFromNamespaceSelector(&m.Spec.Targets.Ingress.NamespaceSelector, m.Namespace, ignoreNamespaceSelectors) cfg = append(cfg, cg.generateK8SSDConfig(selectedNamespaces, apiserverConfig, basicAuthSecrets, kubernetesSDRoleIngress)) // Relabelings for ingress SD. relabelings = append(relabelings, []yaml.MapSlice{ { {Key: "source_labels", Value: []string{"__meta_kubernetes_ingress_scheme", "__address__", "__meta_kubernetes_ingress_path"}}, {Key: "separator", Value: ";"}, {Key: "regex", Value: "(.+);(.+);(.+)"}, {Key: "target_label", Value: "__param_target"}, {Key: "replacement", Value: "${1}://${2}${3}"}, {Key: "action", Value: "replace"}, }, { {Key: "source_labels", Value: []string{"__meta_kubernetes_namespace"}}, {Key: "target_label", Value: "namespace"}, }, { {Key: "source_labels", Value: []string{"__meta_kubernetes_ingress_name"}}, {Key: "target_label", Value: "ingress"}, }, }...) // Relabelings for prober. relabelings = append(relabelings, []yaml.MapSlice{ { {Key: "source_labels", Value: []string{"__param_target"}}, {Key: "target_label", Value: "instance"}, }, { {Key: "target_label", Value: "__address__"}, {Key: "replacement", Value: m.Spec.ProberSpec.URL}, }, }...) // Add configured relabelings. 
if m.Spec.Targets.Ingress.RelabelConfigs != nil { for _, r := range m.Spec.Targets.Ingress.RelabelConfigs { relabelings = append(relabelings, generateRelabelConfig(r)) } } relabelings = enforceNamespaceLabel(relabelings, m.Namespace, enforcedNamespaceLabel) cfg = append(cfg, yaml.MapItem{Key: "relabel_configs", Value: relabelings}) } if m.Spec.TLSConfig != nil { cfg = addSafeTLStoYaml(cfg, m.Namespace, m.Spec.TLSConfig.SafeTLSConfig) } if m.Spec.BearerTokenSecret.Name != "" { pnKey := fmt.Sprintf("probe/%s/%s", m.GetNamespace(), m.GetName()) if s, ok := bearerTokens[pnKey]; ok { cfg = append(cfg, yaml.MapItem{Key: "bearer_token", Value: s}) } } if m.Spec.BasicAuth != nil { if s, ok := basicAuthSecrets[fmt.Sprintf("probe/%s/%s", m.Namespace, m.Name)]; ok { cfg = append(cfg, yaml.MapItem{ Key: "basic_auth", Value: yaml.MapSlice{ {Key: "username", Value: s.Username}, {Key: "password", Value: s.Password}, }, }) } } return cfg } func (cg *ConfigGenerator) generateServiceMonitorConfig( version semver.Version, m *v1.ServiceMonitor, ep v1.Endpoint, i int, apiserverConfig *v1.APIServerConfig, basicAuthSecrets map[string]assets.BasicAuthCredentials, bearerTokens map[string]assets.BearerToken, overrideHonorLabels bool, overrideHonorTimestamps bool, ignoreNamespaceSelectors bool, enforcedNamespaceLabel string, enforcedSampleLimit *uint64, enforcedTargetLimit *uint64, shards int32, ) yaml.MapSlice { hl := honorLabels(ep.HonorLabels, overrideHonorLabels) cfg := yaml.MapSlice{ { Key: "job_name", Value: fmt.Sprintf("serviceMonitor/%s/%s/%d", m.Namespace, m.Name, i), }, { Key: "honor_labels", Value: hl, }, } if version.Major == 2 && version.Minor >= 9 { cfg = honorTimestamps(cfg, ep.HonorTimestamps, overrideHonorTimestamps) } selectedNamespaces := getNamespacesFromNamespaceSelector(&m.Spec.NamespaceSelector, m.Namespace, ignoreNamespaceSelectors) cfg = append(cfg, cg.generateK8SSDConfig(selectedNamespaces, apiserverConfig, basicAuthSecrets, kubernetesSDRoleEndpoint)) if ep.Interval != "" { cfg = append(cfg, yaml.MapItem{Key: "scrape_interval", Value: ep.Interval}) } if ep.ScrapeTimeout != "" { cfg = append(cfg, yaml.MapItem{Key: "scrape_timeout", Value: ep.ScrapeTimeout}) } if ep.Path != "" { cfg = append(cfg, yaml.MapItem{Key: "metrics_path", Value: ep.Path}) } if ep.ProxyURL != nil { cfg = append(cfg, yaml.MapItem{Key: "proxy_url", Value: ep.ProxyURL}) } if ep.Params != nil { cfg = append(cfg, yaml.MapItem{Key: "params", Value: ep.Params}) } if ep.Scheme != "" { cfg = append(cfg, yaml.MapItem{Key: "scheme", Value: ep.Scheme}) } cfg = addTLStoYaml(cfg, m.Namespace, ep.TLSConfig) if ep.BearerTokenFile != "" { cfg = append(cfg, yaml.MapItem{Key: "bearer_token_file", Value: ep.BearerTokenFile}) } if ep.BearerTokenSecret.Name != "" { if s, ok := bearerTokens[fmt.Sprintf("serviceMonitor/%s/%s/%d", m.Namespace, m.Name, i)]; ok { cfg = append(cfg, yaml.MapItem{Key: "bearer_token", Value: s}) } } if ep.BasicAuth != nil { if s, ok := basicAuthSecrets[fmt.Sprintf("serviceMonitor/%s/%s/%d", m.Namespace, m.Name, i)]; ok { cfg = append(cfg, yaml.MapItem{ Key: "basic_auth", Value: yaml.MapSlice{ {Key: "username", Value: s.Username}, {Key: "password", Value: s.Password}, }, }) } } relabelings := initRelabelings() // Filter targets by services selected by the monitor. // Exact label matches. 
var labelKeys []string for k := range m.Spec.Selector.MatchLabels { labelKeys = append(labelKeys, k) } sort.Strings(labelKeys) for _, k := range labelKeys { relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "keep"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_service_label_" + sanitizeLabelName(k)}}, {Key: "regex", Value: m.Spec.Selector.MatchLabels[k]}, }) } // Set based label matching. We have to map the valid relations // `In`, `NotIn`, `Exists`, and `DoesNotExist`, into relabeling rules. for _, exp := range m.Spec.Selector.MatchExpressions { switch exp.Operator { case metav1.LabelSelectorOpIn: relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "keep"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_service_label_" + sanitizeLabelName(exp.Key)}}, {Key: "regex", Value: strings.Join(exp.Values, "|")}, }) case metav1.LabelSelectorOpNotIn: relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "drop"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_service_label_" + sanitizeLabelName(exp.Key)}}, {Key: "regex", Value: strings.Join(exp.Values, "|")}, }) case metav1.LabelSelectorOpExists: relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "keep"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_service_labelpresent_" + sanitizeLabelName(exp.Key)}}, {Key: "regex", Value: "true"}, }) case metav1.LabelSelectorOpDoesNotExist: relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "drop"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_service_labelpresent_" + sanitizeLabelName(exp.Key)}}, {Key: "regex", Value: "true"}, }) } } // Filter targets based on correct port for the endpoint. if ep.Port != "" { relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "keep"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_endpoint_port_name"}}, {Key: "regex", Value: ep.Port}, }) } else if ep.TargetPort != nil { if ep.TargetPort.StrVal != "" { relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "keep"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_pod_container_port_name"}}, {Key: "regex", Value: ep.TargetPort.String()}, }) } else if ep.TargetPort.IntVal != 0 { relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "keep"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_pod_container_port_number"}}, {Key: "regex", Value: ep.TargetPort.String()}, }) } } // Relabel namespace and pod and service labels into proper labels. 
relabelings = append(relabelings, []yaml.MapSlice{ { // Relabel node labels for pre v2.3 meta labels {Key: "source_labels", Value: []string{"__meta_kubernetes_endpoint_address_target_kind", "__meta_kubernetes_endpoint_address_target_name"}}, {Key: "separator", Value: ";"}, {Key: "regex", Value: "Node;(.*)"}, {Key: "replacement", Value: "${1}"}, {Key: "target_label", Value: "node"}, }, { // Relabel pod labels for >=v2.3 meta labels {Key: "source_labels", Value: []string{"__meta_kubernetes_endpoint_address_target_kind", "__meta_kubernetes_endpoint_address_target_name"}}, {Key: "separator", Value: ";"}, {Key: "regex", Value: "Pod;(.*)"}, {Key: "replacement", Value: "${1}"}, {Key: "target_label", Value: "pod"}, }, { {Key: "source_labels", Value: []string{"__meta_kubernetes_namespace"}}, {Key: "target_label", Value: "namespace"}, }, { {Key: "source_labels", Value: []string{"__meta_kubernetes_service_name"}}, {Key: "target_label", Value: "service"}, }, { {Key: "source_labels", Value: []string{"__meta_kubernetes_pod_name"}}, {Key: "target_label", Value: "pod"}, }, { {Key: "source_labels", Value: []string{"__meta_kubernetes_pod_container_name"}}, {Key: "target_label", Value: "container"}, }, }...) // Relabel targetLabels from Service onto target. for _, l := range m.Spec.TargetLabels { relabelings = append(relabelings, yaml.MapSlice{ {Key: "source_labels", Value: []string{"__meta_kubernetes_service_label_" + sanitizeLabelName(l)}}, {Key: "target_label", Value: sanitizeLabelName(l)}, {Key: "regex", Value: "(.+)"}, {Key: "replacement", Value: "${1}"}, }) } for _, l := range m.Spec.PodTargetLabels { relabelings = append(relabelings, yaml.MapSlice{ {Key: "source_labels", Value: []string{"__meta_kubernetes_pod_label_" + sanitizeLabelName(l)}}, {Key: "target_label", Value: sanitizeLabelName(l)}, {Key: "regex", Value: "(.+)"}, {Key: "replacement", Value: "${1}"}, }) } // By default, generate a safe job name from the service name. We also keep // this around if a jobLabel is set in case the targets don't actually have a // value for it. relabelings = append(relabelings, yaml.MapSlice{ {Key: "source_labels", Value: []string{"__meta_kubernetes_service_name"}}, {Key: "target_label", Value: "job"}, {Key: "replacement", Value: "${1}"}, }) if m.Spec.JobLabel != "" { relabelings = append(relabelings, yaml.MapSlice{ {Key: "source_labels", Value: []string{"__meta_kubernetes_service_label_" + sanitizeLabelName(m.Spec.JobLabel)}}, {Key: "target_label", Value: "job"}, {Key: "regex", Value: "(.+)"}, {Key: "replacement", Value: "${1}"}, }) } // A single service may potentially have multiple metrics // endpoints, therefore the endpoints labels is filled with the ports name or // as a fallback the port number. if ep.Port != "" { relabelings = append(relabelings, yaml.MapSlice{ {Key: "target_label", Value: "endpoint"}, {Key: "replacement", Value: ep.Port}, }) } else if ep.TargetPort != nil && ep.TargetPort.String() != "" { relabelings = append(relabelings, yaml.MapSlice{ {Key: "target_label", Value: "endpoint"}, {Key: "replacement", Value: ep.TargetPort.String()}, }) } if ep.RelabelConfigs != nil { for _, c := range ep.RelabelConfigs { relabelings = append(relabelings, generateRelabelConfig(c)) } } // Because of security risks, whenever enforcedNamespaceLabel is set, we want to append it to the // relabel_configs as the last relabeling, to ensure it overrides any other relabelings. 
relabelings = enforceNamespaceLabel(relabelings, m.Namespace, enforcedNamespaceLabel) relabelings = generateAddressShardingRelabelingRules(relabelings, shards) cfg = append(cfg, yaml.MapItem{Key: "relabel_configs", Value: relabelings}) if m.Spec.SampleLimit > 0 || enforcedSampleLimit != nil { cfg = append(cfg, yaml.MapItem{Key: "sample_limit", Value: getLimit(m.Spec.SampleLimit, enforcedSampleLimit)}) } if version.Major == 2 && version.Minor >= 21 && (m.Spec.TargetLimit > 0 || enforcedTargetLimit != nil) { cfg = append(cfg, yaml.MapItem{Key: "target_limit", Value: getLimit(m.Spec.TargetLimit, enforcedTargetLimit)}) } if ep.MetricRelabelConfigs != nil { var metricRelabelings []yaml.MapSlice for _, c := range ep.MetricRelabelConfigs { if c.TargetLabel != "" && enforcedNamespaceLabel != "" && c.TargetLabel == enforcedNamespaceLabel { continue } relabeling := generateRelabelConfig(c) metricRelabelings = append(metricRelabelings, relabeling) } cfg = append(cfg, yaml.MapItem{Key: "metric_relabel_configs", Value: metricRelabelings}) } return cfg } func getLimit(user uint64, enforced *uint64) uint64 { if enforced != nil { if user < *enforced && user != 0 || *enforced == 0 { return user } return *enforced } return user } func generateAddressShardingRelabelingRules(relabelings []yaml.MapSlice, shards int32) []yaml.MapSlice { return append(relabelings, yaml.MapSlice{ {Key: "source_labels", Value: []string{"__address__"}}, {Key: "target_label", Value: "__tmp_hash"}, {Key: "modulus", Value: shards}, {Key: "action", Value: "hashmod"}, }, yaml.MapSlice{ {Key: "source_labels", Value: []string{"__tmp_hash"}}, {Key: "regex", Value: "$(SHARD)"}, {Key: "action", Value: "keep"}, }) } func enforceNamespaceLabel(relabelings []yaml.MapSlice, namespace, enforcedNamespaceLabel string) []yaml.MapSlice { if enforcedNamespaceLabel == "" { return relabelings } return append(relabelings, yaml.MapSlice{ {Key: "target_label", Value: enforcedNamespaceLabel}, {Key: "replacement", Value: namespace}}) } func generateRelabelConfig(c *v1.RelabelConfig) yaml.MapSlice { relabeling := yaml.MapSlice{} if len(c.SourceLabels) > 0 { relabeling = append(relabeling, yaml.MapItem{Key: "source_labels", Value: c.SourceLabels}) } if c.Separator != "" { relabeling = append(relabeling, yaml.MapItem{Key: "separator", Value: c.Separator}) } if c.TargetLabel != "" { relabeling = append(relabeling, yaml.MapItem{Key: "target_label", Value: c.TargetLabel}) } if c.Regex != "" { relabeling = append(relabeling, yaml.MapItem{Key: "regex", Value: c.Regex}) } if c.Modulus != uint64(0) { relabeling = append(relabeling, yaml.MapItem{Key: "modulus", Value: c.Modulus}) } if c.Replacement != "" { relabeling = append(relabeling, yaml.MapItem{Key: "replacement", Value: c.Replacement}) } if c.Action != "" { relabeling = append(relabeling, yaml.MapItem{Key: "action", Value: c.Action}) } return relabeling } // getNamespacesFromNamespaceSelector gets a list of namespaces to select based on // the given namespace selector, the given default namespace, and whether to ignore namespace selectors func getNamespacesFromNamespaceSelector(nsel *v1.NamespaceSelector, namespace string, ignoreNamespaceSelectors bool) []string { if ignoreNamespaceSelectors { return []string{namespace} } else if nsel.Any { return []string{} } else if len(nsel.MatchNames) == 0 { return []string{namespace} } return nsel.MatchNames } func (cg *ConfigGenerator) generateK8SSDConfig(namespaces []string, apiserverConfig *v1.APIServerConfig, basicAuthSecrets map[string]assets.BasicAuthCredentials, role 
string) yaml.MapItem { k8sSDConfig := yaml.MapSlice{ { Key: "role", Value: role, }, } if len(namespaces) != 0 { k8sSDConfig = append(k8sSDConfig, yaml.MapItem{ Key: "namespaces", Value: yaml.MapSlice{ { Key: "names", Value: namespaces, }, }, }) } if apiserverConfig != nil { k8sSDConfig = append(k8sSDConfig, yaml.MapItem{ Key: "api_server", Value: apiserverConfig.Host, }) if apiserverConfig.BasicAuth != nil && basicAuthSecrets != nil { if s, ok := basicAuthSecrets["apiserver"]; ok { k8sSDConfig = append(k8sSDConfig, yaml.MapItem{ Key: "basic_auth", Value: yaml.MapSlice{ {Key: "username", Value: s.Username}, {Key: "password", Value: s.Password}, }, }) } } if apiserverConfig.BearerToken != "" { k8sSDConfig = append(k8sSDConfig, yaml.MapItem{Key: "bearer_token", Value: apiserverConfig.BearerToken}) } if apiserverConfig.BearerTokenFile != "" { k8sSDConfig = append(k8sSDConfig, yaml.MapItem{Key: "bearer_token_file", Value: apiserverConfig.BearerTokenFile}) } // TODO: If we want to support secret refs for k8s service discovery tls // config as well, make sure to path the right namespace here. k8sSDConfig = addTLStoYaml(k8sSDConfig, "", apiserverConfig.TLSConfig) } return yaml.MapItem{ Key: "kubernetes_sd_configs", Value: []yaml.MapSlice{ k8sSDConfig, }, } } func (cg *ConfigGenerator) generateAlertmanagerConfig(version semver.Version, am v1.AlertmanagerEndpoints, apiserverConfig *v1.APIServerConfig, basicAuthSecrets map[string]assets.BasicAuthCredentials) yaml.MapSlice { if am.Scheme == "" { am.Scheme = "http" } if am.PathPrefix == "" { am.PathPrefix = "/" } cfg := yaml.MapSlice{ {Key: "path_prefix", Value: am.PathPrefix}, {Key: "scheme", Value: am.Scheme}, } if am.Timeout != nil { cfg = append(cfg, yaml.MapItem{Key: "timeout", Value: am.Timeout}) } // TODO: If we want to support secret refs for alertmanager config tls // config as well, make sure to path the right namespace here. 
cfg = addTLStoYaml(cfg, "", am.TLSConfig) cfg = append(cfg, cg.generateK8SSDConfig([]string{am.Namespace}, apiserverConfig, basicAuthSecrets, kubernetesSDRoleEndpoint)) if am.BearerTokenFile != "" { cfg = append(cfg, yaml.MapItem{Key: "bearer_token_file", Value: am.BearerTokenFile}) } if version.Major > 2 || (version.Major == 2 && version.Minor >= 11) { if am.APIVersion == "v1" || am.APIVersion == "v2" { cfg = append(cfg, yaml.MapItem{Key: "api_version", Value: am.APIVersion}) } } var relabelings []yaml.MapSlice relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "keep"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_service_name"}}, {Key: "regex", Value: am.Name}, }) if am.Port.StrVal != "" { relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "keep"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_endpoint_port_name"}}, {Key: "regex", Value: am.Port.String()}, }) } else if am.Port.IntVal != 0 { relabelings = append(relabelings, yaml.MapSlice{ {Key: "action", Value: "keep"}, {Key: "source_labels", Value: []string{"__meta_kubernetes_pod_container_port_number"}}, {Key: "regex", Value: am.Port.String()}, }) } cfg = append(cfg, yaml.MapItem{Key: "relabel_configs", Value: relabelings}) return cfg } func (cg *ConfigGenerator) generateRemoteReadConfig(version semver.Version, p *v1.Prometheus, basicAuthSecrets map[string]assets.BasicAuthCredentials) yaml.MapItem { cfgs := []yaml.MapSlice{} for i, spec := range p.Spec.RemoteRead { //defaults if spec.RemoteTimeout == "" { spec.RemoteTimeout = "30s" } cfg := yaml.MapSlice{ {Key: "url", Value: spec.URL}, {Key: "remote_timeout", Value: spec.RemoteTimeout}, } if spec.Name != "" && version.GTE(semver.MustParse("2.15.0")) { cfg = append(cfg, yaml.MapItem{Key: "name", Value: spec.Name}) } if len(spec.RequiredMatchers) > 0 { cfg = append(cfg, yaml.MapItem{Key: "required_matchers", Value: stringMapToMapSlice(spec.RequiredMatchers)}) } if spec.ReadRecent { cfg = append(cfg, yaml.MapItem{Key: "read_recent", Value: spec.ReadRecent}) } if spec.BasicAuth != nil { if s, ok := basicAuthSecrets[fmt.Sprintf("remoteRead/%d", i)]; ok { cfg = append(cfg, yaml.MapItem{ Key: "basic_auth", Value: yaml.MapSlice{ {Key: "username", Value: s.Username}, {Key: "password", Value: s.Password}, }, }) } } if spec.BearerToken != "" { cfg = append(cfg, yaml.MapItem{Key: "bearer_token", Value: spec.BearerToken}) } if spec.BearerTokenFile != "" { cfg = append(cfg, yaml.MapItem{Key: "bearer_token_file", Value: spec.BearerTokenFile}) } cfg = addTLStoYaml(cfg, p.ObjectMeta.Namespace, spec.TLSConfig) if spec.ProxyURL != "" { cfg = append(cfg, yaml.MapItem{Key: "proxy_url", Value: spec.ProxyURL}) } cfgs = append(cfgs, cfg) } return yaml.MapItem{ Key: "remote_read", Value: cfgs, } } func (cg *ConfigGenerator) generateRemoteWriteConfig(version semver.Version, p *v1.Prometheus, basicAuthSecrets map[string]assets.BasicAuthCredentials) yaml.MapItem { cfgs := []yaml.MapSlice{} for i, spec := range p.Spec.RemoteWrite { //defaults if spec.RemoteTimeout == "" { spec.RemoteTimeout = "30s" } cfg := yaml.MapSlice{ {Key: "url", Value: spec.URL}, {Key: "remote_timeout", Value: spec.RemoteTimeout}, } if len(spec.Headers) > 0 && version.GTE(semver.MustParse("2.25.0")) { cfg = append(cfg, yaml.MapItem{Key: "headers", Value: stringMapToMapSlice(spec.Headers)}) } if spec.Name != "" && version.GTE(semver.MustParse("2.15.0")) { cfg = append(cfg, yaml.MapItem{Key: "name", Value: spec.Name}) } if spec.WriteRelabelConfigs != nil { relabelings := 
[]yaml.MapSlice{} for _, c := range spec.WriteRelabelConfigs { relabeling := yaml.MapSlice{} if len(c.SourceLabels) > 0 { relabeling = append(relabeling, yaml.MapItem{Key: "source_labels", Value: c.SourceLabels}) } if c.Separator != "" { relabeling = append(relabeling, yaml.MapItem{Key: "separator", Value: c.Separator}) } if c.TargetLabel != "" { relabeling = append(relabeling, yaml.MapItem{Key: "target_label", Value: c.TargetLabel}) } if c.Regex != "" { relabeling = append(relabeling, yaml.MapItem{Key: "regex", Value: c.Regex}) } if c.Modulus != uint64(0) { relabeling = append(relabeling, yaml.MapItem{Key: "modulus", Value: c.Modulus}) } if c.Replacement != "" { relabeling = append(relabeling, yaml.MapItem{Key: "replacement", Value: c.Replacement}) } if c.Action != "" { relabeling = append(relabeling, yaml.MapItem{Key: "action", Value: c.Action}) } relabelings = append(relabelings, relabeling) } cfg = append(cfg, yaml.MapItem{Key: "write_relabel_configs", Value: relabelings}) } if spec.BasicAuth != nil { if s, ok := basicAuthSecrets[fmt.Sprintf("remoteWrite/%d", i)]; ok { cfg = append(cfg, yaml.MapItem{ Key: "basic_auth", Value: yaml.MapSlice{ {Key: "username", Value: s.Username}, {Key: "password", Value: s.Password}, }, }) } } if spec.BearerToken != "" { cfg = append(cfg, yaml.MapItem{Key: "bearer_token", Value: spec.BearerToken}) } if spec.BearerTokenFile != "" { cfg = append(cfg, yaml.MapItem{Key: "bearer_token_file", Value: spec.BearerTokenFile}) } cfg = addTLStoYaml(cfg, p.ObjectMeta.Namespace, spec.TLSConfig) if spec.ProxyURL != "" { cfg = append(cfg, yaml.MapItem{Key: "proxy_url", Value: spec.ProxyURL}) } if spec.QueueConfig != nil { queueConfig := yaml.MapSlice{} if spec.QueueConfig.Capacity != int(0) { queueConfig = append(queueConfig, yaml.MapItem{Key: "capacity", Value: spec.QueueConfig.Capacity}) } if version.GTE(semver.MustParse("2.6.0")) { if spec.QueueConfig.MinShards != int(0) { queueConfig = append(queueConfig, yaml.MapItem{Key: "min_shards", Value: spec.QueueConfig.MinShards}) } } if spec.QueueConfig.MaxShards != int(0) { queueConfig = append(queueConfig, yaml.MapItem{Key: "max_shards", Value: spec.QueueConfig.MaxShards}) } if spec.QueueConfig.MaxSamplesPerSend != int(0) { queueConfig = append(queueConfig, yaml.MapItem{Key: "max_samples_per_send", Value: spec.QueueConfig.MaxSamplesPerSend}) } if spec.QueueConfig.BatchSendDeadline != "" { queueConfig = append(queueConfig, yaml.MapItem{Key: "batch_send_deadline", Value: spec.QueueConfig.BatchSendDeadline}) } if spec.QueueConfig.MaxRetries != int(0) { queueConfig = append(queueConfig, yaml.MapItem{Key: "max_retries", Value: spec.QueueConfig.MaxRetries}) } if spec.QueueConfig.MinBackoff != "" { queueConfig = append(queueConfig, yaml.MapItem{Key: "min_backoff", Value: spec.QueueConfig.MinBackoff}) } if spec.QueueConfig.MaxBackoff != "" { queueConfig = append(queueConfig, yaml.MapItem{Key: "max_backoff", Value: spec.QueueConfig.MaxBackoff}) } cfg = append(cfg, yaml.MapItem{Key: "queue_config", Value: queueConfig}) } if spec.MetadataConfig != nil && version.GTE(semver.MustParse("2.23.0")) { metadataConfig := yaml.MapSlice{} metadataConfig = append(metadataConfig, yaml.MapItem{Key: "send", Value: spec.MetadataConfig.Send}) if spec.MetadataConfig.SendInterval != "" { metadataConfig = append(metadataConfig, yaml.MapItem{Key: "send_interval", Value: spec.MetadataConfig.SendInterval}) } cfg = append(cfg, yaml.MapItem{Key: "metadata_config", Value: metadataConfig}) } cfgs = append(cfgs, cfg) } return yaml.MapItem{ Key: 
"remote_write", Value: cfgs, } }
1
16131
Could we have a unit test for this?
prometheus-operator-prometheus-operator
go
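A minimal sketch of the unit test the review message asks for, written in the style of the package-internal tests in pkg/prometheus/promcfg_test.go. The test name, the example proxy value, and the substring assertion are illustrative assumptions, as is the companion ProxyURL field on v1.ProberSpec that the patch reads; the only imports beyond the package's existing ones are "strings" and "testing".

func TestProbeStaticTargetsConfigGenerationWithProxyURL(t *testing.T) {
	cg := &ConfigGenerator{}
	// Generate a scrape config for a Probe whose prober sets a proxy URL.
	cfg := cg.generateProbeConfig(
		semver.MustParse("2.25.0"),
		&v1.Probe{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "testprobe",
				Namespace: "default",
			},
			Spec: v1.ProbeSpec{
				ProberSpec: v1.ProberSpec{
					Scheme:   "http",
					URL:      "blackbox.exporter.io",
					Path:     "/probe",
					ProxyURL: "socks://myproxy:9095", // field added by the patch above
				},
				Module: "http_2xx",
				Targets: v1.ProbeTargets{
					StaticConfig: &v1.ProbeTargetStaticConfig{
						Targets: []string{"prometheus.io"},
					},
				},
			},
		},
		nil, // apiserverConfig
		nil, // basicAuthSecrets
		nil, // bearerTokens
		false, false, false, "",
	)
	out, err := yaml.Marshal(cfg)
	if err != nil {
		t.Fatal(err)
	}
	// The generated job must carry the proxy_url key through to Prometheus.
	if !strings.Contains(string(out), "proxy_url: socks://myproxy:9095") {
		t.Fatalf("expected proxy_url in generated config, got:\n%s", out)
	}
}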
@@ -477,9 +477,11 @@ func (c *temporalImpl) startHistory( } } + stoppedCh := make(chan struct{}) var historyService *history.Service app := fx.New( - fx.Supply(params), + fx.Supply(params, + stoppedCh), history.Module, fx.Populate(&historyService)) err = app.Err()
1
// The MIT License // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // // Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package host import ( "encoding/json" "fmt" "net" "sync" "time" "github.com/uber-go/tally" "github.com/uber/tchannel-go" "go.temporal.io/api/workflowservice/v1" sdkclient "go.temporal.io/sdk/client" "go.uber.org/fx" "go.temporal.io/server/common/persistence/visibility" esclient "go.temporal.io/server/common/persistence/visibility/store/elasticsearch/client" "google.golang.org/grpc" "go.temporal.io/server/api/adminservice/v1" "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/common" carchiver "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/archiver/provider" "go.temporal.io/server/common/authorization" "go.temporal.io/server/common/cluster" "go.temporal.io/server/common/config" "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/membership" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/persistence" persistenceClient "go.temporal.io/server/common/persistence/client" "go.temporal.io/server/common/resolver" "go.temporal.io/server/common/resource" "go.temporal.io/server/common/rpc" "go.temporal.io/server/service/frontend" "go.temporal.io/server/service/history" "go.temporal.io/server/service/matching" "go.temporal.io/server/service/worker" "go.temporal.io/server/service/worker/archiver" "go.temporal.io/server/service/worker/replicator" ) // Temporal hosts all of temporal services in one process type Temporal interface { Start() error Stop() GetAdminClient() adminservice.AdminServiceClient GetFrontendClient() workflowservice.WorkflowServiceClient GetHistoryClient() historyservice.HistoryServiceClient GetExecutionManager() persistence.ExecutionManager RefreshNamespaceCache() } type ( temporalImpl struct { frontendService resource.Resource matchingService resource.Resource historyServices []resource.Resource workerService resource.Resource adminClient adminservice.AdminServiceClient frontendClient workflowservice.WorkflowServiceClient historyClient historyservice.HistoryServiceClient logger log.Logger clusterMetadataConfig *config.ClusterMetadata persistenceConfig config.Persistence metadataMgr persistence.MetadataManager clusterMetadataMgr persistence.ClusterMetadataManager shardMgr 
persistence.ShardManager taskMgr persistence.TaskManager executionManager persistence.ExecutionManager namespaceReplicationQueue persistence.NamespaceReplicationQueue shutdownCh chan struct{} shutdownWG sync.WaitGroup clusterNo int // cluster number replicator *replicator.Replicator clientWorker archiver.ClientWorker archiverMetadata carchiver.ArchivalMetadata archiverProvider provider.ArchiverProvider historyConfig *HistoryConfig esConfig *esclient.Config esClient esclient.Client workerConfig *WorkerConfig mockAdminClient map[string]adminservice.AdminServiceClient namespaceReplicationTaskExecutor namespace.ReplicationTaskExecutor } // HistoryConfig contains configs for history service HistoryConfig struct { NumHistoryShards int32 NumHistoryHosts int HistoryCountLimitError int HistoryCountLimitWarn int } // TemporalParams contains everything needed to bootstrap Temporal TemporalParams struct { ClusterMetadataConfig *config.ClusterMetadata PersistenceConfig config.Persistence MetadataMgr persistence.MetadataManager ClusterMetadataManager persistence.ClusterMetadataManager ShardMgr persistence.ShardManager ExecutionManager persistence.ExecutionManager TaskMgr persistence.TaskManager NamespaceReplicationQueue persistence.NamespaceReplicationQueue Logger log.Logger ClusterNo int ArchiverMetadata carchiver.ArchivalMetadata ArchiverProvider provider.ArchiverProvider EnableReadHistoryFromArchival bool HistoryConfig *HistoryConfig ESConfig *esclient.Config ESClient esclient.Client WorkerConfig *WorkerConfig MockAdminClient map[string]adminservice.AdminServiceClient NamespaceReplicationTaskExecutor namespace.ReplicationTaskExecutor } membershipFactoryImpl struct { serviceName string hosts map[string][]string } ) // NewTemporal returns an instance that hosts full temporal in one process func NewTemporal(params *TemporalParams) *temporalImpl { return &temporalImpl{ logger: params.Logger, clusterMetadataConfig: params.ClusterMetadataConfig, persistenceConfig: params.PersistenceConfig, metadataMgr: params.MetadataMgr, clusterMetadataMgr: params.ClusterMetadataManager, shardMgr: params.ShardMgr, taskMgr: params.TaskMgr, executionManager: params.ExecutionManager, namespaceReplicationQueue: params.NamespaceReplicationQueue, shutdownCh: make(chan struct{}), clusterNo: params.ClusterNo, esConfig: params.ESConfig, esClient: params.ESClient, archiverMetadata: params.ArchiverMetadata, archiverProvider: params.ArchiverProvider, historyConfig: params.HistoryConfig, workerConfig: params.WorkerConfig, mockAdminClient: params.MockAdminClient, namespaceReplicationTaskExecutor: params.NamespaceReplicationTaskExecutor, } } func (c *temporalImpl) enableWorker() bool { return c.workerConfig.EnableArchiver || c.workerConfig.EnableReplicator } func (c *temporalImpl) Start() error { hosts := make(map[string][]string) hosts[common.FrontendServiceName] = []string{c.FrontendGRPCAddress()} hosts[common.MatchingServiceName] = []string{c.MatchingGRPCServiceAddress()} hosts[common.HistoryServiceName] = c.HistoryServiceAddress(3) if c.enableWorker() { hosts[common.WorkerServiceName] = []string{c.WorkerGRPCServiceAddress()} } // create temporal-system namespace, this must be created before starting // the services - so directly use the metadataManager to create this if err := c.createSystemNamespace(); err != nil { return err } var startWG sync.WaitGroup startWG.Add(2) go c.startHistory(hosts, &startWG) go c.startMatching(hosts, &startWG) startWG.Wait() startWG.Add(1) go c.startFrontend(hosts, &startWG) startWG.Wait() if 
c.enableWorker() { startWG.Add(1) go c.startWorker(hosts, &startWG) startWG.Wait() } return nil } func (c *temporalImpl) Stop() { if c.enableWorker() { c.shutdownWG.Add(4) c.workerService.Stop() } else { c.shutdownWG.Add(3) } c.frontendService.Stop() for _, historyService := range c.historyServices { historyService.Stop() } c.matchingService.Stop() if c.workerConfig.EnableReplicator { c.replicator.Stop() } if c.workerConfig.EnableArchiver { c.clientWorker.Stop() } close(c.shutdownCh) c.shutdownWG.Wait() } func (c *temporalImpl) FrontendGRPCAddress() string { switch c.clusterNo { case 0: return "127.0.0.1:7134" case 1: return "127.0.0.1:8134" case 2: return "127.0.0.1:9134" case 3: return "127.0.0.1:10134" default: return "127.0.0.1:7134" } } func (c *temporalImpl) FrontendRingpopAddress() string { switch c.clusterNo { case 0: return "127.0.0.1:7124" case 1: return "127.0.0.1:8124" case 2: return "127.0.0.1:9124" case 3: return "127.0.0.1:10124" default: return "127.0.0.1:7124" } } // penultimatePortDigit: 2 - ringpop, 3 - gRPC func (c *temporalImpl) HistoryServiceAddress(penultimatePortDigit int) []string { var hosts []string startPort := penultimatePortDigit * 10 switch c.clusterNo { case 0: startPort += 7201 case 1: startPort += 8201 case 2: startPort += 9201 case 3: startPort += 10201 default: startPort += 7201 } for i := 0; i < c.historyConfig.NumHistoryHosts; i++ { port := startPort + i hosts = append(hosts, fmt.Sprintf("127.0.0.1:%v", port)) } c.logger.Info("History hosts", tag.Addresses(hosts)) return hosts } func (c *temporalImpl) MatchingGRPCServiceAddress() string { switch c.clusterNo { case 0: return "127.0.0.1:7136" case 1: return "127.0.0.1:8136" case 2: return "127.0.0.1:9136" case 3: return "127.0.0.1:10136" default: return "127.0.0.1:7136" } } func (c *temporalImpl) MatchingServiceRingpopAddress() string { switch c.clusterNo { case 0: return "127.0.0.1:7126" case 1: return "127.0.0.1:8126" case 2: return "127.0.0.1:9126" case 3: return "127.0.0.1:10126" default: return "127.0.0.1:7126" } } func (c *temporalImpl) WorkerGRPCServiceAddress() string { switch c.clusterNo { case 0: return "127.0.0.1:7138" case 1: return "127.0.0.1:8138" case 2: return "127.0.0.1:9138" case 3: return "127.0.0.1:10138" default: return "127.0.0.1:7138" } } func (c *temporalImpl) WorkerServiceRingpopAddress() string { switch c.clusterNo { case 0: return "127.0.0.1:7128" case 1: return "127.0.0.1:8128" case 2: return "127.0.0.1:9128" case 3: return "127.0.0.1:10128" default: return "127.0.0.1:7128" } } func (c *temporalImpl) GetAdminClient() adminservice.AdminServiceClient { return c.adminClient } func (c *temporalImpl) GetFrontendClient() workflowservice.WorkflowServiceClient { return c.frontendClient } func (c *temporalImpl) GetHistoryClient() historyservice.HistoryServiceClient { return c.historyClient } func (c *temporalImpl) startFrontend(hosts map[string][]string, startWG *sync.WaitGroup) { params := &resource.BootstrapParams{} params.DCRedirectionPolicy = config.DCRedirectionPolicy{} params.Name = common.FrontendServiceName params.Logger = c.logger params.ThrottledLogger = c.logger params.RPCFactory = newRPCFactoryImpl(common.FrontendServiceName, c.FrontendGRPCAddress(), c.FrontendRingpopAddress(), c.logger) params.MetricsScope = tally.NewTestScope(common.FrontendServiceName, make(map[string]string)) params.MembershipFactoryInitializer = func(x persistenceClient.Bean, y log.Logger) (resource.MembershipMonitorFactory, error) { return newMembershipFactory(params.Name, hosts), nil } 
params.ClusterMetadataConfig = c.clusterMetadataConfig params.MetricsClient = metrics.NewClient(params.MetricsScope, metrics.GetMetricsServiceIdx(params.Name, c.logger)) params.DynamicConfigClient = newIntegrationConfigClient(dynamicconfig.NewNoopClient()) params.ArchivalMetadata = c.archiverMetadata params.ArchiverProvider = c.archiverProvider params.ESConfig = c.esConfig params.ESClient = c.esClient params.Authorizer = authorization.NewNoopAuthorizer() var err error params.PersistenceConfig, err = copyPersistenceConfig(c.persistenceConfig) if err != nil { c.logger.Fatal("Failed to copy persistence config for frontend", tag.Error(err)) } params.PersistenceServiceResolver = resolver.NewNoopResolver() if c.esConfig != nil { esDataStoreName := "es-visibility" params.PersistenceConfig.AdvancedVisibilityStore = esDataStoreName params.PersistenceConfig.DataStores[esDataStoreName] = config.DataStore{ Elasticsearch: c.esConfig, } } frontendService, err := frontend.NewService(params) if err != nil { params.Logger.Fatal("unable to start frontend service", tag.Error(err)) } if c.mockAdminClient != nil { clientBean := frontendService.GetClientBean() if clientBean != nil { for serviceName, client := range c.mockAdminClient { clientBean.SetRemoteAdminClient(serviceName, client) } } } c.frontendService = frontendService connection := params.RPCFactory.CreateFrontendGRPCConnection(c.FrontendGRPCAddress()) c.frontendClient = NewFrontendClient(connection) c.adminClient = NewAdminClient(connection) go frontendService.Start() startWG.Done() <-c.shutdownCh c.shutdownWG.Done() } func (c *temporalImpl) startHistory( hosts map[string][]string, startWG *sync.WaitGroup, ) { membershipPorts := c.HistoryServiceAddress(2) for i, grpcPort := range c.HistoryServiceAddress(3) { params := &resource.BootstrapParams{} params.Name = common.HistoryServiceName params.Logger = c.logger params.ThrottledLogger = c.logger params.RPCFactory = newRPCFactoryImpl(common.HistoryServiceName, grpcPort, membershipPorts[i], c.logger) params.MetricsScope = tally.NewTestScope(common.HistoryServiceName, make(map[string]string)) params.MembershipFactoryInitializer = func(x persistenceClient.Bean, y log.Logger) (resource.MembershipMonitorFactory, error) { return newMembershipFactory(params.Name, hosts), nil } params.ClusterMetadataConfig = c.clusterMetadataConfig params.MetricsClient = metrics.NewClient(params.MetricsScope, metrics.GetMetricsServiceIdx(params.Name, c.logger)) integrationClient := newIntegrationConfigClient(dynamicconfig.NewNoopClient()) c.overrideHistoryDynamicConfig(integrationClient) params.DynamicConfigClient = integrationClient var err error params.SdkClient, err = sdkclient.NewClient(sdkclient.Options{ HostPort: c.FrontendGRPCAddress(), Namespace: common.SystemLocalNamespace, MetricsScope: params.MetricsScope, ConnectionOptions: sdkclient.ConnectionOptions{ DisableHealthCheck: true, }, }) if err != nil { c.logger.Fatal("Failed to create client for history", tag.Error(err)) } params.ArchivalMetadata = c.archiverMetadata params.ArchiverProvider = c.archiverProvider params.ESConfig = c.esConfig params.ESClient = c.esClient params.PersistenceConfig, err = copyPersistenceConfig(c.persistenceConfig) if err != nil { c.logger.Fatal("Failed to copy persistence config for history", tag.Error(err)) } params.PersistenceServiceResolver = resolver.NewNoopResolver() if c.esConfig != nil { esDataStoreName := "es-visibility" params.PersistenceConfig.AdvancedVisibilityStore = esDataStoreName 
params.PersistenceConfig.DataStores[esDataStoreName] = config.DataStore{ Elasticsearch: c.esConfig, } } var historyService *history.Service app := fx.New( fx.Supply(params), history.Module, fx.Populate(&historyService)) err = app.Err() if err != nil { params.Logger.Fatal("unable to construct history service", tag.Error(err)) } if c.mockAdminClient != nil { clientBean := historyService.GetClientBean() if clientBean != nil { for serviceName, client := range c.mockAdminClient { clientBean.SetRemoteAdminClient(serviceName, client) } } } // TODO: this is not correct when there are multiple history hosts as later client will overwrite previous ones. // However current interface for getting history client doesn't specify which client it needs and the tests that use this API // depends on the fact that there's only one history host. // Need to change those tests and modify the interface for getting history client. historyConnection, err := rpc.Dial(c.HistoryServiceAddress(3)[0], nil, c.logger) if err != nil { c.logger.Fatal("Failed to create connection for history", tag.Error(err)) } c.historyClient = NewHistoryClient(historyConnection) c.historyServices = append(c.historyServices, historyService) go historyService.Start() } startWG.Done() <-c.shutdownCh c.shutdownWG.Done() } func (c *temporalImpl) startMatching(hosts map[string][]string, startWG *sync.WaitGroup) { params := &resource.BootstrapParams{} params.Name = common.MatchingServiceName params.Logger = c.logger params.ThrottledLogger = c.logger params.RPCFactory = newRPCFactoryImpl(common.MatchingServiceName, c.MatchingGRPCServiceAddress(), c.MatchingServiceRingpopAddress(), c.logger) params.MetricsScope = tally.NewTestScope(common.MatchingServiceName, make(map[string]string)) params.MembershipFactoryInitializer = func(x persistenceClient.Bean, y log.Logger) (resource.MembershipMonitorFactory, error) { return newMembershipFactory(params.Name, hosts), nil } params.ClusterMetadataConfig = c.clusterMetadataConfig params.MetricsClient = metrics.NewClient(params.MetricsScope, metrics.GetMetricsServiceIdx(params.Name, c.logger)) params.DynamicConfigClient = newIntegrationConfigClient(dynamicconfig.NewNoopClient()) params.ArchivalMetadata = c.archiverMetadata params.ArchiverProvider = c.archiverProvider var err error params.PersistenceConfig, err = copyPersistenceConfig(c.persistenceConfig) if err != nil { c.logger.Fatal("Failed to copy persistence config for matching", tag.Error(err)) } params.PersistenceServiceResolver = resolver.NewNoopResolver() matchingService, err := matching.NewService(params) if err != nil { params.Logger.Fatal("unable to start matching service", tag.Error(err)) } if c.mockAdminClient != nil { clientBean := matchingService.GetClientBean() if clientBean != nil { for serviceName, client := range c.mockAdminClient { clientBean.SetRemoteAdminClient(serviceName, client) } } } c.matchingService = matchingService go c.matchingService.Start() startWG.Done() <-c.shutdownCh c.shutdownWG.Done() } func (c *temporalImpl) startWorker(hosts map[string][]string, startWG *sync.WaitGroup) { params := &resource.BootstrapParams{} params.Name = common.WorkerServiceName params.Logger = c.logger params.ThrottledLogger = c.logger params.RPCFactory = newRPCFactoryImpl(common.WorkerServiceName, c.WorkerGRPCServiceAddress(), c.WorkerServiceRingpopAddress(), c.logger) params.MetricsScope = tally.NewTestScope(common.WorkerServiceName, make(map[string]string)) params.MembershipFactoryInitializer = func(x persistenceClient.Bean, y log.Logger) 
(resource.MembershipMonitorFactory, error) { return newMembershipFactory(params.Name, hosts), nil } params.ClusterMetadataConfig = c.clusterMetadataConfig params.MetricsClient = metrics.NewClient(params.MetricsScope, metrics.GetMetricsServiceIdx(params.Name, c.logger)) params.DynamicConfigClient = newIntegrationConfigClient(dynamicconfig.NewNoopClient()) params.ArchivalMetadata = c.archiverMetadata params.ArchiverProvider = c.archiverProvider var err error params.PersistenceConfig, err = copyPersistenceConfig(c.persistenceConfig) if err != nil { c.logger.Fatal("Failed to copy persistence config for worker", tag.Error(err)) } params.PersistenceServiceResolver = resolver.NewNoopResolver() params.SdkClient, err = sdkclient.NewClient(sdkclient.Options{ HostPort: c.FrontendGRPCAddress(), Namespace: common.SystemLocalNamespace, MetricsScope: params.MetricsScope, ConnectionOptions: sdkclient.ConnectionOptions{ DisableHealthCheck: true, }, }) if err != nil { c.logger.Fatal("Failed to create client for worker", tag.Error(err)) } service, err := resource.New( params, common.WorkerServiceName, dynamicconfig.GetIntPropertyFn(5000), dynamicconfig.GetIntPropertyFn(5000), dynamicconfig.GetIntPropertyFn(10000), ) if err != nil { params.Logger.Fatal("unable to create worker service", tag.Error(err)) } c.workerService = service service.Start() clusterMetadata := cluster.NewTestClusterMetadata(c.clusterMetadataConfig) var replicatorNamespaceCache namespace.Cache if c.workerConfig.EnableReplicator { metadataManager := persistence.NewMetadataPersistenceMetricsClient(c.metadataMgr, service.GetMetricsClient(), c.logger) replicatorNamespaceCache = namespace.NewNamespaceCache(metadataManager, clusterMetadata, service.GetMetricsClient(), service.GetLogger()) replicatorNamespaceCache.Start() c.startWorkerReplicator(service, clusterMetadata) } var clientWorkerNamespaceCache namespace.Cache if c.workerConfig.EnableArchiver { metadataProxyManager := persistence.NewMetadataPersistenceMetricsClient(c.metadataMgr, service.GetMetricsClient(), c.logger) clientWorkerNamespaceCache = namespace.NewNamespaceCache(metadataProxyManager, clusterMetadata, service.GetMetricsClient(), service.GetLogger()) clientWorkerNamespaceCache.Start() c.startWorkerClientWorker(params, service, clientWorkerNamespaceCache) } startWG.Done() <-c.shutdownCh if c.workerConfig.EnableReplicator { replicatorNamespaceCache.Stop() } if c.workerConfig.EnableArchiver { clientWorkerNamespaceCache.Stop() } c.shutdownWG.Done() } func (c *temporalImpl) startWorkerReplicator(service resource.Resource, clusterMetadata cluster.Metadata) { serviceResolver, err := service.GetMembershipMonitor().GetResolver(common.WorkerServiceName) if err != nil { c.logger.Fatal("Fail to start replicator when start worker", tag.Error(err)) } c.replicator = replicator.NewReplicator( clusterMetadata, service.GetClientBean(), c.logger, service.GetMetricsClient(), service.GetHostInfo(), serviceResolver, c.namespaceReplicationQueue, c.namespaceReplicationTaskExecutor, ) c.replicator.Start() } func (c *temporalImpl) startWorkerClientWorker(params *resource.BootstrapParams, service resource.Resource, namespaceCache namespace.Cache) { workerConfig := worker.NewConfig(params) workerConfig.ArchiverConfig.ArchiverConcurrency = dynamicconfig.GetIntPropertyFn(10) bc := &archiver.BootstrapContainer{ SdkClient: params.SdkClient, MetricsClient: service.GetMetricsClient(), Logger: c.logger, HistoryV2Manager: c.executionManager, NamespaceCache: namespaceCache, Config: workerConfig.ArchiverConfig, 
ArchiverProvider: c.archiverProvider, } c.clientWorker = archiver.NewClientWorker(bc) if err := c.clientWorker.Start(); err != nil { c.clientWorker.Stop() c.logger.Fatal("Fail to start archiver when start worker", tag.Error(err)) } } func (c *temporalImpl) createSystemNamespace() error { err := c.metadataMgr.InitializeSystemNamespaces(c.clusterMetadataConfig.CurrentClusterName) if err != nil { return fmt.Errorf("failed to create temporal-system namespace: %v", err) } return nil } func (c *temporalImpl) GetExecutionManager() persistence.ExecutionManager { return c.executionManager } func (c *temporalImpl) overrideHistoryDynamicConfig(client *dynamicClient) { client.OverrideValue(dynamicconfig.ReplicationTaskProcessorStartWait, time.Nanosecond) if c.workerConfig.EnableIndexer { client.OverrideValue(dynamicconfig.AdvancedVisibilityWritingMode, visibility.AdvancedVisibilityWritingModeDual) } if c.historyConfig.HistoryCountLimitWarn != 0 { client.OverrideValue(dynamicconfig.HistoryCountLimitWarn, c.historyConfig.HistoryCountLimitWarn) } if c.historyConfig.HistoryCountLimitError != 0 { client.OverrideValue(dynamicconfig.HistoryCountLimitError, c.historyConfig.HistoryCountLimitError) } } func (c *temporalImpl) RefreshNamespaceCache() { c.frontendService.GetNamespaceCache().Refresh() c.matchingService.GetNamespaceCache().Refresh() for _, r := range c.historyServices { r.GetNamespaceCache().Refresh() } if c.workerService != nil { c.workerService.GetNamespaceCache().Refresh() } } // copyPersistenceConfig makes a deepcopy of persistence config. // This is just a temp fix for the race condition of persistence config. // The race condition happens because all the services are using the same datastore map in the config. // Also all services will retry to modify the maxQPS field in the datastore during start up and use the modified maxQPS value to create a persistence factory. 
func copyPersistenceConfig(pConfig config.Persistence) (config.Persistence, error) { copiedDataStores := make(map[string]config.DataStore) for name, value := range pConfig.DataStores { copiedDataStore := config.DataStore{} encodedDataStore, err := json.Marshal(value) if err != nil { return pConfig, err } if err = json.Unmarshal(encodedDataStore, &copiedDataStore); err != nil { return pConfig, err } copiedDataStores[name] = copiedDataStore } pConfig.DataStores = copiedDataStores return pConfig, nil } func newMembershipFactory(serviceName string, hosts map[string][]string) resource.MembershipMonitorFactory { return &membershipFactoryImpl{ serviceName: serviceName, hosts: hosts, } } func (p *membershipFactoryImpl) GetMembershipMonitor() (membership.Monitor, error) { return newSimpleMonitor(p.serviceName, p.hosts), nil } type rpcFactoryImpl struct { serviceName string ringpopServiceName string grpcHostPort string ringpopHostPort string logger log.Logger sync.Mutex listener net.Listener ringpopChannel *tchannel.Channel serverCfg config.GroupTLS } func (c *rpcFactoryImpl) GetFrontendGRPCServerOptions() ([]grpc.ServerOption, error) { return nil, nil } func (c *rpcFactoryImpl) GetInternodeGRPCServerOptions() ([]grpc.ServerOption, error) { return nil, nil } func (c *rpcFactoryImpl) CreateFrontendGRPCConnection(hostName string) *grpc.ClientConn { return c.CreateGRPCConnection(hostName) } func (c *rpcFactoryImpl) CreateInternodeGRPCConnection(hostName string) *grpc.ClientConn { return c.CreateGRPCConnection(hostName) } func newRPCFactoryImpl(sName, grpcHostPort, ringpopHostPort string, logger log.Logger) common.RPCFactory { return &rpcFactoryImpl{ serviceName: sName, grpcHostPort: grpcHostPort, ringpopHostPort: ringpopHostPort, logger: logger, } } func (c *rpcFactoryImpl) GetGRPCListener() net.Listener { if c.listener != nil { return c.listener } c.Lock() defer c.Unlock() if c.listener == nil { var err error c.listener, err = net.Listen("tcp", c.grpcHostPort) if err != nil { c.logger.Fatal("Failed create gRPC listener", tag.Error(err), tag.Service(c.serviceName), tag.Address(c.grpcHostPort)) } c.logger.Info("Created gRPC listener", tag.Service(c.serviceName), tag.Address(c.grpcHostPort)) } return c.listener } func (c *rpcFactoryImpl) GetRingpopChannel() *tchannel.Channel { if c.ringpopChannel != nil { return c.ringpopChannel } c.Lock() defer c.Unlock() if c.ringpopChannel == nil { ringpopServiceName := fmt.Sprintf("%v-ringpop", c.serviceName) var err error c.ringpopChannel, err = tchannel.NewChannel(ringpopServiceName, nil) if err != nil { c.logger.Fatal("Failed to create ringpop TChannel", tag.Error(err)) } err = c.ringpopChannel.ListenAndServe(c.ringpopHostPort) if err != nil { c.logger.Fatal("Failed to start ringpop listener", tag.Error(err), tag.Address(c.ringpopHostPort)) } } return c.ringpopChannel } // CreateGRPCConnection creates connection for gRPC calls func (c *rpcFactoryImpl) CreateGRPCConnection(hostName string) *grpc.ClientConn { connection, err := rpc.Dial(hostName, nil, c.logger) if err != nil { c.logger.Fatal("Failed to create gRPC connection", tag.Error(err)) } return connection }
idx: 1
id: 12737
msg: nit: put params in new line
proj: temporalio-temporal
lang: go
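The row does not record which call this nit targets. One plausible reading, reusing the `resource.New` call from `startWorker` in the file above (an illustration of the suggested layout only, not standalone code), is to wrap the argument list so each parameter sits on its own line:

	service, err := resource.New(
		params,
		common.WorkerServiceName,
		dynamicconfig.GetIntPropertyFn(5000),
		dynamicconfig.GetIntPropertyFn(5000),
		dynamicconfig.GetIntPropertyFn(10000),
	)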
patch:
@@ -100,13 +100,15 @@ export default function DateRangeSelector() {
 				'mdc-button--dropdown',
 				'googlesitekit-header__dropdown',
 				'googlesitekit-header__date-range-selector-menu',
+				'googlesitekit-border-radius-round--phone',
+				'googlesitekit-button-icon--phone',
 				{
 					'googlesitekit-header__date-range-selector-menu--has-unified-dashboard': unifiedDashboardEnabled,
 				}
 			) }
 			text
 			onClick={ handleMenu }
-			icon={ <DateRangeIcon width="18" height="20" /> }
+			icon={ <DateRangeIcon width="20" height="20" /> }
 			aria-haspopup="menu"
 			aria-expanded={ menuOpen }
 			aria-controls="date-range-selector-menu"
y: 1
oldf:
/** * Date range selector component. * * Site Kit by Google, Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * External dependencies */ import { useClickAway } from 'react-use'; import classnames from 'classnames'; /** * WordPress dependencies */ import { useCallback, useRef, useState, useContext } from '@wordpress/element'; import { ESCAPE, TAB } from '@wordpress/keycodes'; /** * Internal dependencies */ import Data from 'googlesitekit-data'; import DateRangeIcon from '../../svg/date-range.svg'; import { CORE_USER } from '../googlesitekit/datastore/user/constants'; import { useKeyCodesInside } from '../hooks/useKeyCodesInside'; import { getAvailableDateRanges } from '../util/date-range'; import ViewContextContext from './Root/ViewContextContext'; import Menu from './Menu'; import Button from './Button'; import { trackEvent } from '../util'; import { useFeature } from '../hooks/useFeature'; import { CORE_UI } from '../googlesitekit/datastore/ui/constants'; const { useSelect, useDispatch } = Data; export default function DateRangeSelector() { const unifiedDashboardEnabled = useFeature( 'unifiedDashboard' ); const ranges = getAvailableDateRanges(); const dateRange = useSelect( ( select ) => select( CORE_USER ).getDateRange() ); const { setDateRange } = useDispatch( CORE_USER ); const { resetInViewHook } = useDispatch( CORE_UI ); const [ menuOpen, setMenuOpen ] = useState( false ); const menuWrapperRef = useRef(); const viewContext = useContext( ViewContextContext ); useClickAway( menuWrapperRef, () => setMenuOpen( false ) ); useKeyCodesInside( [ ESCAPE, TAB ], menuWrapperRef, () => setMenuOpen( false ) ); const handleMenu = useCallback( () => { setMenuOpen( ! menuOpen ); }, [ menuOpen ] ); const handleMenuItemSelect = useCallback( ( index ) => { const newDateRange = Object.values( ranges )[ index ].slug; if ( dateRange !== newDateRange ) { trackEvent( `${ viewContext }_headerbar`, 'change_daterange', newDateRange ); } resetInViewHook(); setDateRange( newDateRange ); setMenuOpen( false ); }, [ ranges, dateRange, resetInViewHook, setDateRange, viewContext ] ); const currentDateRangeLabel = ranges[ dateRange ]?.label; const menuItems = Object.values( ranges ).map( ( range ) => range.label ); return ( <div ref={ menuWrapperRef } className="googlesitekit-date-range-selector googlesitekit-dropdown-menu mdc-menu-surface--anchor" > <Button className={ classnames( 'mdc-button--dropdown', 'googlesitekit-header__dropdown', 'googlesitekit-header__date-range-selector-menu', { 'googlesitekit-header__date-range-selector-menu--has-unified-dashboard': unifiedDashboardEnabled, } ) } text onClick={ handleMenu } icon={ <DateRangeIcon width="18" height="20" /> } aria-haspopup="menu" aria-expanded={ menuOpen } aria-controls="date-range-selector-menu" > { currentDateRangeLabel } </Button> <Menu menuOpen={ menuOpen } menuItems={ menuItems } onSelected={ handleMenuItemSelect } id="date-range-selector-menu" className="googlesitekit-width-auto" /> </div> ); }
idx: 1
id: 42547
msg: The button for the date range selector has an incorrect width, since its `padding-right: 8px` sets the width to `44px`. Can you review to make the button `36px` on small screens, as per the AC?
proj: google-site-kit-wp
lang: js
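For the arithmetic behind the `36px` target (our reading; the row itself does not spell it out): with the icon bumped from 18px to 20px wide and 8px of padding on each side, the button comes to 20 + 2 × 8 = 36px on small screens, matching the AC. The two added `--phone` classes presumably supply those small-screen padding and border-radius rules.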
patch:
@@ -2836,8 +2836,13 @@ client_process_bb(dcontext_t *dcontext, build_bb_t *bb)
 # ifdef X86
     if (!d_r_is_avx512_code_in_use()) {
         if (ZMM_ENABLED()) {
-            if (instr_may_write_zmm_register(inst))
+            if (instr_may_write_zmm_register(inst)) {
+                LOG(THREAD, LOG_INTERP, 2, "Detected AVX-512 code in use\n");
+                SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
                 d_r_set_avx512_code_in_use(true);
+                proc_set_num_simd_saved(MCXT_NUM_SIMD_SLOTS);
+                SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
+            }
         }
     }
 # endif
y: 1
oldf:
/* ********************************************************** * Copyright (c) 2011-2019 Google, Inc. All rights reserved. * Copyright (c) 2001-2010 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* Copyright (c) 2003-2007 Determina Corp. */ /* Copyright (c) 2001-2003 Massachusetts Institute of Technology */ /* Copyright (c) 2001 Hewlett-Packard Company */ /* * interp.c - interpreter used for native trace selection */ #include "../globals.h" #include "../link.h" #include "../fragment.h" #include "../emit.h" #include "../dispatch.h" #include "../fcache.h" #include "../monitor.h" /* for trace_abort and monitor_data_t */ #include "arch.h" #include "instr.h" #include "instr_create.h" #include "instrlist.h" #include "decode.h" #include "decode_fast.h" #include "disassemble.h" #include "instrument.h" #include "../hotpatch.h" #ifdef RETURN_AFTER_CALL # include "../rct.h" #endif #ifdef WINDOWS # include "ntdll.h" /* for EXCEPTION_REGISTRATION */ # include "../nudge.h" /* for generic_nudge_target() address */ #endif #include "../perscache.h" #include "../native_exec.h" #include "../jit_opt.h" #ifdef CHECK_RETURNS_SSE2 # include <setjmp.h> /* for warning when see libc setjmp */ #endif #ifdef VMX86_SERVER # include "vmkuw.h" /* VMKUW_SYSCALL_GATEWAY */ #endif #ifdef ANNOTATIONS # include "../annotations.h" #endif #ifdef AARCH64 # include "build_ldstex.h" #endif enum { DIRECT_XFER_LENGTH = 5 }; /* forward declarations */ static void process_nops_for_trace(dcontext_t *dcontext, instrlist_t *ilist, uint flags _IF_DEBUG(bool recreating)); static int fixup_last_cti(dcontext_t *dcontext, instrlist_t *trace, app_pc next_tag, uint next_flags, uint trace_flags, fragment_t *prev_f, linkstub_t *prev_l, bool record_translation, uint *num_exits_deleted /*OUT*/, /* If non-NULL, only looks inside trace between these two */ instr_t *start_instr, instr_t *end_instr); bool mangle_trace(dcontext_t *dcontext, instrlist_t *ilist, monitor_data_t *md); /* we use a branch limit of 1 to make it easier for the trace * creation mechanism to stitch 
basic blocks together */ #define BRANCH_LIMIT 1 /* we limit total bb size to handle cases like infinite loop or sequence * of calls. * also, we have a limit on fragment body sizes, which should be impossible * to break since x86 instrs are max 17 bytes and we only modify ctis. * Although...selfmod mangling does really expand fragments! * -selfmod_max_writes helps for selfmod bbs (case 7893/7909). * System call mangling is also large, for degenerate cases like tests/linux/infinite. * PR 215217: also client additions: we document and assert. * FIXME: need better way to know how big will get, b/c we can construct * cases that will trigger the size assertion! */ /* define replaced by -max_bb_instrs option */ /* exported so micro routines can assert whether held */ DECLARE_CXTSWPROT_VAR(mutex_t bb_building_lock, INIT_LOCK_FREE(bb_building_lock)); /* i#1111: we do not use the lock until the 2nd thread is created */ volatile bool bb_lock_start; #if defined(INTERNAL) || defined(DEBUG) || defined(CLIENT_INTERFACE) static file_t bbdump_file = INVALID_FILE; #endif #ifdef DEBUG DECLARE_NEVERPROT_VAR(uint debug_bb_count, 0); #endif /* initialization */ void interp_init() { #if defined(INTERNAL) || defined(DEBUG) || defined(CLIENT_INTERFACE) if (INTERNAL_OPTION(bbdump_tags)) { bbdump_file = open_log_file("bbs", NULL, 0); ASSERT(bbdump_file != INVALID_FILE); } #endif } #ifdef CUSTOM_TRACES_RET_REMOVAL # ifdef DEBUG /* don't bother with adding lock */ static int num_rets_removed; # endif #endif /* cleanup */ void interp_exit() { #if defined(INTERNAL) || defined(DEBUG) || defined(CLIENT_INTERFACE) if (INTERNAL_OPTION(bbdump_tags)) { close_log_file(bbdump_file); } #endif DELETE_LOCK(bb_building_lock); LOG(GLOBAL, LOG_INTERP | LOG_STATS, 1, "Total application code seen: %d KB\n", GLOBAL_STAT(app_code_seen) / 1024); #ifdef CUSTOM_TRACES_RET_REMOVAL # ifdef DEBUG LOG(GLOBAL, LOG_INTERP | LOG_STATS, 1, "Total rets removed: %d\n", num_rets_removed); # endif #endif } /**************************************************************************** **************************************************************************** * * B A S I C B L O C K B U I L D I N G */ /* we have a lot of data to pass around so we package it in this struct * so we can have separate routines for readability */ typedef struct { /* in */ app_pc start_pc; bool app_interp; /* building bb to interp app, as opposed to for pc * translation or figuring out what pages a bb touches? */ bool for_cache; /* normal to-be-executed build? */ bool record_vmlist; /* should vmareas be updated? */ bool mangle_ilist; /* should bb ilist be mangled? */ bool record_translation; /* store translation info for each instr_t? */ bool has_bb_building_lock; /* usually ==for_cache; used for aborting bb building */ bool checked_start_vmarea; /* caller called check_new_page_start() on start_pc */ file_t outf; /* send disassembly and notes to a file? * we use this mainly for dumping trace origins */ app_pc stop_pc; /* Optional: NULL for normal termination rules. * Only checked for full_decode. */ #ifdef CLIENT_INTERFACE bool pass_to_client; /* pass to client, if a bb hook exists; * we store this up front to avoid race conditions * between full_decode setting and hook calling time. */ bool post_client; /* has the client already processed the bb? 
*/ bool for_trace; /* PR 299808: we tell client if building a trace */ #endif /* in and out */ overlap_info_t *overlap_info; /* if non-null, records overlap information here; * caller must initialize region_start and region_end */ /* out */ instrlist_t *ilist; uint flags; void *vmlist; app_pc end_pc; bool native_exec; /* replace cur ilist with a native_exec version */ bool native_call; /* the gateway is a call */ #ifdef CLIENT_INTERFACE instrlist_t **unmangled_ilist; /* PR 299808: clone ilist pre-mangling */ #endif /* internal usage only */ bool full_decode; /* decode every instruction into a separate instr_t? */ bool follow_direct; /* elide unconditional branches? */ bool check_vm_area; /* whether to call check_thread_vm_area() */ uint num_elide_jmp; uint num_elide_call; app_pc last_page; app_pc cur_pc; app_pc instr_start; app_pc checked_end; /* end of current vmarea checked */ cache_pc exit_target; /* fall-through target of final instr */ uint exit_type; /* indirect branch type */ ibl_branch_type_t ibl_branch_type; /* indirect branch type as an IBL selector */ #ifdef UNIX bool invalid_instr_hack; #endif instr_t *instr; /* the current instr */ int eflags; app_pc pretend_pc; /* selfmod only: decode from separate pc */ #ifdef ARM dr_pred_type_t svc_pred; /* predicate for conditional svc */ #endif DEBUG_DECLARE(bool initialized;) } build_bb_t; /* forward decl */ static inline bool bb_process_syscall(dcontext_t *dcontext, build_bb_t *bb); static void init_build_bb(build_bb_t *bb, app_pc start_pc, bool app_interp, bool for_cache, bool mangle_ilist, bool record_translation, file_t outf, uint known_flags, overlap_info_t *overlap_info) { memset(bb, 0, sizeof(*bb)); #if defined(LINUX) && defined(X86_32) /* With SA_RESTART (i#2659) we end up interpreting the int 0x80 in vsyscall, * whose fall-through hits our hook. We avoid interpreting our own hook * by shifting it to the displaced pc. */ if (DYNAMO_OPTION(hook_vsyscall) && start_pc == vsyscall_sysenter_return_pc) start_pc = vsyscall_sysenter_displaced_pc; #endif bb->check_vm_area = true; bb->start_pc = start_pc; bb->app_interp = app_interp; bb->for_cache = for_cache; if (bb->for_cache) bb->record_vmlist = true; bb->mangle_ilist = mangle_ilist; bb->record_translation = record_translation; bb->outf = outf; bb->overlap_info = overlap_info; bb->follow_direct = !TEST(FRAG_SELFMOD_SANDBOXED, known_flags); bb->flags = known_flags; bb->ibl_branch_type = IBL_GENERIC; /* initialization only */ #ifdef ARM bb->svc_pred = DR_PRED_NONE; #endif DODEBUG(bb->initialized = true;); } static void reset_overlap_info(dcontext_t *dcontext, build_bb_t *bb) { bb->overlap_info->start_pc = bb->start_pc; bb->overlap_info->min_pc = bb->start_pc; bb->overlap_info->max_pc = bb->start_pc; bb->overlap_info->contiguous = true; bb->overlap_info->overlap = false; } static void update_overlap_info(dcontext_t *dcontext, build_bb_t *bb, app_pc new_pc, bool jmp) { if (new_pc < bb->overlap_info->min_pc) bb->overlap_info->min_pc = new_pc; if (new_pc > bb->overlap_info->max_pc) bb->overlap_info->max_pc = new_pc; /* we get called at end of all contiguous intervals, so ignore jmps */ LOG(THREAD, LOG_ALL, 5, "\t app_bb_overlaps " PFX ".." PFX " %s\n", bb->last_page, new_pc, jmp ? 
"jmp" : ""); if (!bb->overlap_info->overlap && !jmp) { /* contiguous interval: prev_pc..new_pc (open-ended) */ if (bb->last_page < bb->overlap_info->region_end && new_pc > bb->overlap_info->region_start) { LOG(THREAD_GET, LOG_ALL, 5, "\t it overlaps!\n"); bb->overlap_info->overlap = true; } } if (bb->overlap_info->contiguous && jmp) bb->overlap_info->contiguous = false; } #ifdef DEBUG # define BBPRINT(bb, level, ...) \ do { \ LOG(THREAD, LOG_INTERP, level, __VA_ARGS__); \ if (bb->outf != INVALID_FILE && bb->outf != (THREAD)) \ print_file(bb->outf, __VA_ARGS__); \ } while (0); #else # ifdef INTERNAL # define BBPRINT(bb, level, ...) \ do { \ if (bb->outf != INVALID_FILE) \ print_file(bb->outf, __VA_ARGS__); \ } while (0); # else # define BBPRINT(bb, level, ...) /* nothing */ # endif #endif #ifdef WINDOWS extern void intercept_load_dll(void); extern void intercept_unload_dll(void); # ifdef INTERNAL extern void DllMainThreadAttach(void); # endif #endif /* forward declarations */ static bool mangle_bb_ilist(dcontext_t *dcontext, build_bb_t *bb); static void build_native_exec_bb(dcontext_t *dcontext, build_bb_t *bb); static bool at_native_exec_gateway(dcontext_t *dcontext, app_pc start, bool *is_call _IF_DEBUG(bool xfer_target)); #ifdef DEBUG static void report_native_module(dcontext_t *dcontext, app_pc modpc); #endif /*************************************************************************** * Image entry */ static bool reached_image_entry = false; static INLINE_FORCED bool check_for_image_entry(app_pc bb_start) { if (!reached_image_entry && bb_start == get_image_entry()) { LOG(THREAD_GET, LOG_ALL, 1, "Reached image entry point " PFX "\n", bb_start); set_reached_image_entry(); return true; } return false; } void set_reached_image_entry() { SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); reached_image_entry = true; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); } bool reached_image_entry_yet() { return reached_image_entry; } /*************************************************************************** * Whether to inline or elide callees */ /* Return true if pc is a call target that should NOT be entered but should * still be mangled. */ static inline bool must_not_be_entered(app_pc pc) { return false #ifdef DR_APP_EXPORTS /* i#1237: DR will change dr_app_running_under_dynamorio return value * on seeing a bb starting at dr_app_running_under_dynamorio. */ || pc == (app_pc)dr_app_running_under_dynamorio #endif ; } /* Return true if pc is a call target that should NOT be inlined and left native. */ static inline bool leave_call_native(app_pc pc) { return ( #ifdef INTERNAL !dynamo_options.inline_calls #else 0 #endif #ifdef WINDOWS || pc == (app_pc)intercept_load_dll || pc == (app_pc)intercept_unload_dll /* we're guaranteed to have direct calls to the next routine since our * own DllMain calls it! */ # ifdef INTERNAL || pc == (app_pc)DllMainThreadAttach # endif /* check for nudge handling escape from cache */ || (pc == (app_pc)generic_nudge_handler) #else /* PR 200203: long-term we want to control loading of client * libs, but for now we have to let the loader call _fini() * in the client, which may end up calling __wrap_free(). 
* It's simpler to let those be interpreted and make a native * call to the real heap routine here as this is a direct * call whereas we'd need native_exec for the others: */ || pc == (app_pc)global_heap_free #endif ); } /* return true if pc is a direct jmp target that should NOT be elided and followed */ static inline bool must_not_be_elided(app_pc pc) { #ifdef WINDOWS /* Allow only the return jump in the landing pad to be elided, as we * interpret the return path from trampolines. The forward jump leads to * the trampoline and shouldn't be elided. */ if (is_on_interception_initial_route(pc)) return true; #endif return (0 #ifdef WINDOWS /* we insert trampolines by adding direct jmps to our interception code buffer * we don't want to interpret the code in that buffer, as it may swap to the * dstack and mess up a return-from-fcache. * N.B.: if use this routine anywhere else, pay attention to the * hack for is_syscall_trampoline() in the use here! */ || (is_in_interception_buffer(pc)) #else /* UNIX */ #endif ); } #ifdef DR_APP_EXPORTS /* This function allows automatically injected dynamo to ignore * dynamo API routines that would really mess things up */ static inline bool must_escape_from(app_pc pc) { /* if ever find ourselves at top of one of these, immediately issue * a ret instruction...haven't set up frame yet so stack fine, only * problem is return value, go ahead and overwrite xax, it's caller-saved * FIXME: is this ok? */ /* Note that we can't just look for direct calls to these functions * because of stubs, etc. that end up doing indirect jumps to them! */ bool res = false # ifdef DR_APP_EXPORTS || (automatic_startup && (pc == (app_pc)dynamorio_app_init || pc == (app_pc)dr_app_start || pc == (app_pc)dynamo_thread_init || pc == (app_pc)dynamorio_app_exit || /* dr_app_stop is a nop already */ pc == (app_pc)dynamo_thread_exit)) # endif ; # ifdef DEBUG if (res) { # ifdef DR_APP_EXPORTS LOG(THREAD_GET, LOG_INTERP, 3, "must_escape_from: found "); if (pc == (app_pc)dynamorio_app_init) LOG(THREAD_GET, LOG_INTERP, 3, "dynamorio_app_init\n"); else if (pc == (app_pc)dr_app_start) LOG(THREAD_GET, LOG_INTERP, 3, "dr_app_start\n"); /* FIXME: are dynamo_thread_* still needed hered? */ else if (pc == (app_pc)dynamo_thread_init) LOG(THREAD_GET, LOG_INTERP, 3, "dynamo_thread_init\n"); else if (pc == (app_pc)dynamorio_app_exit) LOG(THREAD_GET, LOG_INTERP, 3, "dynamorio_app_exit\n"); else if (pc == (app_pc)dynamo_thread_exit) LOG(THREAD_GET, LOG_INTERP, 3, "dynamo_thread_exit\n"); # endif } # endif return res; } #endif /* DR_APP_EXPORTS */ /* Adds bb->instr, which must be a direct call or jmp, to bb->ilist for native * execution. Makes sure its target is reachable from the code cache, which * is critical for jmps b/c they're native for our hooks of app code which may * not be reachable from the code cache. Also needed for calls b/c in the future * (i#774) the DR lib (and thus our leave_call_native() calls) won't be reachable * from the cache. */ static void bb_add_native_direct_xfer(dcontext_t *dcontext, build_bb_t *bb, bool appended) { #if defined(X86) && defined(X64) /* i#922: we're going to run this jmp from our code cache so we have to * make sure it still reaches its target. We could try to check * reachability from the likely code cache slot, but these should be * rare enough that making them indirect won't matter and then we have * fewer reachability dependences. 
* We do this here rather than in d_r_mangle() b/c we'd have a hard time * distinguishing native jmp/call due to DR's own operations from a * client's inserted meta jmp/call. */ /* Strategy: write target into xax (DR-reserved) slot and jmp through it. * Alternative would be to embed the target into the code stream. * We don't need to set translation b/c these are meta instrs and they * won't fault. */ ptr_uint_t tgt = (ptr_uint_t)opnd_get_pc(instr_get_target(bb->instr)); opnd_t tls_slot = opnd_create_sized_tls_slot(os_tls_offset(TLS_XAX_SLOT), OPSZ_4); instrlist_meta_append( bb->ilist, INSTR_CREATE_mov_imm(dcontext, tls_slot, OPND_CREATE_INT32((int)tgt))); opnd_set_disp(&tls_slot, opnd_get_disp(tls_slot) + 4); instrlist_meta_append( bb->ilist, INSTR_CREATE_mov_imm(dcontext, tls_slot, OPND_CREATE_INT32((int)(tgt >> 32)))); if (instr_is_ubr(bb->instr)) { instrlist_meta_append( bb->ilist, INSTR_CREATE_jmp_ind(dcontext, opnd_create_tls_slot(os_tls_offset(TLS_XAX_SLOT)))); bb->exit_type |= instr_branch_type(bb->instr); } else { ASSERT(instr_is_call_direct(bb->instr)); instrlist_meta_append( bb->ilist, INSTR_CREATE_call_ind(dcontext, opnd_create_tls_slot(os_tls_offset(TLS_XAX_SLOT)))); } if (appended) instrlist_remove(bb->ilist, bb->instr); instr_destroy(dcontext, bb->instr); bb->instr = NULL; #elif defined(ARM) ASSERT_NOT_IMPLEMENTED(false); /* i#1582 */ #else if (appended) { /* avoid assert about meta w/ translation but no restore_state callback */ instr_set_translation(bb->instr, NULL); } else instrlist_append(bb->ilist, bb->instr); /* Indicate that relative target must be * re-encoded, and that it is not an exit cti. * However, we must mangle this to ensure it reaches (i#992) * which we special-case in d_r_mangle(). */ instr_set_meta(bb->instr); instr_set_raw_bits_valid(bb->instr, false); #endif } /* Perform checks such as looking for dynamo stopping points and bad places * to be. We assume we only have to check after control transfer instructions, * i.e., we assume that all of these conditions are procedures that are only * entered by calling or jumping, never falling through. */ static inline bool check_for_stopping_point(dcontext_t *dcontext, build_bb_t *bb) { #ifdef DR_APP_EXPORTS if (must_escape_from(bb->cur_pc)) { /* x64 will zero-extend to rax, so we use eax here */ reg_id_t reg = IF_X86_ELSE(REG_EAX, DR_REG_R0); BBPRINT(bb, 3, "interp: emergency exit from " PFX "\n", bb->cur_pc); /* if ever find ourselves at top of one of these, immediately issue * a ret instruction...haven't set up frame yet so stack fine, only * problem is return value, go ahead and overwrite xax, it's * caller-saved. * FIXME: is this ok? */ /* move 0 into xax/r0 -- our functions return 0 to indicate success */ instrlist_append( bb->ilist, XINST_CREATE_load_int(dcontext, opnd_create_reg(reg), OPND_CREATE_INT32(0))); /* insert a ret instruction */ instrlist_append(bb->ilist, XINST_CREATE_return(dcontext)); /* should this be treated as a real return? */ bb->exit_type |= LINK_INDIRECT | LINK_RETURN; bb->exit_target = get_ibl_routine(dcontext, IBL_LINKED, DEFAULT_IBL_BB(), IBL_RETURN); return true; } #endif /* DR_APP_EXPORTS */ #ifdef CHECK_RETURNS_SSE2 if (bb->cur_pc == (app_pc)longjmp) { SYSLOG_INTERNAL_WARNING("encountered longjmp, which will cause ret mismatch!"); } #endif return is_stopping_point(dcontext, bb->cur_pc); } /* Arithmetic eflags analysis to see if sequence of instrs reads an * arithmetic flag prior to writing it. * Usage: first initialize status to 0 and eflags_6 to 0. 
* Then call this routine for each instr in sequence, assigning result to status. * eflags_6 holds flags written and read so far. * Uses these flags, defined in instr.h, as status values: * EFLAGS_WRITE_ARITH = writes all arith flags before reading any * EFLAGS_WRITE_OF = writes OF before reading it (x86-onlY) * EFLAGS_READ_ARITH = reads some of arith flags before writing * EFLAGS_READ_OF = reads OF before writing OF (x86-only) * 0 = no information yet * On ARM, Q and GE flags are ignored. */ static inline int eflags_analysis(instr_t *instr, int status, uint *eflags_6) { uint e6 = *eflags_6; /* local copy */ uint e6_w2r = EFLAGS_WRITE_TO_READ(e6); uint instr_eflags = instr_get_arith_flags(instr, DR_QUERY_DEFAULT); /* Keep going until result is non-zero, also keep going if * result is writes to OF to see if later writes to rest of flags * before reading any, and keep going if reads one of the 6 to see * if later writes to OF before reading it. */ if (instr_eflags == 0 || status == EFLAGS_WRITE_ARITH IF_X86(|| status == EFLAGS_READ_OF)) return status; /* we ignore interrupts */ if ((instr_eflags & EFLAGS_READ_ARITH) != 0 && (!instr_opcode_valid(instr) || !instr_is_interrupt(instr))) { /* store the flags we're reading */ e6 |= (instr_eflags & EFLAGS_READ_ARITH); *eflags_6 = e6; if ((e6_w2r | (instr_eflags & EFLAGS_READ_ARITH)) != e6_w2r) { /* we're reading a flag that has not been written yet */ status = EFLAGS_READ_ARITH; /* some read before all written */ LOG(THREAD_GET, LOG_INTERP, 4, "\treads flag before writing it!\n"); #ifdef X86 if ((instr_eflags & EFLAGS_READ_OF) != 0 && (e6 & EFLAGS_WRITE_OF) == 0) { status = EFLAGS_READ_OF; /* reads OF before writing! */ LOG(THREAD_GET, LOG_INTERP, 4, "\t reads OF prior to writing it!\n"); } #endif } } else if ((instr_eflags & EFLAGS_WRITE_ARITH) != 0) { /* store the flags we're writing */ e6 |= (instr_eflags & EFLAGS_WRITE_ARITH); *eflags_6 = e6; /* check if all written but none read yet */ if ((e6 & EFLAGS_WRITE_ARITH) == EFLAGS_WRITE_ARITH && (e6 & EFLAGS_READ_ARITH) == 0) { status = EFLAGS_WRITE_ARITH; /* all written before read */ LOG(THREAD_GET, LOG_INTERP, 4, "\twrote all 6 flags now!\n"); } #ifdef X86 /* check if at least OF was written but not read */ else if ((e6 & EFLAGS_WRITE_OF) != 0 && (e6 & EFLAGS_READ_OF) == 0) { status = EFLAGS_WRITE_OF; /* OF written before read */ LOG(THREAD_GET, LOG_INTERP, 4, "\twrote overflow flag before reading it!\n"); } #endif } return status; } /* check origins of code for several purposes: * 1) we need list of areas where this thread's fragments come * from, for faster flushing on munmaps * 2) also for faster flushing, each vmarea has a list of fragments * 3) we need to mark as read-only any writable region that * has a fragment come from it, to handle self-modifying code * 4) for PROGRAM_SHEPHERDING restricted code origins for security * 5) for restricted execution environments: not letting bb cross regions */ /* FIXME CASE 7380: since report security violation before execute off bad page, can be false positive due to: - a faulting instruction in middle of bb would have prevented getting there - ignorable syscall in middle - self-mod code would have ended bb sooner than bad page One solution is to have check_thread_vm_area() return false and have bb building stop at checked_end if a violation will occur when we get there. Then we only raise the violation once building a bb starting there. 
*/ static inline void check_new_page_start(dcontext_t *dcontext, build_bb_t *bb) { DEBUG_DECLARE(bool ok;) if (!bb->check_vm_area) return; DEBUG_DECLARE(ok =) check_thread_vm_area(dcontext, bb->start_pc, bb->start_pc, (bb->record_vmlist ? &bb->vmlist : NULL), &bb->flags, &bb->checked_end, false /*!xfer*/); ASSERT(ok); /* cannot return false on non-xfer */ bb->last_page = bb->start_pc; if (bb->overlap_info != NULL) reset_overlap_info(dcontext, bb); } /* Walk forward in straight line from prev_pc to new_pc. * FIXME: with checked_end we don't need to call this on every contig end * while bb building like we used to. Should revisit the overlap info and * walk_app_bb reasons for keeping those contig() calls and see if we can * optimize them away for bb building at least. * i#993: new_pc points to the last byte of the current instruction and is not * an open-ended endpoint. */ static inline bool check_new_page_contig(dcontext_t *dcontext, build_bb_t *bb, app_pc new_pc) { bool is_first_instr = (bb->instr_start == bb->start_pc); if (!bb->check_vm_area) return true; if (bb->checked_end == NULL) { ASSERT(new_pc == bb->start_pc); } else if (new_pc >= bb->checked_end) { if (!check_thread_vm_area(dcontext, new_pc, bb->start_pc, (bb->record_vmlist ? &bb->vmlist : NULL), &bb->flags, &bb->checked_end, /* i#989: We don't want to fall through to an * incompatible vmarea, so we treat fall * through like a transfer. We can't end the * bb before the first instruction, so we pass * false to forcibly merge in the vmarea * flags. */ !is_first_instr /*xfer*/)) { return false; } } if (bb->overlap_info != NULL) update_overlap_info(dcontext, bb, new_pc, false /*not jmp*/); DOLOG(4, LOG_INTERP, { if (PAGE_START(bb->last_page) != PAGE_START(new_pc)) LOG(THREAD, LOG_INTERP, 4, "page boundary crossed\n"); }); bb->last_page = new_pc; /* update even if not new page, for walk_app_bb */ return true; } /* Direct cti from prev_pc to new_pc */ static bool check_new_page_jmp(dcontext_t *dcontext, build_bb_t *bb, app_pc new_pc) { /* For tracking purposes, check the last byte of the cti. */ bool ok = check_new_page_contig(dcontext, bb, bb->cur_pc - 1); ASSERT(ok && "should have checked cur_pc-1 in decode loop"); if (!ok) /* Don't follow the jmp in release build. */ return false; /* cur sandboxing doesn't handle direct cti * not good enough to only check this at top of interp -- could walk contig * from non-selfmod to selfmod page, and then do a direct cti, which * check_thread_vm_area would allow (no flag changes on direct cti)! * also not good enough to put this check in check_thread_vm_area, as that * only checks across pages. */ if ((bb->flags & FRAG_SELFMOD_SANDBOXED) != 0) return false; if (PAGE_START(bb->last_page) != PAGE_START(new_pc)) LOG(THREAD, LOG_INTERP, 4, "page boundary crossed\n"); /* do not walk into a native exec dll (we assume not currently there, * though could happen if bypass a gateway -- even then this is a feature * to allow getting back to native ASAP) * FIXME: we could assume that such direct calls only * occur from DGC, and rely on check_thread_vm_area to disallow, * as an (unsafe) optimization */ if (DYNAMO_OPTION(native_exec) && DYNAMO_OPTION(native_exec_dircalls) && !vmvector_empty(native_exec_areas) && is_native_pc(new_pc)) return false; #ifdef CLIENT_INTERFACE /* i#805: If we're crossing a module boundary between two modules that are * and aren't on null_instrument_list, don't elide the jmp. 
* XXX i#884: if we haven't yet executed from the 2nd module, the client * won't receive the module load event yet and we might include code * from it here. It would be tricky to solve that, and it should only happen * if the client turns on elision, so we leave it. */ if ((!!os_module_get_flag(bb->cur_pc, MODULE_NULL_INSTRUMENT)) != (!!os_module_get_flag(new_pc, MODULE_NULL_INSTRUMENT))) return false; #endif if (!bb->check_vm_area) return true; /* need to check this even if an intra-page jmp b/c we allow sub-page vm regions */ if (!check_thread_vm_area(dcontext, new_pc, bb->start_pc, (bb->record_vmlist ? &bb->vmlist : NULL), &bb->flags, &bb->checked_end, true /*xfer*/)) return false; if (bb->overlap_info != NULL) update_overlap_info(dcontext, bb, new_pc, true /*jmp*/); bb->flags |= FRAG_HAS_DIRECT_CTI; bb->last_page = new_pc; /* update even if not new page, for walk_app_bb */ return true; } static inline void bb_process_single_step(dcontext_t *dcontext, build_bb_t *bb) { LOG(THREAD, LOG_INTERP, 2, "interp: single step exception bb at " PFX "\n", bb->instr_start); /* FIXME i#2144 : handling a rep string operation. * In this case, we should test if only one iteration is done * before the single step exception. */ instrlist_append(bb->ilist, bb->instr); instr_set_translation(bb->instr, bb->instr_start); /* Mark instruction as special exit. */ instr_branch_set_special_exit(bb->instr, true); bb->exit_type |= LINK_SPECIAL_EXIT; /* Make this bb thread-private and a trace barrier. */ bb->flags &= ~FRAG_SHARED; bb->flags |= FRAG_CANNOT_BE_TRACE; } static inline void bb_process_invalid_instr(dcontext_t *dcontext, build_bb_t *bb) { /* invalid instr: end bb BEFORE the instr, we'll throw exception if we * reach the instr itself */ LOG(THREAD, LOG_INTERP, 2, "interp: invalid instr at " PFX "\n", bb->instr_start); /* This routine is called by more than just bb builder, also used * for recreating state, so check bb->app_interp parameter to find out * if building a real app bb to be executed */ if (bb->app_interp && bb->instr_start == bb->start_pc) { /* This is first instr in bb so it will be executed for sure and * we need to generate an invalid instruction exception. * A benefit of being first instr is that the state is easy * to translate. */ #ifdef WINDOWS /* Copying the invalid bytes and having the processor generate * the exception would be cleaner in every way except our fear * of a new processor making those bytes valid and us inadvertently * executing the unexamined instructions afterward, since we do not * know the proper amount of bytes to copy. Copying is cleaner * since Windows splits invalid instructions into different cases, * an invalid lock prefix and maybe some other distinctions * (it's all interrupt 6 to the processor), and it is hard to * duplicate Windows' behavior in our forged exception. */ /* FIXME case 10672: provide a runtime option to specify new * instruction formats to avoid this app exception */ ASSERT(dcontext->bb_build_info == bb); bb_build_abort(dcontext, true /*clean vm area*/, true /*unlock*/); /* FIXME : we use illegal instruction here, even though we * know windows uses different exception codes for different * types of invalid instructions (for ex. 
STATUS_INVALID_LOCK * _SEQUENCE for lock prefix on a jmp instruction) */ if (TEST(DUMPCORE_FORGE_ILLEGAL_INST, DYNAMO_OPTION(dumpcore_mask))) os_dump_core("Warning: Encountered Illegal Instruction"); os_forge_exception(bb->instr_start, ILLEGAL_INSTRUCTION_EXCEPTION); ASSERT_NOT_REACHED(); #else /* FIXME: Linux hack until we have a real os_forge_exception implementation: * copy the bytes and have the process generate the exception. * Once remove this, also disable check at top of insert_selfmod_sandbox * FIXME PR 307880: we now have a preliminary * os_forge_exception impl, but I'm leaving this hack until * we're more comfortable w/ our forging. */ uint sz; instrlist_append(bb->ilist, bb->instr); /* pretend raw bits valid to get it encoded * For now we just do 17 bytes, being wary of unreadable pages. * FIXME: better solution is to have decoder guess at length (if * ok opcode just bad lock prefix or something know length, if * bad opcode just bytes up until know it's bad). */ if (!is_readable_without_exception(bb->instr_start, MAX_INSTR_LENGTH)) { app_pc nxt_page = (app_pc)ALIGN_FORWARD(bb->instr_start, PAGE_SIZE); sz = nxt_page - bb->instr_start; } else { sz = MAX_INSTR_LENGTH; } bb->cur_pc += sz; /* just in case, should have a non-self target */ ASSERT(bb->cur_pc > bb->instr_start); /* else still a self target */ instr_set_raw_bits(bb->instr, bb->instr_start, sz); bb->invalid_instr_hack = true; #endif } else { instr_destroy(dcontext, bb->instr); bb->instr = NULL; } } /* FIXME i#1668, i#2974: NYI on ARM/AArch64 */ #ifdef X86 /* returns true to indicate "elide and continue" and false to indicate "end bb now" * should be used both for converted indirect jumps and * FIXME: for direct jumps by bb_process_ubr */ static inline bool follow_direct_jump(dcontext_t *dcontext, build_bb_t *bb, app_pc target) { if (bb->follow_direct && !must_not_be_entered(target) && bb->num_elide_jmp < DYNAMO_OPTION(max_elide_jmp) && (DYNAMO_OPTION(elide_back_jmps) || bb->cur_pc <= target)) { if (check_new_page_jmp(dcontext, bb, target)) { /* Elide unconditional branch and follow target */ bb->num_elide_jmp++; STATS_INC(total_elided_jmps); STATS_TRACK_MAX(max_elided_jmps, bb->num_elide_jmp); bb->cur_pc = target; BBPRINT(bb, 4, " continuing at target " PFX "\n", bb->cur_pc); return true; /* keep bb going */ } else { BBPRINT(bb, 3, " NOT following jmp from " PFX " to " PFX "\n", bb->instr_start, target); } } else { BBPRINT(bb, 3, " NOT attempting to follow jump from " PFX " to " PFX "\n", bb->instr_start, target); } return false; /* stop bb */ } #endif /* X86 */ /* returns true to indicate "elide and continue" and false to indicate "end bb now" */ static inline bool bb_process_ubr(dcontext_t *dcontext, build_bb_t *bb) { app_pc tgt = (byte *)opnd_get_pc(instr_get_target(bb->instr)); BBPRINT(bb, 4, "interp: direct jump at " PFX "\n", bb->instr_start); if (must_not_be_elided(tgt)) { #ifdef WINDOWS byte *wrapper_start; if (is_syscall_trampoline(tgt, &wrapper_start)) { /* HACK to avoid entering the syscall trampoline that is meant * only for native syscalls -- we replace the jmp with the * original app mov immed that it replaced */ BBPRINT(bb, 3, "interp: replacing syscall trampoline @" PFX " w/ orig mov @" PFX "\n", bb->instr_start, wrapper_start); instr_reset(dcontext, bb->instr); /* leave bb->cur_pc unchanged */ decode(dcontext, wrapper_start, bb->instr); /* ASSUMPTION: syscall trampoline puts hooked instruction * (usually mov_imm but can be lea if hooked_deeper) here */ ASSERT(instr_get_opcode(bb->instr) == OP_mov_imm || 
(instr_get_opcode(bb->instr) == OP_lea && DYNAMO_OPTION(native_exec_hook_conflict) == HOOKED_TRAMPOLINE_HOOK_DEEPER)); instrlist_append(bb->ilist, bb->instr); /* translation should point to the trampoline at the * original application address */ if (bb->record_translation) instr_set_translation(bb->instr, bb->instr_start); if (instr_get_opcode(bb->instr) == OP_lea) { app_pc translation = bb->instr_start + instr_length(dcontext, bb->instr); ASSERT_CURIOSITY(instr_length(dcontext, bb->instr) == 4); /* we hooked deep need to add the int 2e instruction */ /* can't use create_syscall_instr because of case 5217 hack */ ASSERT(get_syscall_method() == SYSCALL_METHOD_INT); bb->instr = INSTR_CREATE_int(dcontext, opnd_create_immed_int((char)0x2e, OPSZ_1)); if (bb->record_translation) instr_set_translation(bb->instr, translation); ASSERT(instr_is_syscall(bb->instr) && instr_get_opcode(bb->instr) == OP_int); instrlist_append(bb->ilist, bb->instr); return bb_process_syscall(dcontext, bb); } return true; /* keep bb going */ } #endif BBPRINT(bb, 3, "interp: NOT following jmp to " PFX "\n", tgt); /* add instruction to instruction list */ bb_add_native_direct_xfer(dcontext, bb, false /*!appended*/); /* Case 8711: coarse-grain can't handle non-exit cti */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); return false; /* end bb now */ } else { if (bb->follow_direct && !must_not_be_entered(tgt) && bb->num_elide_jmp < DYNAMO_OPTION(max_elide_jmp) && (DYNAMO_OPTION(elide_back_jmps) || bb->cur_pc <= tgt)) { if (check_new_page_jmp(dcontext, bb, tgt)) { /* Elide unconditional branch and follow target */ bb->num_elide_jmp++; STATS_INC(total_elided_jmps); STATS_TRACK_MAX(max_elided_jmps, bb->num_elide_jmp); bb->cur_pc = tgt; BBPRINT(bb, 4, " continuing at target " PFX "\n", bb->cur_pc); /* pretend never saw this ubr: delete instr, then continue */ instr_destroy(dcontext, bb->instr); bb->instr = NULL; return true; /* keep bb going */ } else { BBPRINT(bb, 3, " NOT following direct jmp from " PFX " to " PFX "\n", bb->instr_start, tgt); } } /* End this bb now */ bb->exit_target = opnd_get_pc(instr_get_target(bb->instr)); instrlist_append(bb->ilist, bb->instr); return false; /* end bb */ } return true; /* keep bb going */ } #ifdef X86 /* returns true if call is elided, * and false if not following due to hitting a limit or other reason */ static bool follow_direct_call(dcontext_t *dcontext, build_bb_t *bb, app_pc callee) { /* FIXME: This code should be reused in bb_process_convertible_indcall() * and in bb_process_call_direct() */ if (bb->follow_direct && !must_not_be_entered(callee) && bb->num_elide_call < DYNAMO_OPTION(max_elide_call) && (DYNAMO_OPTION(elide_back_calls) || bb->cur_pc <= callee)) { if (check_new_page_jmp(dcontext, bb, callee)) { bb->num_elide_call++; STATS_INC(total_elided_calls); STATS_TRACK_MAX(max_elided_calls, bb->num_elide_call); bb->cur_pc = callee; BBPRINT(bb, 4, " continuing in callee at " PFX "\n", bb->cur_pc); return true; /* keep bb going in callee */ } else { BBPRINT(bb, 3, " NOT following direct (or converted) call from " PFX " to " PFX "\n", bb->instr_start, callee); } } else { BBPRINT(bb, 3, " NOT attempting to follow call from " PFX " to " PFX "\n", bb->instr_start, callee); } return false; /* stop bb */ } #endif /* X86 */ static inline void bb_stop_prior_to_instr(dcontext_t *dcontext, build_bb_t *bb, bool appended) { if (appended) instrlist_remove(bb->ilist, bb->instr); instr_destroy(dcontext, bb->instr); bb->instr = NULL; bb->cur_pc = bb->instr_start; } /* returns true to 
indicate "elide and continue" and false to indicate "end bb now" */ static inline bool bb_process_call_direct(dcontext_t *dcontext, build_bb_t *bb) { byte *callee = (byte *)opnd_get_pc(instr_get_target(bb->instr)); #ifdef CUSTOM_TRACES_RET_REMOVAL if (callee == bb->instr_start + 5) { LOG(THREAD, LOG_INTERP, 4, "found call to next instruction\n"); } else dcontext->num_calls++; #endif STATS_INC(num_all_calls); BBPRINT(bb, 4, "interp: direct call at " PFX "\n", bb->instr_start); if (leave_call_native(callee)) { BBPRINT(bb, 3, "interp: NOT inlining or mangling call to " PFX "\n", callee); /* Case 8711: coarse-grain can't handle non-exit cti. * If we allow this fragment to be coarse we must kill the freeze * nudge thread! */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); bb_add_native_direct_xfer(dcontext, bb, true /*appended*/); return true; /* keep bb going, w/o inlining call */ } else { if (DYNAMO_OPTION(coarse_split_calls) && DYNAMO_OPTION(coarse_units) && TEST(FRAG_COARSE_GRAIN, bb->flags)) { if (instrlist_first(bb->ilist) != bb->instr) { /* have call be in its own bb */ bb_stop_prior_to_instr(dcontext, bb, true /*appended already*/); return false; /* stop bb */ } else { /* single-call fine-grained bb */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); } } /* FIXME: use follow_direct_call() */ if (bb->follow_direct && !must_not_be_entered(callee) && bb->num_elide_call < DYNAMO_OPTION(max_elide_call) && (DYNAMO_OPTION(elide_back_calls) || bb->cur_pc <= callee)) { if (check_new_page_jmp(dcontext, bb, callee)) { bb->num_elide_call++; STATS_INC(total_elided_calls); STATS_TRACK_MAX(max_elided_calls, bb->num_elide_call); bb->cur_pc = callee; BBPRINT(bb, 4, " continuing in callee at " PFX "\n", bb->cur_pc); return true; /* keep bb going */ } } BBPRINT(bb, 3, " NOT following direct call from " PFX " to " PFX "\n", bb->instr_start, callee); /* End this bb now */ if (instr_is_cbr(bb->instr)) { /* Treat as cbr, not call */ instr_exit_branch_set_type(bb->instr, instr_branch_type(bb->instr)); } else { bb->exit_target = callee; } return false; /* end bb now */ } return true; /* keep bb going */ } #ifdef WINDOWS /* We check if the instrs call, mov, and sysenter are * "call (%xdx); mov %xsp -> %xdx" or "call %xdx; mov %xsp -> %xdx" * and "sysenter". */ bool instr_is_call_sysenter_pattern(instr_t *call, instr_t *mov, instr_t *sysenter) { instr_t *instr; if (call == NULL || mov == NULL || sysenter == NULL) return false; if (instr_is_meta(call) || instr_is_meta(mov) || instr_is_meta(sysenter)) return false; if (instr_get_next(call) != mov || instr_get_next(mov) != sysenter) return false; /* check sysenter */ if (instr_get_opcode(sysenter) != OP_sysenter) return false; /* FIXME Relax the pattern matching on the "mov; call" pair so that small * changes in the register dataflow and call construct are tolerated. */ /* Did we find a "mov %xsp -> %xdx"? */ instr = mov; if (!(instr != NULL && instr_get_opcode(instr) == OP_mov_ld && instr_num_srcs(instr) == 1 && instr_num_dsts(instr) == 1 && opnd_is_reg(instr_get_dst(instr, 0)) && opnd_get_reg(instr_get_dst(instr, 0)) == REG_XDX && opnd_is_reg(instr_get_src(instr, 0)) && opnd_get_reg(instr_get_src(instr, 0)) == REG_XSP)) { return false; } /* Did we find a "call (%xdx) or "call %xdx" that's already marked * for ind->direct call conversion? */ instr = call; if (!(instr != NULL && TEST(INSTR_IND_CALL_DIRECT, instr->flags) && instr_is_call_indirect(instr) && /* The 2nd src operand should always be %xsp. 
          */
          opnd_is_reg(instr_get_src(instr, 1)) &&
          opnd_get_reg(instr_get_src(instr, 1)) == REG_XSP &&
          /* Match 'call (%xdx)' for post-SP2. */
          ((opnd_is_near_base_disp(instr_get_src(instr, 0)) &&
            opnd_get_base(instr_get_src(instr, 0)) == REG_XDX &&
            opnd_get_disp(instr_get_src(instr, 0)) == 0) ||
           /* Match 'call %xdx' for pre-SP2. */
           (opnd_is_reg(instr_get_src(instr, 0)) &&
            opnd_get_reg(instr_get_src(instr, 0)) == REG_XDX)))) {
        return false;
    }
    return true;
}

/* Walk up from the bb->instr and verify that the preceding instructions
 * match the pattern that we expect to precede a sysenter.
 */
static instr_t *
bb_verify_sysenter_pattern(dcontext_t *dcontext, build_bb_t *bb)
{
    /* Walk back up 2 instructions and verify that there's a
     * "call (%xdx); mov %xsp -> %xdx" or "call %xdx; mov %xsp -> %xdx"
     * just prior to the sysenter.
     * We use "xsp" and "xdx" to be ready for x64 sysenter though we don't
     * expect to see it.
     */
    instr_t *mov, *call;
    mov = instr_get_prev_expanded(dcontext, bb->ilist, bb->instr);
    if (mov == NULL)
        return NULL;
    call = instr_get_prev_expanded(dcontext, bb->ilist, mov);
    if (call == NULL)
        return NULL;
    if (!instr_is_call_sysenter_pattern(call, mov, bb->instr)) {
        BBPRINT(bb, 3, "bb_verify_sysenter_pattern -- pattern didn't match\n");
        return NULL;
    }
    return call;
}

/* Only used for the Borland SEH exemption. */
/* FIXME - we can't really tell a push from a pop since both are typically a
 * mov to fs:[0], but double processing doesn't hurt. */
/* NOTE we don't see dynamic SEH frame pushes, we only see the first SEH push
 * per mov -> fs:[0] instruction in the app. So we don't see modified in place
 * handler addresses (see at_Borland_SEH_rct_exemption()) or handler addresses
 * that are passed into a shared routine that sets up the frame (not yet seen,
 * note that MS dlls that have a _SEH_prolog hardcode the handler address in
 * the _SEH_prolog routine, only the data is passed in). */
static void
bb_process_SEH_push(dcontext_t *dcontext, build_bb_t *bb, void *value)
{
    if (value == NULL || value == (void *)PTR_UINT_MINUS_1) {
        /* could be popping off the last frame (leaving -1) of the SEH stack */
        STATS_INC(num_endlist_SEH_write);
        ASSERT_CURIOSITY(value != NULL);
        return;
    }
    LOG(THREAD, LOG_INTERP, 3, "App moving " PFX " to fs:[0]\n", value);
# ifdef RETURN_AFTER_CALL
    if (DYNAMO_OPTION(borland_SEH_rct)) {
        /* xref case 5752, the Borland compiler SEH implementation uses a push
         * imm ret motif for fall through to the finally of a try finally block
         * (very similar to what the Microsoft NT at_SEH_rct_exception() is
         * doing). The layout will always look like this :
         *     push e: (imm32)  (e should be in the .E/.F table)
         *  a:
         *     ...
         *  b: ret
         *  c: jmp rel32        (c should be in the .E/.F table)
         *  d: jmp a: (rel8/32)
         *     ... (usually nothing)
         *  e:
         * (where ret at b is targeting e, or a valid after call). The
         * exception dispatcher calls c (the SEH frame has c as the handler)
         * which jmps to the exception handler which, in turn, calls d to
         * execute the finally block. Fall through is as shown above. So,
         * we see a .E violation for the handler's call to d and a .C violation
         * for the fall through case of the ret @ b targeting e. We may also
         * see a .E violation for a call to a as sometimes the handler computes
         * the target of the jmp @ d and passes that to a different exception
         * handler.
         *
         * For try-except we see the following layout :
         *   I've only seen jmp ind in the case that led to needing
         *   at_Borland_SEH_rct_exemption() to be added, not that
         *   it makes any difference.
         *   [ jmp z: (rel8/32) || (rarely) ret || (very rarely) jmp ind]
         * x: jmp rel32          (x should be in the .E/.F table)
         * y:
         *   ...
         *   call rel32
         * [z: ... || ret ]
         * Though there may be other optimized layouts (the ret instead of the
         * jmp z: is one such) so we may not want to rely on anything other
         * than x y. The exception dispatcher calls x (the SEH frame has x as
         * the handler) which jmps to the exception handler which, in turn,
         * jmps to y to execute the except block. We see a .F violation from
         * the handler's jmp to y. at_Borland_SEH_rct_exemption() covers a
         * case where the address of x (and thus y) in an existing SEH frame
         * is changed in place instead of popping and pushing a new frame.
         *
         * All addresses (rel and otherwise) should be in the same module. So
         * we need to recognize the pattern and add d:/y: to the .E/.F table
         * as well as a: (sometimes the handler calculates the target of d and
         * passes that up to a higher level routine, though I don't see the
         * point) and add e: to the .C table.
         *
         * It would be preferable to handle these exemptions reactively at
         * the violation point, but unfortunately, by the time we get to the
         * violation the SEH frame information has been popped off the stack
         * and is lost, so we have to do it pre-emptively here (pattern
         * matching at violation time has proven too difficult in the face of
         * certain compiler optimizations). See at_Borland_SEH_rct_exemption()
         * in callback.c, that could handle all ind branches to y and ind calls
         * to d (see below) at an acceptable level of security if we desired.
         * Handling the ret @ b to e reactively would require the ability to
         * recreate the exact src cti (so we can use the addr of the ret to
         * pattern match) at the violation point (something that can't always
         * currently be done, reset flushing etc.). Handling the ind call to
         * a (which I've never actually seen, though I've seen the address
         * computed and it looks like it could likely be hit) reactively is
         * more tricky. Prob. the only way to handle that is to allow .E/.F
         * transitions to any address after a push imm32 of an address in the
         * same module, but that might be too permissive. FIXME - should still
         * revisit doing the exemptions reactively at some point, esp. once we
         * can reliably get the src cti.
         */
        extern bool seen_Borland_SEH; /* set for callback.c */
        /* First read in the SEH frame, this is the observed structure and
         * the first two fields (which are all that we use) are constrained by
         * ntdll exception dispatcher (see EXCEPTION_REGISTRATION declaration
         * in ntdll.h). */
        /* FIXME - could just use EXCEPTION_REGISTRATION period since all we
         * need is the handler address and it would allow simpler curiosity
         * [see 8181] below. If, as is expected, other options make use of
         * this routine we'll probably have one shared get of the SEH frame
         * anyways. */
        typedef struct _borland_seh_frame_t {
            EXCEPTION_REGISTRATION reg;
            reg_t xbp; /* not used by us */
        } borland_seh_frame_t;
        borland_seh_frame_t frame; /* will hold [b,e] or [x-1,y] */
        byte target_buf[RET_0_LENGTH + 2 * JMP_LONG_LENGTH];
        app_pc handler_jmp_target = NULL;
        if (!d_r_safe_read(value, sizeof(frame), &frame)) {
            /* We already checked for NULL and -1 above so this should be
             * a valid SEH frame. Xref 8181, borland_seh_frame_t struct is
             * bigger than EXCEPTION_REGISTRATION (which is all that is
             * required) so verify smaller size is readable.
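             * (EXCEPTION_REGISTRATION is just the 'prev' and 'handler'
             * pointers, the only fields this routine actually examines.)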
*/ ASSERT_CURIOSITY( sizeof(EXCEPTION_REGISTRATION) < sizeof(frame) && d_r_safe_read(value, sizeof(EXCEPTION_REGISTRATION), &frame)); goto post_borland; } /* frame.reg.handler is c or y, read extra prior bytes to look for b */ if (!d_r_safe_read((app_pc)frame.reg.handler - RET_0_LENGTH, sizeof(target_buf), target_buf)) { goto post_borland; } if (is_jmp_rel32(&target_buf[RET_0_LENGTH], (app_pc)frame.reg.handler, &handler_jmp_target)) { /* we have a possible match, now do the more expensive checking */ app_pc base; LOG(THREAD, LOG_INTERP, 3, "Read possible borland SEH frame @" PFX "\n\t" "next=" PFX " handler=" PFX " xbp=" PFX "\n\t", value, frame.reg.prev, frame.reg.handler, frame.xbp); DOLOG(3, LOG_INTERP, { dump_buffer_as_bytes(THREAD, target_buf, sizeof(target_buf), 0); }); /* optimize check if we've already processed this frame once */ if ((DYNAMO_OPTION(rct_ind_jump) != OPTION_DISABLED || DYNAMO_OPTION(rct_ind_call) != OPTION_DISABLED) && rct_ind_branch_target_lookup( dcontext, (app_pc)frame.reg.handler + JMP_LONG_LENGTH)) { /* we already processed this SEH frame once, this is prob. a * frame pop, no need to continue */ STATS_INC(num_borland_SEH_dup_frame); LOG(THREAD, LOG_INTERP, 3, "Processing duplicate Borland SEH frame\n"); goto post_borland; } base = get_module_base((app_pc)frame.reg.handler); STATS_INC(num_borland_SEH_initial_match); /* Perf opt, we use the cheaper get_allocation_base() below instead * of get_module_base(). We are checking the result against a * known module base (base) so no need to duplicate the is module * check. FIXME - the checks prob. aren't even necessary given the * later is_in_code_section checks. Xref case 8171. */ /* FIXME - (perf) we could cache the region from the first * is_in_code_section() call and check against that before falling * back on is_in_code_section in case of multiple code sections. */ if (base != NULL && get_allocation_base(handler_jmp_target) == base && get_allocation_base(bb->instr_start) == base && /* FIXME - with -rct_analyze_at_load we should be able to * verify that frame->handler (x: c:) is on the .E/.F * table already. We could also try to match known pre x: * post y: patterns. 
*/ is_in_code_section(base, bb->instr_start, NULL, NULL) && is_in_code_section(base, handler_jmp_target, NULL, NULL) && is_range_in_code_section(base, (app_pc)frame.reg.handler, (app_pc)frame.reg.handler + JMP_LONG_LENGTH + 1, NULL, NULL)) { app_pc finally_target; byte push_imm_buf[PUSH_IMM32_LENGTH]; DEBUG_DECLARE(bool ok;) /* we have a match, add handler+JMP_LONG_LENGTH (y: d:) * to .E/.F table */ STATS_INC(num_borland_SEH_try_match); LOG(THREAD, LOG_INTERP, 2, "Found Borland SEH frame adding " PFX " to .E/.F table\n", (app_pc)frame.reg.handler + JMP_LONG_LENGTH); if ((DYNAMO_OPTION(rct_ind_jump) != OPTION_DISABLED || DYNAMO_OPTION(rct_ind_call) != OPTION_DISABLED)) { d_r_mutex_lock(&rct_module_lock); rct_add_valid_ind_branch_target( dcontext, (app_pc)frame.reg.handler + JMP_LONG_LENGTH); d_r_mutex_unlock(&rct_module_lock); } /* we set this as an enabler for another exemption in * callback .C, see notes there */ if (!seen_Borland_SEH) { SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); seen_Borland_SEH = true; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); } /* case 8648: used to decide which RCT entries to persist */ DEBUG_DECLARE(ok =) os_module_set_flag(base, MODULE_HAS_BORLAND_SEH); ASSERT(ok); /* look for .C addresses for try finally */ if (target_buf[0] == RAW_OPCODE_ret && (is_jmp_rel32(&target_buf[RET_0_LENGTH + JMP_LONG_LENGTH], (app_pc)frame.reg.handler + JMP_LONG_LENGTH, &finally_target) || is_jmp_rel8(&target_buf[RET_0_LENGTH + JMP_LONG_LENGTH], (app_pc)frame.reg.handler + JMP_LONG_LENGTH, &finally_target)) && d_r_safe_read(finally_target - sizeof(push_imm_buf), sizeof(push_imm_buf), push_imm_buf) && push_imm_buf[0] == RAW_OPCODE_push_imm32) { app_pc push_val = *(app_pc *)&push_imm_buf[1]; /* do a few more, expensive, sanity checks */ /* FIXME - (perf) see earlier note on get_allocation_base() * and is_in_code_section() usage. */ if (get_allocation_base(finally_target) == base && is_in_code_section(base, finally_target, NULL, NULL) && get_allocation_base(push_val) == base && /* FIXME - could also check that push_val is in * .E/.F table, at least for -rct_analyze_at_load */ is_in_code_section(base, push_val, NULL, NULL)) { /* Full match, add push_val (e:) to the .C table * and finally_target (a:) to the .E/.F table */ STATS_INC(num_borland_SEH_finally_match); LOG(THREAD, LOG_INTERP, 2, "Found Borland SEH finally frame adding " PFX " to" " .C table and " PFX " to .E/.F table\n", push_val, finally_target); if ((DYNAMO_OPTION(rct_ind_jump) != OPTION_DISABLED || DYNAMO_OPTION(rct_ind_call) != OPTION_DISABLED)) { d_r_mutex_lock(&rct_module_lock); rct_add_valid_ind_branch_target(dcontext, finally_target); d_r_mutex_unlock(&rct_module_lock); } if (DYNAMO_OPTION(ret_after_call)) { fragment_add_after_call(dcontext, push_val); } } else { ASSERT_CURIOSITY(false && "partial borland seh finally match"); } } } } } post_borland: # endif /* RETURN_AFTER_CALL */ return; } /* helper routine for bb_process_fs_ref * return true if bb should be continued, false if it shouldn't */ static bool bb_process_fs_ref_opnd(dcontext_t *dcontext, build_bb_t *bb, opnd_t dst, bool *is_to_fs0) { ASSERT(is_to_fs0 != NULL); *is_to_fs0 = false; if (opnd_is_far_base_disp(dst) && /* FIXME - check size? */ opnd_get_segment(dst) == SEG_FS) { /* is a write to fs:[*] */ if (bb->instr_start != bb->start_pc) { /* Not first instruction in the bb, end bb before this * instruction, so we can see it as the first instruction of a * new bb where we can use the register state. 
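         * (E.g., if the third instr of a bb writes to fs:[*], we stop the
         * bb after the second instr; when the write is later rebuilt as the
         * first instr of its own bb, the mcontext at build time supplies the
         * register values needed to evaluate the store's address.)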
             */
            /* As is, always ending the bb here has a mixed effect on mem usage
             * with default options. We do end up with slightly more bb's
             * (and associated bookkeeping costs), but frequently with MS dlls
             * we reduce code cache duplication from jmp/call elision
             * (_SEH_[Pro,Epi]log otherwise ends up frequently duplicated for
             * instance). */
            /* FIXME - we must stop the bb here even if there's already
             * a bb built for the next instruction, as we have to have
             * reproducible bb building for recreate app state. We should
             * only get here through code duplication (typically jmp/call
             * inlining, though can also be through multiple entry points into
             * the same block of non cti instructions). */
            bb_stop_prior_to_instr(dcontext, bb, false /*not appended yet*/);
            return false; /* stop bb */
        }
        /* Only process the push if building a new bb for cache, can't check
         * this any earlier since have to preserve bb building/ending behavior
         * even when not for cache (for recreation etc.). */
        if (bb->app_interp) {
            /* check if this is a write to fs:[0] */
            /* XXX: this won't identify all memory references (need to switch to
             * instr_compute_address_ex_priv() in order to handle VSIB) but the
             * current usage is just to identify the Borland pattern so that's ok.
             */
            if (opnd_compute_address_priv(dst, get_mcontext(dcontext)) == NULL) {
                /* we have new mov to fs:[0] */
                *is_to_fs0 = true;
            }
        }
    }
    return true;
}

/* While currently only used for Borland SEH exemptions, this analysis could
 * also be helpful for other SEH tasks (xref case 5824). */
static bool
bb_process_fs_ref(dcontext_t *dcontext, build_bb_t *bb)
{
    ASSERT(DYNAMO_OPTION(process_SEH_push) &&
           instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS));
    /* If this is the first instruction of a bb for the cache we
     * want to fully decode it, check if it's pushing an SEH frame
     * and, if so, pass it to the SEH checking routines (currently
     * just used for the Borland SEH rct handling). If this is not
     * the first instruction of the bb then we want to stop the bb
     * just before this instruction so that when we do process this
     * instruction it will be the first in the bb (allowing us to
     * use the register state). */
    if (!bb->full_decode) {
        instr_decode(dcontext, bb->instr);
        /* it is possible this is an invalid instr that made it through the fast
         * decode, FIXME is there a better way to handle this? */
        if (!instr_valid(bb->instr)) {
            ASSERT_NOT_TESTED();
            if (bb->cur_pc == NULL)
                bb->cur_pc = bb->instr_start;
            bb_process_invalid_instr(dcontext, bb);
            return false; /* stop bb */
        }
        ASSERT(instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS));
    }
    /* expect to see only simple mov's to fs:[0] for new SEH frames
     * FIXME - might we see other types we'd want to intercept?
     * do we want to process pop instructions (usually just for removing
     * a frame)? */
    if (instr_get_opcode(bb->instr) == OP_mov_st) {
        bool is_to_fs0;
        opnd_t dst = instr_get_dst(bb->instr, 0);
        if (!bb_process_fs_ref_opnd(dcontext, bb, dst, &is_to_fs0))
            return false; /* end bb */
        /* Only process the push if building a new bb for cache, can't check
         * this any earlier since have to preserve bb building/ending behavior
         * even when not for cache (for recreation etc.).
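         * (bb->app_interp below is what distinguishes the for-cache case
         * from state recreation.)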
*/ if (bb->app_interp) { if (is_to_fs0) { ptr_int_t value = 0; opnd_t src = instr_get_src(bb->instr, 0); if (opnd_is_immed_int(src)) { value = opnd_get_immed_int(src); } else if (opnd_is_reg(src)) { value = reg_get_value_priv(opnd_get_reg(src), get_mcontext(dcontext)); } else { ASSERT_NOT_REACHED(); } STATS_INC(num_SEH_pushes_processed); LOG(THREAD, LOG_INTERP, 3, "found mov to fs:[0] @ " PFX "\n", bb->instr_start); bb_process_SEH_push(dcontext, bb, (void *)value); } else { STATS_INC(num_fs_movs_not_SEH); } } } # if defined(DEBUG) && defined(INTERNAL) else if (INTERNAL_OPTION(check_for_SEH_push)) { /* Debug build Sanity check that we aren't missing SEH frame pushes */ int i; int num_dsts = instr_num_dsts(bb->instr); for (i = 0; i < num_dsts; i++) { bool is_to_fs0; opnd_t dst = instr_get_dst(bb->instr, i); if (!bb_process_fs_ref_opnd(dcontext, bb, dst, &is_to_fs0)) { STATS_INC(num_process_SEH_bb_early_terminate_debug); return false; /* end bb */ } /* common case is pop instructions to fs:[0] when popping an * SEH frame stored on tos */ if (is_to_fs0) { if (instr_get_opcode(bb->instr) == OP_pop) { LOG(THREAD, LOG_INTERP, 4, "found pop to fs:[0] @ " PFX "\n", bb->instr_start); STATS_INC(num_process_SEH_pop_fs0); } else { /* an unexpected SEH frame push */ LOG(THREAD, LOG_INTERP, 1, "found unexpected write to fs:[0] @" PFX "\n", bb->instr_start); DOLOG(1, LOG_INTERP, { d_r_loginst(dcontext, 1, bb->instr, ""); }); ASSERT_CURIOSITY(!is_to_fs0); } } } } # endif return true; /* continue bb */ } #endif /* win32 */ #if defined(UNIX) && !defined(DGC_DIAGNOSTICS) && defined(X86) /* The basic strategy for mangling mov_seg instruction is: * For mov fs/gs => reg/[mem], simply mangle it to write * the app's fs/gs selector value into dst. * For mov reg/mem => fs/gs, we make it as the first instruction * of bb, and mark that bb not linked and has mov_seg instr, * and change that instruction to be a nop. * Then whenever before entering code cache, we check if that's the bb * has mov_seg. If yes, we will update the information we maintained * about the app's fs/gs. */ /* check if the basic block building should continue on a mov_seg instr. */ static bool bb_process_mov_seg(dcontext_t *dcontext, build_bb_t *bb) { reg_id_t seg; if (!INTERNAL_OPTION(mangle_app_seg)) return true; /* continue bb */ /* if it is a read, we only need mangle the instruction. */ ASSERT(instr_num_srcs(bb->instr) == 1); if (opnd_is_reg(instr_get_src(bb->instr, 0)) && reg_is_segment(opnd_get_reg(instr_get_src(bb->instr, 0)))) return true; /* continue bb */ /* it is an update, we need set to be the first instr of bb */ ASSERT(instr_num_dsts(bb->instr) == 1); ASSERT(opnd_is_reg(instr_get_dst(bb->instr, 0))); seg = opnd_get_reg(instr_get_dst(bb->instr, 0)); ASSERT(reg_is_segment(seg)); /* we only need handle fs/gs */ if (seg != SEG_GS && seg != SEG_FS) return true; /* continue bb */ /* if no private loader, we only need mangle the non-tls seg */ if (seg == IF_X64_ELSE(SEG_FS, SEG_FS) && IF_CLIENT_INTERFACE_ELSE(!INTERNAL_OPTION(private_loader), true)) return true; /* continue bb */ if (bb->instr_start == bb->start_pc) { /* the first instruction, we can continue build bb. */ /* this bb cannot be part of trace! */ bb->flags |= FRAG_CANNOT_BE_TRACE; bb->flags |= FRAG_HAS_MOV_SEG; return true; /* continue bb */ } LOG(THREAD, LOG_INTERP, 3, "ending bb before mov_seg\n"); /* Set cur_pc back to the start of this instruction and delete this * instruction from the bb ilist. 
     */
    bb->cur_pc = instr_get_raw_bits(bb->instr);
    instrlist_remove(bb->ilist, bb->instr);
    instr_destroy(dcontext, bb->instr);
    /* Set instr to NULL in order to get translation of exit cti correct. */
    bb->instr = NULL;
    /* this block must be the last one in a trace
     * breaking traces here shouldn't be a perf issue b/c this is so rare,
     * it should happen only once per thread on setting up tls.
     */
    bb->flags |= FRAG_MUST_END_TRACE;
    return false; /* stop bb here */
}
#endif /* UNIX && X86 */

/* Returns true to indicate that ignorable syscall processing is completed
 * with *continue_bb indicating if the bb should be continued or not.
 * When returning false, continue_bb isn't pertinent.
 */
static bool
bb_process_ignorable_syscall(dcontext_t *dcontext, build_bb_t *bb, int sysnum,
                             bool *continue_bb)
{
    STATS_INC(ignorable_syscalls);
    BBPRINT(bb, 3, "found ignorable system call 0x%04x\n", sysnum);
#ifdef WINDOWS
    if (get_syscall_method() != SYSCALL_METHOD_SYSENTER) {
        DOCHECK(1, {
            if (get_syscall_method() == SYSCALL_METHOD_WOW64)
                ASSERT_NOT_TESTED();
        });
        if (continue_bb != NULL)
            *continue_bb = true;
        return true;
    } else {
        /* Can we continue interp after the sysenter at the instruction
         * after the call to sysenter? */
        instr_t *call = bb_verify_sysenter_pattern(dcontext, bb);
        if (call != NULL) {
            /* If we're continuing code discovery at the after-call address,
             * change the cur_pc to continue at the after-call addr. This is
             * safe since the preceding call is in the fragment and
             * %xsp/(%xsp) hasn't changed since the call. Obviously, we assume
             * that the sysenter breaks control flow in such a fashion that any
             * instruction that follows it isn't reached by DR.
             */
            if (DYNAMO_OPTION(ignore_syscalls_follow_sysenter)) {
                bb->cur_pc = instr_get_raw_bits(call) + instr_length(dcontext, call);
                if (continue_bb != NULL)
                    *continue_bb = true;
                return true;
            } else {
                /* End this bb now. We set the exit target so that control
                 * skips the vsyscall 'ret' that's executed natively after the
                 * syscall and ends up at the correct place.
                 */
                /* FIXME Assigning exit_target causes the fragment to end
                 * with a direct exit stub to the after-call address, which
                 * is fine. If bb->exit_target < bb->start_pc, the future
                 * fragment for exit_target is marked as a trace head which
                 * isn't intended. A potentially undesirable side effect
                 * is that exit_target's fragment can't be included in
                 * trace for start_pc.
                 */
                bb->exit_target = instr_get_raw_bits(call) + instr_length(dcontext, call);
                if (continue_bb != NULL)
                    *continue_bb = false;
                return true;
            }
        }
        STATS_INC(ignorable_syscalls_failed_sysenter_pattern);
        /* Pattern match failed but the syscall is ignorable so maybe we
         * can try shared syscall? */
        /* Decrement the stat to prevent double counting. We rarely expect to hit
         * this case. */
        STATS_DEC(ignorable_syscalls);
        return false;
    }
#elif defined(MACOS)
    if (instr_get_opcode(bb->instr) == OP_sysenter) {
        /* To continue after the sysenter we need to go to the ret ibl, as user-mode
         * sysenter wrappers put the retaddr into edx as the post-kernel continuation.
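         * Exiting through the return IBL lets us pick up that continuation
         * address dynamically instead of hardcoding an exit target.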
*/ bb->exit_type |= LINK_INDIRECT | LINK_RETURN; bb->ibl_branch_type = IBL_RETURN; bb->exit_target = get_ibl_routine(dcontext, get_ibl_entry_type(bb->exit_type), DEFAULT_IBL_BB(), bb->ibl_branch_type); LOG(THREAD, LOG_INTERP, 4, "sysenter exit target = " PFX "\n", bb->exit_target); if (continue_bb != NULL) *continue_bb = false; } else if (continue_bb != NULL) *continue_bb = true; return true; #else if (continue_bb != NULL) *continue_bb = true; return true; #endif } #ifdef WINDOWS /* Process a syscall that is executed via shared syscall. */ static void bb_process_shared_syscall(dcontext_t *dcontext, build_bb_t *bb, int sysnum) { ASSERT(DYNAMO_OPTION(shared_syscalls)); DODEBUG({ if (ignorable_system_call(sysnum, bb->instr, NULL)) STATS_INC(ignorable_syscalls); else STATS_INC(optimizable_syscalls); }); BBPRINT(bb, 3, "found %soptimizable system call 0x%04x\n", INTERNAL_OPTION(shared_eq_ignore) ? "ignorable-" : "", sysnum); LOG(THREAD, LOG_INTERP, 3, "ending bb at syscall & NOT removing the interrupt itself\n"); /* Mark the instruction as pointing to shared syscall */ bb->instr->flags |= INSTR_SHARED_SYSCALL; /* this block must be the last one in a trace */ bb->flags |= FRAG_MUST_END_TRACE; /* we redirect all optimizable syscalls to a single shared piece of code. * Once a fragment reaches the shared syscall code, it can be safely * deleted, for example, if the thread is interrupted for a callback and * DR needs to delete fragments for cache management. * * Note that w/shared syscall, syscalls can be executed from TWO * places -- shared_syscall and do_syscall. */ bb->exit_target = shared_syscall_routine(dcontext); /* make sure translation for ending jmp ends up right, mangle will * remove this instruction, so set to NULL so translation does the * right thing */ bb->instr = NULL; } #endif /* WINDOWS */ #ifdef ARM /* This routine walks back to find the IT instr for the current IT block * and the position of instr in the current IT block, and returns whether * instr is the last instruction in the block. 
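 * (A Thumb IT instruction predicates the up-to-4 instructions that follow
 * it, which is why the backward walk below is bounded by 4.)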
*/ static bool instr_is_last_in_it_block(instr_t *instr, instr_t **it_out, uint *pos_out) { instr_t *it; int num_instrs; ASSERT(instr != NULL && instr_get_isa_mode(instr) == DR_ISA_ARM_THUMB && instr_is_predicated(instr) && instr_is_app(instr)); /* walk backward to find the IT instruction */ for (it = instr_get_prev(instr), num_instrs = 1; /* meta and app instrs are treated identically here */ it != NULL && num_instrs <= 4 /* max 4 instr in an IT block */; it = instr_get_prev(it)) { if (instr_is_label(it)) continue; if (instr_get_opcode(it) == OP_it) break; num_instrs++; } ASSERT(it != NULL && instr_get_opcode(it) == OP_it); ASSERT(num_instrs <= instr_it_block_get_count(it)); if (it_out != NULL) *it_out = it; if (pos_out != NULL) *pos_out = num_instrs - 1; /* pos starts from 0 */ if (num_instrs == instr_it_block_get_count(it)) return true; return false; } static void adjust_it_instr_for_split(dcontext_t *dcontext, instr_t *it, uint pos) { dr_pred_type_t block_pred[IT_BLOCK_MAX_INSTRS]; uint i, block_count = instr_it_block_get_count(it); byte firstcond[2], mask[2]; DEBUG_DECLARE(bool ok;) ASSERT(pos < instr_it_block_get_count(it) - 1); for (i = 0; i < block_count; i++) block_pred[i] = instr_it_block_get_pred(it, i); DOCHECK(CHKLVL_ASSERTS, { instr_t *instr; for (instr = instr_get_next_app(it), i = 0; instr != NULL; instr = instr_get_next_app(instr)) { ASSERT(instr_is_predicated(instr) && i <= pos); ASSERT(block_pred[i++] == instr_get_predicate(instr)); } }); DEBUG_DECLARE(ok =) instr_it_block_compute_immediates( block_pred[0], (pos > 0) ? block_pred[1] : DR_PRED_NONE, (pos > 1) ? block_pred[2] : DR_PRED_NONE, DR_PRED_NONE, /* at most 3 preds */ &firstcond[0], &mask[0]); ASSERT(ok); DOCHECK(CHKLVL_ASSERTS, { DEBUG_DECLARE(ok =) instr_it_block_compute_immediates( block_pred[pos + 1], (block_count > pos + 2) ? block_pred[pos + 2] : DR_PRED_NONE, (block_count > pos + 3) ? block_pred[pos + 3] : DR_PRED_NONE, DR_PRED_NONE, /* at most 3 preds */ &firstcond[1], &mask[1]); ASSERT(ok); }); /* firstcond should be unchanged */ ASSERT(opnd_get_immed_int(instr_get_src(it, 0)) == firstcond[0]); instr_set_src(it, 1, OPND_CREATE_INT(mask[0])); LOG(THREAD, LOG_INTERP, 3, "ending bb in an IT block & adjusting the IT instruction\n"); /* FIXME i#1669: NYI on passing split it block info to next bb */ ASSERT_NOT_IMPLEMENTED(false); } #endif /* ARM */ static bool bb_process_non_ignorable_syscall(dcontext_t *dcontext, build_bb_t *bb, int sysnum) { BBPRINT(bb, 3, "found non-ignorable system call 0x%04x\n", sysnum); STATS_INC(non_ignorable_syscalls); bb->exit_type |= LINK_NI_SYSCALL; /* destroy the interrupt instruction */ LOG(THREAD, LOG_INTERP, 3, "ending bb at syscall & removing the interrupt itself\n"); /* Indicate that this is a non-ignorable syscall so mangle will remove */ /* FIXME i#1551: maybe we should union int80 and svc as both are inline syscall? 
*/ #ifdef UNIX if (instr_get_opcode(bb->instr) == IF_X86_ELSE(OP_int, OP_svc)) { # if defined(MACOS) && defined(X86) int num = instr_get_interrupt_number(bb->instr); if (num == 0x81 || num == 0x82) { bb->exit_type |= LINK_SPECIAL_EXIT; bb->instr->flags |= INSTR_BRANCH_SPECIAL_EXIT; } else { ASSERT(num == 0x80); # endif /* MACOS && X86 */ bb->exit_type |= LINK_NI_SYSCALL_INT; bb->instr->flags |= INSTR_NI_SYSCALL_INT; # ifdef MACOS } # endif } else #endif bb->instr->flags |= INSTR_NI_SYSCALL; #ifdef ARM /* we assume all conditional syscalls are treated as non-ignorable */ if (instr_is_predicated(bb->instr)) { instr_t *it; uint pos; ASSERT(instr_is_syscall(bb->instr)); bb->svc_pred = instr_get_predicate(bb->instr); if (instr_get_isa_mode(bb->instr) == DR_ISA_ARM_THUMB && !instr_is_last_in_it_block(bb->instr, &it, &pos)) { /* FIXME i#1669: we violate the transparency and clients will see * modified IT instr. We should adjust the IT instr at mangling * stage after client instrumentation, but that is complex. */ adjust_it_instr_for_split(dcontext, it, pos); } } #endif /* Set instr to NULL in order to get translation of exit cti correct. */ bb->instr = NULL; /* this block must be the last one in a trace */ bb->flags |= FRAG_MUST_END_TRACE; return false; /* end bb now */ } /* returns true to indicate "continue bb" and false to indicate "end bb now" */ static inline bool bb_process_syscall(dcontext_t *dcontext, build_bb_t *bb) { int sysnum; #ifdef CLIENT_INTERFACE /* PR 307284: for simplicity do syscall/int processing post-client. * We give up on inlining but we can still use ignorable/shared syscalls * and trace continuation. */ if (bb->pass_to_client && !bb->post_client) return false; #endif #ifdef DGC_DIAGNOSTICS if (TEST(FRAG_DYNGEN, bb->flags) && !is_dyngen_vsyscall(bb->instr_start)) { LOG(THREAD, LOG_INTERP, 1, "WARNING: syscall @ " PFX " in dyngen code!\n", bb->instr_start); } #endif BBPRINT(bb, 4, "interp: syscall @ " PFX "\n", bb->instr_start); check_syscall_method(dcontext, bb->instr); bb->flags |= FRAG_HAS_SYSCALL; /* if we can identify syscall number and it is an ignorable syscall, * we let bb keep going, else we end bb and flag it */ sysnum = find_syscall_num(dcontext, bb->ilist, bb->instr); #ifdef VMX86_SERVER DOSTATS({ if (instr_get_opcode(bb->instr) == OP_int && instr_get_interrupt_number(bb->instr) == VMKUW_SYSCALL_GATEWAY) { STATS_INC(vmkuw_syscall_sites); LOG(THREAD, LOG_SYSCALLS, 2, "vmkuw system call site: #=%d\n", sysnum); } }); #endif BBPRINT(bb, 3, "syscall # is %d\n", sysnum); #ifdef CLIENT_INTERFACE if (sysnum != -1 && instrument_filter_syscall(dcontext, sysnum)) { BBPRINT(bb, 3, "client asking to intercept => pretending syscall # %d is -1\n", sysnum); sysnum = -1; } #endif #ifdef ARM if (sysnum != -1 && instr_is_predicated(bb->instr)) { BBPRINT(bb, 3, "conditional system calls cannot be inlined => " "pretending syscall # %d is -1\n", sysnum); sysnum = -1; } #endif if (sysnum != -1 && DYNAMO_OPTION(ignore_syscalls) && ignorable_system_call(sysnum, bb->instr, NULL) #ifdef X86 /* PR 288101: On Linux we do not yet support inlined sysenter instrs as we * do not have in-cache support for the post-sysenter continuation: we rely * for now on very simple sysenter handling where d_r_dispatch uses asynch_target * to know where to go next. 
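     * (I.e., a Linux sysenter is never treated as ignorable here: it ends
     * its bb and control returns to d_r_dispatch to decide where to go.)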
*/ IF_LINUX(&&instr_get_opcode(bb->instr) != OP_sysenter) #endif /* X86 */ ) { bool continue_bb; if (bb_process_ignorable_syscall(dcontext, bb, sysnum, &continue_bb)) { if (!DYNAMO_OPTION(inline_ignored_syscalls)) continue_bb = false; return continue_bb; } } #ifdef WINDOWS if (sysnum != -1 && DYNAMO_OPTION(shared_syscalls) && optimizable_system_call(sysnum)) { bb_process_shared_syscall(dcontext, bb, sysnum); return false; } #endif /* Fall thru and handle as a non-ignorable syscall. */ return bb_process_non_ignorable_syscall(dcontext, bb, sysnum); } /* Case 3922: for wow64 we treat "call *fs:0xc0" as a system call. * Only sets continue_bb if it returns true. */ static bool bb_process_indcall_syscall(dcontext_t *dcontext, build_bb_t *bb, bool *continue_bb) { ASSERT(continue_bb != NULL); #ifdef WINDOWS if (instr_is_wow64_syscall(bb->instr)) { /* we could check the preceding instrs but we don't bother */ *continue_bb = bb_process_syscall(dcontext, bb); return true; } #endif return false; } /* returns true to indicate "continue bb" and false to indicate "end bb now" */ static inline bool bb_process_interrupt(dcontext_t *dcontext, build_bb_t *bb) { #if defined(DEBUG) || defined(INTERNAL) || defined(WINDOWS) int num = instr_get_interrupt_number(bb->instr); #endif #ifdef CLIENT_INTERFACE /* PR 307284: for simplicity do syscall/int processing post-client. * We give up on inlining but we can still use ignorable/shared syscalls * and trace continuation. * PR 550752: we cannot end at int 0x2d: we live w/ client consequences */ if (bb->pass_to_client && !bb->post_client IF_WINDOWS(&&num != 0x2d)) return false; #endif BBPRINT(bb, 3, "int 0x%x @ " PFX "\n", num, bb->instr_start); #ifdef WINDOWS if (num == 0x2b) { /* interrupt 0x2B signals return from callback */ /* end block here and come back to dynamo to perform interrupt */ bb->exit_type |= LINK_CALLBACK_RETURN; BBPRINT(bb, 3, "ending bb at cb ret & removing the interrupt itself\n"); /* Set instr to NULL in order to get translation of exit cti * correct. mangle will destroy the instruction */ bb->instr = NULL; bb->flags |= FRAG_MUST_END_TRACE; STATS_INC(num_int2b); return false; } else { SYSLOG_INTERNAL_INFO_ONCE("non-syscall, non-int2b 0x%x @ " PFX " from " PFX, num, bb->instr_start, bb->start_pc); } #endif /* WINDOWS */ return true; } /* If the current instr in the BB is an indirect call that can be converted into a * direct call, process it and return true, else, return false. * FIXME PR 288327: put in linux call* to vsyscall page */ static bool bb_process_convertible_indcall(dcontext_t *dcontext, build_bb_t *bb) { #ifdef X86 /* We perform several levels of checking, each increasingly more stringent * and expensive, with a false return should any fail. */ instr_t *instr; opnd_t src0; instr_t *call_instr; int call_src_reg; app_pc callee; bool vsyscall = false; /* Check if this BB can be extended and the instr is a (near) indirect call */ if (instr_get_opcode(bb->instr) != OP_call_ind) return false; /* Check if we have a "mov <imm> -> %reg; call %reg" or a * "mov <imm> -> %reg; call (%reg)" pair. First check for the call. */ /* The 'if' conditions are broken up to make the code more readable * while #ifdef-ing the WINDOWS case. It's still ugly though. */ instr = bb->instr; if (!( # ifdef WINDOWS /* Match 'call (%xdx)' for a post-SP2 indirect call to sysenter. */ (opnd_is_near_base_disp(instr_get_src(instr, 0)) && opnd_get_base(instr_get_src(instr, 0)) == REG_XDX && opnd_get_disp(instr_get_src(instr, 0)) == 0) || # endif /* Match 'call %reg'. 
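           * (Memory-indirect forms other than the Windows '(%xdx)' sysenter
           * pattern above are not candidates for this conversion.)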
*/ opnd_is_reg(instr_get_src(instr, 0)))) return false; /* If there's no CTI in the BB, we can check if there are 5+ preceding * bytes and if they could hold a "mov" instruction. */ if (!TEST(FRAG_HAS_DIRECT_CTI, bb->flags) && bb->instr_start - 5 >= bb->start_pc) { byte opcode = *((byte *)bb->instr_start - 5); /* Check the opcode. Do we see a "mov ... -> %reg"? Valid opcodes are in * the 0xb8-0xbf range (Intel IA-32 ISA ref, v.2) and specify the * destination register, i.e., 0xb8 means that %xax is the destination. */ if (opcode < 0xb8 || opcode > 0xbf) return false; } /* Check the previous instruction -- is it really a "mov"? */ src0 = instr_get_src(instr, 0); call_instr = instr; instr = instr_get_prev_expanded(dcontext, bb->ilist, bb->instr); call_src_reg = opnd_is_near_base_disp(src0) ? opnd_get_base(src0) : opnd_get_reg(src0); if (instr == NULL || instr_get_opcode(instr) != OP_mov_imm || opnd_get_reg(instr_get_dst(instr, 0)) != call_src_reg) return false; /* For the general case, we don't try to optimize a call * thru memory -- just check that the call uses a register. */ callee = NULL; if (opnd_is_reg(src0)) { /* Extract the target address. */ callee = (app_pc)opnd_get_immed_int(instr_get_src(instr, 0)); # ifdef WINDOWS # ifdef PROGRAM_SHEPHERDING /* FIXME - is checking for on vsyscall page better or is checking == to * VSYSCALL_BOOTSTRAP_ADDR? Both are hacky. */ if (is_dyngen_vsyscall((app_pc)opnd_get_immed_int(instr_get_src(instr, 0)))) { LOG(THREAD, LOG_INTERP, 4, "Pre-SP2 style indirect call " "to sysenter found at " PFX "\n", bb->instr_start); STATS_INC(num_sysenter_indcalls); vsyscall = true; ASSERT(opnd_get_immed_int(instr_get_src(instr, 0)) == (ptr_int_t)VSYSCALL_BOOTSTRAP_ADDR); ASSERT(!use_ki_syscall_routines()); /* double check our determination */ } else # endif # endif STATS_INC(num_convertible_indcalls); } # ifdef WINDOWS /* Match the "call (%xdx)" to sysenter case for SP2-patched os's. Memory at * address VSYSCALL_BOOTSTRAP_ADDR (0x7ffe0300) holds the address of * KiFastSystemCall or (FIXME - not handled) on older platforms KiIntSystemCall. * FIXME It's unsavory to hard-code 0x7ffe0300, but the constant has little * context in an SP2 os. It's a hold-over from pre-SP2. */ else if (get_syscall_method() == SYSCALL_METHOD_SYSENTER && call_src_reg == REG_XDX && opnd_get_immed_int(instr_get_src(instr, 0)) == (ptr_int_t)VSYSCALL_BOOTSTRAP_ADDR) { /* Extract the target address. We expect that the memory read using the * value in the immediate field is ok as it's the vsyscall page * which 1) cannot be made unreadable and 2) cannot be made writable so * the stored value will not change. Of course, it's possible that the * os could change the page contents. */ callee = (app_pc) * ((ptr_uint_t *)opnd_get_immed_int(instr_get_src(instr, 0))); if (get_app_sysenter_addr() == NULL) { /* For the first call* we've yet to decode an app syscall, yet we * cannot have later recreations have differing behavior, so we must * handle that case (even though it doesn't matter performance-wise * as the first call* is usually in runtime init code that's * executed once). So we do a raw byte compare to: * ntdll!KiFastSystemCall: * 7c82ed50 8bd4 mov xdx,xsp * 7c82ed52 0f34 sysenter */ uint raw; if (!d_r_safe_read(callee, sizeof(raw), &raw) || raw != 0x340fd48b) callee = NULL; } else { /* The callee should be a 2 byte "mov %xsp -> %xdx" followed by the * sysenter -- check the sysenter's address as 2 bytes past the callee. 
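                 * (The "mov %xsp -> %xdx" encoding 8b d4 is exactly 2 bytes,
                 * matching the raw-byte pattern checked in the branch above.)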
*/ if (callee + 2 != get_app_sysenter_addr()) callee = NULL; } vsyscall = (callee != NULL); ASSERT(use_ki_syscall_routines()); /* double check our determination */ DODEBUG({ if (callee == NULL) ASSERT_CURIOSITY(false && "call* to vsyscall unexpected mismatch"); else { LOG(THREAD, LOG_INTERP, 4, "Post-SP2 style indirect call " "to sysenter found at " PFX "\n", bb->instr_start); STATS_INC(num_sysenter_indcalls); } }); } # endif /* Check if register dataflow matched and we were able to extract * the callee address. */ if (callee == NULL) return false; if (vsyscall) { /* Case 8917: abandon coarse-grainness in favor of performance */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_indcall); } LOG(THREAD, LOG_INTERP, 4, "interp: possible convertible" " indirect call from " PFX " to " PFX "\n", bb->instr_start, callee); if (leave_call_native(callee) || must_not_be_entered(callee)) { BBPRINT(bb, 3, " NOT inlining indirect call to " PFX "\n", callee); /* Case 8711: coarse-grain can't handle non-exit cti */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); ASSERT_CURIOSITY_ONCE(!vsyscall && "leaving call* to vsyscall"); /* no need for bb_add_native_direct_xfer() b/c it's already indirect */ return true; /* keep bb going, w/o inlining call */ } if (bb->follow_direct && !must_not_be_entered(callee) && bb->num_elide_call < DYNAMO_OPTION(max_elide_call) && (DYNAMO_OPTION(elide_back_calls) || bb->cur_pc <= callee)) { /* FIXME This is identical to the code for evaluating a * direct call's callee. If such code appears in another * (3rd) place, we should outline it. * FIXME: use follow_direct_call() */ if (vsyscall) { /* As a flag to allow our xfer from now-non-coarse to coarse * (for vsyscall-in-ntdll) we pre-emptively mark as has-syscall. */ ASSERT(!TEST(FRAG_HAS_SYSCALL, bb->flags)); bb->flags |= FRAG_HAS_SYSCALL; } if (check_new_page_jmp(dcontext, bb, callee)) { if (vsyscall) /* Restore */ bb->flags &= ~FRAG_HAS_SYSCALL; bb->num_elide_call++; STATS_INC(total_elided_calls); STATS_TRACK_MAX(max_elided_calls, bb->num_elide_call); bb->cur_pc = callee; /* FIXME: when using follow_direct_call don't forget to set this */ call_instr->flags |= INSTR_IND_CALL_DIRECT; BBPRINT(bb, 4, " continuing in callee at " PFX "\n", bb->cur_pc); return true; /* keep bb going */ } if (vsyscall) { /* Case 8917: Restore, just in case, though we certainly expect to have * this flag set as soon as we decode a few more instrs and hit the * syscall itself -- but for pre-sp2 we currently could be elsewhere on * the same page, so let's be safe here. */ bb->flags &= ~FRAG_HAS_SYSCALL; } } /* FIXME: we're also not converting to a direct call - was this intended? 
*/ BBPRINT(bb, 3, " NOT following indirect call from " PFX " to " PFX "\n", bb->instr_start, callee); DODEBUG({ if (vsyscall) { DO_ONCE({ /* Case 9095: don't complain so loudly if user asked for no elision */ if (DYNAMO_OPTION(max_elide_call) <= 2) SYSLOG_INTERNAL_WARNING("leaving call* to vsyscall"); else ASSERT_CURIOSITY(false && "leaving call* to vsyscall"); }); } }); ; #elif defined(ARM) /* FIXME i#1551: NYI on ARM */ ASSERT_NOT_IMPLEMENTED(false); #endif /* X86 */ return false; /* stop bb */ } /* FIXME i#1668, i#2974: NYI on ARM/AArch64 */ #ifdef X86 /* if we make the IAT sections unreadable we will need to map to proper location */ static inline app_pc read_from_IAT(app_pc iat_reference) { /* FIXME: we should have looked up where the real IAT should be at * the time of checking whether is_in_IAT */ return *(app_pc *)iat_reference; } /* returns whether target is an IAT of a module that we convert. Note * users still have to check the referred to value to verify targeting * a native module. */ static bool is_targeting_convertible_IAT(dcontext_t *dcontext, instr_t *instr, app_pc *iat_reference /* OUT */) { /* FIXME: we could give up on optimizing a particular module, * if too many writes to its IAT are found, * even 1 may be too much to handle! */ /* We only allow constant address, * any registers used for effective address calculation * can not be guaranteed to be constant dynamically. */ /* FIXME: yet a 'call %reg' if that value is an export would be a * good sign that we should go backwards and look for a possible * mov IAT[func] -> %reg and then optimize that as well - case 1948 */ app_pc memory_reference = NULL; opnd_t opnd = instr_get_target(instr); LOG(THREAD, LOG_INTERP, 4, "is_targeting_convertible_IAT: "); /* A typical example of a proper call * ff 15 8810807c call dword ptr [kernel32+0x1088 (7c801088)] * where * [7c801088] = 7c90f04c ntdll!RtlAnsiStringToUnicodeString * * The ModR/M byte for a displacement only with no SIB should be * 15 for CALL, 25 for JMP, (no far versions for IAT) */ if (opnd_is_near_base_disp(opnd)) { /* FIXME PR 253930: pattern-match x64 IAT calls */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); memory_reference = (app_pc)(ptr_uint_t)opnd_get_disp(opnd); /* now should check all other fields */ if (opnd_get_base(opnd) != REG_NULL || opnd_get_index(opnd) != REG_NULL) { /* this is not a pure memory reference, can't be IAT */ return false; } ASSERT(opnd_get_scale(opnd) == 0); } else { return false; } LOG(THREAD, LOG_INTERP, 3, "is_targeting_convertible_IAT: memory_reference " PFX "\n", memory_reference); /* FIXME: if we'd need some more additional structures those can * be looked up in a separate hashtable based on the IAT base, or * we'd have to extend the vmareas with custom fields */ ASSERT(DYNAMO_OPTION(IAT_convert)); if (vmvector_overlap(IAT_areas, memory_reference, memory_reference + 1)) { /* IAT has to be in the same module as current instruction, * but even in the unlikely reference by address from another * module there is really no problem, so not worth checking */ ASSERT_CURIOSITY(get_module_base(instr->bytes) == get_module_base(memory_reference)); /* FIXME: now that we know it is in IAT/GOT, * we have to READ the contents and return that * safely to the caller so they can convert accordingly */ /* FIXME: we would want to add the IAT section to the vmareas * of a region that has a converted block. 
Then on a write to * IAT we can flush efficiently only blocks affected by a * particular module, for a first hack though flushing * everything on a hooker will do. */ *iat_reference = memory_reference; return true; } else { /* plain global function * e.g. ntdll!RtlUnicodeStringToAnsiString+0x4c: * ff15c009917c call dword ptr [ntdll!RtlAllocateStringRoutine (7c9109c0)] */ return false; } } #endif /* X86 */ /* If the current instr in the BB is an indirect call through IAT that * can be converted into a direct call, process it and return true, * else, return false. */ static bool bb_process_IAT_convertible_indjmp(dcontext_t *dcontext, build_bb_t *bb, bool *elide_continue) { #ifdef X86 app_pc iat_reference; app_pc target; ASSERT(DYNAMO_OPTION(IAT_convert)); /* Check if the instr is a (near) indirect jump */ if (instr_get_opcode(bb->instr) != OP_jmp_ind) { ASSERT_CURIOSITY(false && "far ind jump"); return false; /* not matching, stop bb */ } if (!is_targeting_convertible_IAT(dcontext, bb->instr, &iat_reference)) { DOSTATS({ if (EXIT_IS_IND_JMP_PLT(bb->exit_type)) { /* see how often we mark as likely a PLT a JMP which in * fact is not going through IAT */ STATS_INC(num_indirect_jumps_PLT_not_IAT); LOG(THREAD, LOG_INTERP, 3, "bb_process_IAT_convertible_indjmp: indirect jmp not PLT instr=" PFX "\n", bb->instr->bytes); } }); return false; /* not matching, stop bb */ } target = read_from_IAT(iat_reference); DOLOG(4, LOG_INTERP, { char name[MAXIMUM_SYMBOL_LENGTH]; print_symbolic_address(target, name, sizeof(name), false); LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indjmp: target=" PFX " %s\n", target, name); }); STATS_INC(num_indirect_jumps_IAT); DOSTATS({ if (!EXIT_IS_IND_JMP_PLT(bb->exit_type)) { /* count any other known uses for an indirect jump to go * through the IAT other than PLT uses, although a block * reaching max_elide_call would prevent the above * match */ STATS_INC(num_indirect_jumps_IAT_not_PLT); /* FIXME: case 6459 for further inquiry */ LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indjmp: indirect jmp not PLT target=" PFX "\n", target); } }); if (must_not_be_elided(target)) { ASSERT_NOT_TESTED(); BBPRINT(bb, 3, " NOT inlining indirect jmp to must_not_be_elided " PFX "\n", target); return false; /* do not convert indirect jump, will stop bb */ } /* Verify not targeting native exec DLLs, note that the IATs of * any module may have imported a native DLL. Note it may be * possible to optimize with a range check on IAT subregions, but * this check isn't much slower. */ /* IAT_elide should definitely not touch native_exec modules. * * FIXME: we also prevent IAT_convert from optimizing imports in * native_exec_list DLLs, although we could let that convert to a * direct jump and require native_exec_dircalls to be always on to * intercept those jmps. 
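     * For now the check below simply refuses to convert when the IAT entry
     * points at a native-execution module.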
*/ if (DYNAMO_OPTION(native_exec) && is_native_pc(target)) { BBPRINT(bb, 3, " NOT inlining indirect jump to native exec module " PFX "\n", target); STATS_INC(num_indirect_jumps_IAT_native); return false; /* do not convert indirect jump, stop bb */ } /* mangle mostly as such as direct jumps would be mangled in * bb_process_ubr(dcontext, bb) but note bb->instr has already * been appended so has to reverse some of its actions */ /* pretend never saw an indirect JMP, we'll either add a new direct JMP or we'll just continue in target */ instrlist_remove(bb->ilist, bb->instr); /* bb->instr has been appended already */ instr_destroy(dcontext, bb->instr); bb->instr = NULL; if (DYNAMO_OPTION(IAT_elide)) { /* try to elide just as a direct jmp would have been elided */ /* We could have used follow_direct_call instead since * commonly this really is a disguised CALL*. Yet for PLT use * of the form of CALL PLT[foo]; JMP* IAT[foo] we would have * already counted the CALL. If we have tail call elimination * that converts a CALL* into a JMP* it is also OK to treat as * a JMP instead of a CALL just as if sharing tails. */ if (follow_direct_jump(dcontext, bb, target)) { LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indjmp: eliding jmp* target=" PFX "\n", target); STATS_INC(num_indirect_jumps_IAT_elided); *elide_continue = true; /* do not stop bb */ return true; /* converted indirect to direct */ } } /* otherwise convert to direct jump without eliding */ /* we set bb->instr to NULL so unlike bb_process_ubr * we get the final exit_target added by build_bb_ilist * FIXME: case 85: which will work only when we're using bb->mangle_ilist * FIXME: what are callers supposed to see when we do NOT mangle? */ LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indjmp: converting jmp* target=" PFX "\n", target); STATS_INC(num_indirect_jumps_IAT_converted); /* end basic block with a direct JMP to target */ bb->exit_target = target; *elide_continue = false; /* matching, but should stop bb */ return true; /* matching */ #elif defined(AARCHXX) /* FIXME i#1551, i#1569: NYI on ARM/AArch64 */ ASSERT_NOT_IMPLEMENTED(false); return false; #endif /* X86/ARM */ } /* Returns true if the current instr in the BB is an indirect call * through IAT that can be converted into a direct call, process it * and sets elide_continue. Otherwise function return false. * OUT elide_continue is set when bb building should continue in target, * and not set when bb building should be stopped. */ static bool bb_process_IAT_convertible_indcall(dcontext_t *dcontext, build_bb_t *bb, bool *elide_continue) { #ifdef X86 app_pc iat_reference; app_pc target; ASSERT(DYNAMO_OPTION(IAT_convert)); /* FIXME: the code structure is the same as * bb_process_IAT_convertible_indjmp, could fuse the two */ /* We perform several levels of checking, each increasingly more stringent * and expensive, with a false return should any fail. 
*/ /* Check if the instr is a (near) indirect call */ if (instr_get_opcode(bb->instr) != OP_call_ind) { ASSERT_CURIOSITY(false && "far call"); return false; /* not matching, stop bb */ } if (!is_targeting_convertible_IAT(dcontext, bb->instr, &iat_reference)) { return false; /* not matching, stop bb */ } target = read_from_IAT(iat_reference); DOLOG(4, LOG_INTERP, { char name[MAXIMUM_SYMBOL_LENGTH]; print_symbolic_address(target, name, sizeof(name), false); LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indcall: target=" PFX " %s\n", target, name); }); STATS_INC(num_indirect_calls_IAT); /* mangle mostly as such as direct calls are mangled with * bb_process_call_direct(dcontext, bb) */ if (leave_call_native(target) || must_not_be_entered(target)) { ASSERT_NOT_TESTED(); BBPRINT(bb, 3, " NOT inlining indirect call to leave_call_native " PFX "\n", target); return false; /* do not convert indirect call, stop bb */ } /* Verify not targeting native exec DLLs, note that the IATs of * any module may have imported a native DLL. Note it may be * possible to optimize with a range check on IAT subregions, but * this check isn't much slower. */ if (DYNAMO_OPTION(native_exec) && is_native_pc(target)) { BBPRINT(bb, 3, " NOT inlining indirect call to native exec module " PFX "\n", target); STATS_INC(num_indirect_calls_IAT_native); return false; /* do not convert indirect call, stop bb */ } /* mangle_indirect_call and calculate return address as of * bb->instr and will remove bb->instr * FIXME: it would have been * better to replace in instrlist with a direct call and have * mangle_{in,}direct_call use other than the raw bytes, but this for now does the * job. */ bb->instr->flags |= INSTR_IND_CALL_DIRECT; if (DYNAMO_OPTION(IAT_elide)) { /* try to elide just as a direct call would have been elided */ if (follow_direct_call(dcontext, bb, target)) { LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indcall: eliding call* flags=0x%08x " "target=" PFX "\n", bb->instr->flags, target); STATS_INC(num_indirect_calls_IAT_elided); *elide_continue = true; /* do not stop bb */ return true; /* converted indirect to direct */ } } /* otherwise convert to direct call without eliding */ LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indcall: converting call* flags=0x%08x target=" PFX "\n", bb->instr->flags, target); STATS_INC(num_indirect_calls_IAT_converted); /* bb->instr has been appended already, and will get removed by * mangle_indirect_call. We don't need to set to NULL, since this * instr is a CTI and the final jump's translation target should * still be the original indirect call. */ bb->exit_target = target; /* end basic block with a direct CALL to target. With default * options it should get mangled to a PUSH; JMP */ *elide_continue = false; /* matching, but should stop bb */ return true; /* converted indirect to direct */ #elif defined(AARCHXX) /* FIXME i#1551, i#1569: NYI on ARM/AArch64 */ ASSERT_NOT_IMPLEMENTED(false); return false; #endif /* X86/ARM */ } /* Called on instructions that save the FPU state */ static void bb_process_float_pc(dcontext_t *dcontext, build_bb_t *bb) { /* i#698: for instructions that save the floating-point state * (e.g., fxsave), we go back to d_r_dispatch to translate the fp pc. * We rule out being in a trace (and thus a potential alternative * would be to use a FRAG_ flag). 
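     * (Ruling out traces means only bb fragments ever need the special-exit
     * treatment for this case.)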
     * These are rare instructions so this
     * shouldn't have a significant perf impact: except we've been hitting
     * libm code that uses fnstenv and is not rare, so we have non-inlined
     * translation under an option for now.
     */
    if (DYNAMO_OPTION(translate_fpu_pc)) {
        bb->exit_type |= LINK_SPECIAL_EXIT;
        bb->flags |= FRAG_CANNOT_BE_TRACE;
    }
    /* If we inline the pc update, we can't persist. Simplest to keep fine-grained. */
    bb->flags &= ~FRAG_COARSE_GRAIN;
}

static bool
instr_will_be_exit_cti(instr_t *inst)
{
    /* can't use instr_is_exit_cti() on pre-mangled instrs */
    return (instr_is_app(inst) && instr_is_cti(inst) &&
            (!instr_is_near_call_direct(inst) ||
             !leave_call_native(instr_get_branch_target_pc(inst)))
            /* PR 239470: ignore wow64 syscall, which is an ind call */
            IF_WINDOWS(&&!instr_is_wow64_syscall(inst)));
}

#ifdef CLIENT_INTERFACE
/* PR 215217: check syscall restrictions */
static bool
client_check_syscall(instrlist_t *ilist, instr_t *inst, bool *found_syscall,
                     bool *found_int)
{
    int op_int = IF_X86_ELSE(OP_int, OP_svc);
    /* We do consider the wow64 call* a syscall here (it is both
     * a syscall and a call*: PR 240258). */
    if (instr_is_syscall(inst) || instr_get_opcode(inst) == op_int) {
        if (instr_is_syscall(inst) && found_syscall != NULL)
            *found_syscall = true;
        /* Xref PR 313869 - we should be ignoring int 3 here. */
        if (instr_get_opcode(inst) == op_int && found_int != NULL)
            *found_int = true;
        /* For linux an ignorable syscall is not a problem. Our
         * pre-syscall-exit jmp is added post client mangling so should
         * be robust.
         * FIXME: now that we have -no_inline_ignored_syscalls should
         * we assert on ignorable also? Probably we'd have to have
         * an exception for the middle of a trace? */
        if (IF_UNIX(TEST(INSTR_NI_SYSCALL, inst->flags))
            /* PR 243391: only block-ending interrupt 2b matters */
            IF_WINDOWS(instr_is_syscall(inst) ||
                       ((instr_get_opcode(inst) == OP_int &&
                         instr_get_interrupt_number(inst) == 0x2b)))) {
            /* This check means we shouldn't hit the exit_type flags
             * check below but we leave it in place in case we add
             * other flags in future */
            if (inst != instrlist_last(ilist)) {
                CLIENT_ASSERT(false, "a syscall or interrupt must terminate the block");
                return false;
            }
            /* should we forcibly delete the subsequent instrs?
             * or the client has to deal w/ bad behavior in release build? */
        }
    }
    return true;
}

/* Pass bb to client, and afterward check for criteria we require and rescan for
 * eflags and other flags that might have changed.
 * Returns true normally; returns false to indicate "go native".
 */
static bool
client_process_bb(dcontext_t *dcontext, build_bb_t *bb)
{
    dr_emit_flags_t emitflags = DR_EMIT_DEFAULT;
    instr_t *inst;
    bool found_exit_cti = false;
    bool found_syscall = false;
    bool found_int = false;
# ifdef ANNOTATIONS
    app_pc trailing_annotation_pc = NULL, instrumentation_pc = NULL;
    bool found_instrumentation_pc = false;
    instr_t *annotation_label = NULL;
# endif
    instr_t *last_app_instr = NULL;

    /* This routine is called by more than just bb builder, also used
     * for recreating state, so only call if caller requested it
     * (usually that coincides w/ bb->app_interp being set, but not
     * when recreating state on a fault (PR 214962)).
     * FIXME: hot patches shouldn't be injected during state recreations;
     * does predicating on bb->app_interp take care of this issue? */
    if (!bb->pass_to_client)
        return true;

    /* i#995: DR may build a bb with one invalid instruction, which won't be
     * passed to client.
     * FIXME: i#1000, we should present the bb to the client.
     * i#1000-c#1: the bb->ilist could be empty.
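     * (Hence the instrlist_first() NULL check below.)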
*/ if (instrlist_first(bb->ilist) == NULL) return true; if (!instr_opcode_valid(instrlist_first(bb->ilist)) && /* For -fast_client_decode we can have level 0 instrs so check * to ensure this is a single-instr bb that was built just to * raise the fault for us. * XXX i#1000: shouldn't we pass this to the client? It might not handle an * invalid instr properly though. */ instrlist_first(bb->ilist) == instrlist_last(bb->ilist)) { return true; } /* Call the bb creation callback(s) */ if (!instrument_basic_block(dcontext, /* DrMem#1735: pass app pc, not selfmod copy pc */ (bb->pretend_pc == NULL ? bb->start_pc : bb->pretend_pc), bb->ilist, bb->for_trace, !bb->app_interp, &emitflags)) { /* although no callback was called we must process syscalls/ints (PR 307284) */ } if (bb->for_cache && TEST(DR_EMIT_GO_NATIVE, emitflags)) { LOG(THREAD, LOG_INTERP, 2, "client requested that we go native\n"); SYSLOG_INTERNAL_INFO("thread " TIDFMT " is going native at client request", d_r_get_thread_id()); /* we leverage the existing native_exec mechanism */ dcontext->native_exec_postsyscall = bb->start_pc; dcontext->next_tag = BACK_TO_NATIVE_AFTER_SYSCALL; dynamo_thread_not_under_dynamo(dcontext); return false; } bb->post_client = true; /* FIXME: instrumentor may totally mess us up -- our flags * or syscall info might be wrong. xref PR 215217 */ /* PR 215217, PR 240265: * We need to check for client changes that require a new exit * target. We can't practically analyze the instrlist to decipher * the exit, so we'll search backwards and require that the last * cti is the exit cti. Typically, the last instruction in the * block should be the exit. Post-mbr and post-syscall positions * are particularly fragile, as our mangling code sets state up for * the exit that could be messed up by instrs inserted after the * mbr/syscall. We thus disallow such instrs (except for * dr_insert_mbr_instrumentation()). xref cases 10503, 10782, 10784 * * Here's what we support: * - more than one exit cti; all but the last must be a ubr * - an exit cbr or call must be the final instr in the block * - only one mbr; must be the final instr in the block and the exit target * - clients can't change the exit of blocks ending in a syscall * (or int), and the syscall must be the final instr in the block; * client can, however, remove the syscall and then add a different exit * - client can't add a translation target that's outside of the original * source code bounds, or else our cache consistency breaks down * (the one exception to this is that a jump can translate to its target) */ /* we set to NULL to have a default of fall-through */ bb->exit_target = NULL; bb->exit_type = 0; /* N.B.: we're walking backward */ for (inst = instrlist_last(bb->ilist); inst != NULL; inst = instr_get_prev(inst)) { if (!instr_opcode_valid(inst)) continue; # ifdef X86 if (!d_r_is_avx512_code_in_use()) { if (ZMM_ENABLED()) { if (instr_may_write_zmm_register(inst)) d_r_set_avx512_code_in_use(true); } } # endif if (instr_is_cti(inst) && inst != instrlist_last(bb->ilist)) { /* PR 213005: coarse_units can't handle added ctis (meta or not) * since decode_fragment(), used for state recreation, can't * distinguish from exit cti. 
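             * (decode_fragment() rebuilds a fragment's ilist from its cache
             * code alone, with no side record of which ctis a client added.)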
* i#665: we now support intra-fragment meta ctis * to make persistence usable for clients */ if (!opnd_is_instr(instr_get_target(inst)) || instr_is_app(inst)) { bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_client); } } if (instr_is_meta(inst)) { # ifdef ANNOTATIONS /* Save the trailing_annotation_pc in case a client truncated the bb there. */ if (is_annotation_label(inst) && last_app_instr == NULL) { dr_instr_label_data_t *label_data = instr_get_label_data_area(inst); trailing_annotation_pc = GET_ANNOTATION_APP_PC(label_data); instrumentation_pc = GET_ANNOTATION_INSTRUMENTATION_PC(label_data); annotation_label = inst; } # endif continue; } # ifdef ANNOTATIONS if (instrumentation_pc != NULL && !found_instrumentation_pc && instr_get_translation(inst) == instrumentation_pc) found_instrumentation_pc = true; # endif /* in case bb was truncated, find last non-meta fall-through */ if (last_app_instr == NULL) last_app_instr = inst; /* PR 215217: client should not add new source code regions, else our * cache consistency (both page prot and selfmod) will fail */ ASSERT(!bb->for_cache || bb->vmlist != NULL); /* For selfmod recreation we don't check vmareas so we don't have vmlist. * We live w/o the checks there. */ CLIENT_ASSERT( !bb->for_cache || vm_list_overlaps(dcontext, bb->vmlist, instr_get_translation(inst), instr_get_translation(inst) + 1) || (instr_is_ubr(inst) && opnd_is_pc(instr_get_target(inst)) && instr_get_translation(inst) == opnd_get_pc(instr_get_target(inst))) /* the displaced code and jmp return from intercept buffer * has translation fields set to hooked app routine */ IF_WINDOWS(|| dr_fragment_app_pc(bb->start_pc) != bb->start_pc), "block's app sources (instr_set_translation() targets) " "must remain within original bounds"); # ifdef AARCH64 if (instr_get_opcode(inst) == OP_isb) { CLIENT_ASSERT(inst == instrlist_last(bb->ilist), "OP_isb must be last instruction in block"); } # endif /* PR 307284: we didn't process syscalls and ints pre-client * so do so now to get bb->flags and bb->exit_type set */ if (instr_is_syscall(inst) || instr_get_opcode(inst) == IF_X86_ELSE(OP_int, OP_svc)) { instr_t *tmp = bb->instr; bb->instr = inst; if (instr_is_syscall(bb->instr)) bb_process_syscall(dcontext, bb); else if (instr_get_opcode(bb->instr) == IF_X86_ELSE(OP_int, OP_svc)) { /* non-syscall int */ bb_process_interrupt(dcontext, bb); } if (inst != instrlist_last(bb->ilist)) bb->instr = tmp; } /* ensure syscall/int2b terminates block */ client_check_syscall(bb->ilist, inst, &found_syscall, &found_int); if (instr_will_be_exit_cti(inst)) { if (!found_exit_cti) { /* We're about to clobber the exit_type and could lose any * special flags set above, even if the client doesn't change * the exit target. We undo such flags after this ilist walk * to support client removal of syscalls/ints. * EXIT_IS_IND_JMP_PLT() is used for -IAT_{convert,elide}, which * is off by default for CI; it's also used for native_exec, * but we're not sure if we want to support that with CI. 
* xref case 10846 and i#198 */ CLIENT_ASSERT( !TEST(~(LINK_DIRECT | LINK_INDIRECT | LINK_CALL | LINK_RETURN | LINK_JMP | LINK_NI_SYSCALL_ALL | LINK_SPECIAL_EXIT IF_WINDOWS(| LINK_CALLBACK_RETURN)), bb->exit_type) && !EXIT_IS_IND_JMP_PLT(bb->exit_type), "client unsupported block exit type internal error"); found_exit_cti = true; bb->instr = inst; if ((instr_is_near_ubr(inst) || instr_is_near_call_direct(inst)) /* conditional OP_bl needs the cbr code below */ IF_ARM(&&!instr_is_cbr(inst))) { CLIENT_ASSERT(instr_is_near_ubr(inst) || inst == instrlist_last(bb->ilist) || /* for elision we assume calls are followed * by their callee target code */ DYNAMO_OPTION(max_elide_call) > 0, "an exit call must terminate the block"); /* a ubr need not be the final instr */ if (inst == last_app_instr) { bb->exit_target = instr_get_branch_target_pc(inst); bb->exit_type = instr_branch_type(inst); } } else if (instr_is_mbr(inst) || instr_is_far_cti(inst) IF_ARM(/* mode-switch direct is treated as indirect */ || instr_get_opcode(inst) == OP_blx)) { CLIENT_ASSERT(inst == instrlist_last(bb->ilist), "an exit mbr or far cti must terminate the block"); bb->exit_type = instr_branch_type(inst); # ifdef ARM if (instr_get_opcode(inst) == OP_blx) bb->ibl_branch_type = IBL_INDCALL; else # endif bb->ibl_branch_type = get_ibl_branch_type(inst); bb->exit_target = get_ibl_routine(dcontext, get_ibl_entry_type(bb->exit_type), DEFAULT_IBL_BB(), bb->ibl_branch_type); } else { ASSERT(instr_is_cbr(inst)); CLIENT_ASSERT(inst == instrlist_last(bb->ilist), "an exit cbr must terminate the block"); /* A null exit target specifies a cbr (see below). */ bb->exit_target = NULL; bb->exit_type = 0; instr_exit_branch_set_type(bb->instr, instr_branch_type(inst)); } /* since we're walking backward, at the first exit cti * we can check for post-cti code */ if (inst != instrlist_last(bb->ilist)) { if (TEST(FRAG_COARSE_GRAIN, bb->flags)) { /* PR 213005: coarse can't handle code beyond ctis */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_client); } /* decode_fragment can't handle code beyond ctis */ if (!instr_is_near_call_direct(inst) || DYNAMO_OPTION(max_elide_call) == 0) bb->flags |= FRAG_CANNOT_BE_TRACE; } } /* Case 10784: Clients can confound trace building when they * introduce more than one exit cti; we'll just disable traces * for these fragments. * PR 215179: we're currently later marking them no-trace for pad_jmps * reasons as well. */ else { CLIENT_ASSERT(instr_is_near_ubr(inst) || (instr_is_near_call_direct(inst) && /* for elision we assume calls are followed * by their callee target code */ DYNAMO_OPTION(max_elide_call) > 0), "a second exit cti must be a ubr"); if (!instr_is_near_call_direct(inst) || DYNAMO_OPTION(max_elide_call) == 0) bb->flags |= FRAG_CANNOT_BE_TRACE; /* our cti check above should have already turned off coarse */ ASSERT(!TEST(FRAG_COARSE_GRAIN, bb->flags)); } } } /* To handle the client modifying syscall numbers we cannot inline * syscalls in the middle of a bb. */ ASSERT(!DYNAMO_OPTION(inline_ignored_syscalls)); ASSERT((TEST(FRAG_HAS_SYSCALL, bb->flags) && found_syscall) || (!TEST(FRAG_HAS_SYSCALL, bb->flags) && !found_syscall)); IF_WINDOWS(ASSERT(!TEST(LINK_CALLBACK_RETURN, bb->exit_type) || found_int)); /* Note that we do NOT remove, or set, FRAG_HAS_DIRECT_CTI based on * client modifications: setting it for a selfmod fragment could * result in an infinite loop, and it is mainly used for elision, which we * are not doing for client ctis. 
Clients are not supposed to add new * app source regions (PR 215217). */ /* Client might have truncated: re-set fall-through, accounting for annotations. */ if (last_app_instr != NULL) { bool adjusted_cur_pc = false; app_pc xl8 = instr_get_translation(last_app_instr); # ifdef ANNOTATIONS if (annotation_label != NULL) { if (found_instrumentation_pc) { /* i#1613: if the last app instruction precedes an annotation, extend the * translation footprint of `bb` to include the annotation (such that * the next bb starts after the annotation, avoiding duplication). */ bb->cur_pc = trailing_annotation_pc; adjusted_cur_pc = true; LOG(THREAD, LOG_INTERP, 3, "BB ends immediately prior to an annotation. " "Setting `bb->cur_pc` (for fall-through) to " PFX " so that the " "annotation will be included.\n", bb->cur_pc); } else { /* i#1613: the client removed the app instruction prior to an annotation. * We infer that the client wants to skip the annotation. Remove it now. */ instr_t *annotation_next = instr_get_next(annotation_label); instrlist_remove(bb->ilist, annotation_label); instr_destroy(dcontext, annotation_label); if (is_annotation_return_placeholder(annotation_next)) { instrlist_remove(bb->ilist, annotation_next); instr_destroy(dcontext, annotation_next); } } } # endif # if defined(WINDOWS) && !defined(STANDALONE_DECODER) /* i#1632: if the last app instruction was taken from an intercept because it was * occluded by the corresponding hook, `bb->cur_pc` should point to the original * app pc (where that instruction was copied from). Cannot use `decode_next_pc()` * on the original app pc because it is now in the middle of the hook. */ if (!adjusted_cur_pc && could_be_hook_occluded_pc(xl8)) { app_pc intercept_pc = get_intercept_pc_from_app_pc( xl8, true /* occlusions only */, false /* exclude start */); if (intercept_pc != NULL) { app_pc next_intercept_pc = decode_next_pc(dcontext, intercept_pc); bb->cur_pc = xl8 + (next_intercept_pc - intercept_pc); adjusted_cur_pc = true; LOG(THREAD, LOG_INTERP, 3, "BB ends in the middle of an intercept. " "Offsetting `bb->cur_pc` (for fall-through) to " PFX " in parallel " "to intercept instr at " PFX "\n", intercept_pc, bb->cur_pc); } } # endif /* We do not take instr_length of what the client put in, but rather * the length of the translation target */ if (!adjusted_cur_pc) { bb->cur_pc = decode_next_pc(dcontext, xl8); LOG(THREAD, LOG_INTERP, 3, "setting cur_pc (for fall-through) to " PFX "\n", bb->cur_pc); } /* don't set bb->instr if last instr is still syscall/int. * FIXME: I'm not 100% convinced the logic here covers everything * build_bb_ilist does. * FIXME: what about if last instr was invalid, or if client adds * some invalid instrs: xref bb_process_invalid_instr() */ if (bb->instr != NULL || (!found_int && !found_syscall)) bb->instr = last_app_instr; } else bb->instr = NULL; /* no app instrs left */ /* PR 215217: re-scan for accurate eflags. * FIXME: should we not do eflags tracking while decoding, then, and always * do it afterward?
*/ /* for -fast_client_decode, we don't support the client changing the app code */ if (!INTERNAL_OPTION(fast_client_decode)) { bb->eflags = forward_eflags_analysis(dcontext, bb->ilist, instrlist_first(bb->ilist)); } if (TEST(DR_EMIT_STORE_TRANSLATIONS, emitflags)) { /* PR 214962: let client request storage instead of recreation */ bb->flags |= FRAG_HAS_TRANSLATION_INFO; /* if we didn't have record on from start, can't store translation info */ CLIENT_ASSERT(!INTERNAL_OPTION(fast_client_decode), "-fast_client_decode not compatible with " "DR_EMIT_STORE_TRANSLATIONS"); ASSERT(bb->record_translation && bb->full_decode); } if (DYNAMO_OPTION(coarse_enable_freeze)) { /* If we're not persisting, ignore the presence or absence of the flag * so we avoid undoing savings from -opt_memory with a tool that * doesn't support persistence. */ if (!TEST(DR_EMIT_PERSISTABLE, emitflags)) { bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_client); } } if (TEST(DR_EMIT_MUST_END_TRACE, emitflags)) { /* i#848: let client terminate traces */ bb->flags |= FRAG_MUST_END_TRACE; } return true; } #endif /* CLIENT_INTERFACE */ #ifdef DR_APP_EXPORTS static void mangle_pre_client(dcontext_t *dcontext, build_bb_t *bb) { if (bb->start_pc == (app_pc)dr_app_running_under_dynamorio) { /* i#1237: set return value to be true in dr_app_running_under_dynamorio */ instr_t *ret = instrlist_last(bb->ilist); instr_t *mov = instr_get_prev(ret); LOG(THREAD, LOG_INTERP, 3, "Found dr_app_running_under_dynamorio\n"); ASSERT(ret != NULL && instr_is_return(ret) && mov != NULL && IF_X86(instr_get_opcode(mov) == OP_mov_imm &&) IF_ARM(instr_get_opcode(mov) == OP_mov && OPND_IS_IMMED_INT(instr_get_src(mov, 0)) &&) IF_AARCH64(instr_get_opcode(mov) == OP_movz &&)( bb->start_pc == instr_get_raw_bits(mov) || /* the translation field might be NULL */ bb->start_pc == instr_get_translation(mov))); /* i#1998: ensure the instr is Level 3+ */ instr_decode(dcontext, mov); instr_set_src(mov, 0, OPND_CREATE_INT32(1)); } } #endif /* DR_APP_EXPORTS */ /* This routine is called from build_bb_ilist when the number of instructions reaches or * exceeds max_bb_instr. It checks if bb is safe to stop after instruction stop_after. * On ARM, we do not stop bb building in the middle of an IT block unless there is a * conditional syscall. */ static bool bb_safe_to_stop(dcontext_t *dcontext, instrlist_t *ilist, instr_t *stop_after) { #ifdef ARM ASSERT(ilist != NULL && instrlist_last(ilist) != NULL); /* only thumb mode could have IT blocks */ if (dr_get_isa_mode(dcontext) != DR_ISA_ARM_THUMB) return true; if (stop_after == NULL) stop_after = instrlist_last_app(ilist); if (instr_get_opcode(stop_after) == OP_it) return false; if (!instr_is_predicated(stop_after)) return true; if (instr_is_cti(stop_after) /* must be the last instr if in IT block */ || /* we do not stop in the middle of an IT block unless it is a syscall */ instr_is_syscall(stop_after) || instr_is_interrupt(stop_after)) return true; return instr_is_last_in_it_block(stop_after, NULL, NULL); #endif /* ARM */ return true; } /* Interprets the application's instructions until the end of a basic * block is found, and prepares the resulting instrlist for creation of * a fragment, but does not create the fragment, just returns the instrlist. * Caller is responsible for freeing the list and its instrs! * * Input parameters in bb control aspects of creation: * If app_interp is true, this is considered real app code. 
* If pass_to_client is true, * calls instrument routine on bb->ilist before mangling * If mangle_ilist is true, mangles the ilist, else leaves it in app form * If record_vmlist is true, updates the vmareas data structures * If for_cache is true, bb building lock is assumed to be held. * record_vmlist should also be true. * Caller must set and later clear dcontext->bb_build_info. * For !for_cache, build_bb_ilist() sets and clears it, making the * assumption that the caller is doing no other reading from the region. * If record_translation is true, records translation for inserted instrs * If outf != NULL, does full disassembly with comments to outf * If overlap_info != NULL, records overlap information for the block in * the overlap_info (caller must fill in region_start and region_end). * * FIXME: now that we have better control over following direct ctis, * should we have an adaptive mechanism to decide whether to follow direct * ctis, since some bmarks are better doing so (gap, vortex, wupwise) * and others are worse (apsi, perlbmk)? */ static void build_bb_ilist(dcontext_t *dcontext, build_bb_t *bb) { /* Design decision: we will not try to identify branches that target * instructions in this basic block, when we take those branches we will * just make a new basic block and duplicate part of this one */ int total_branches = 0; uint total_instrs = 0; /* maximum number of instructions for current basic block */ uint cur_max_bb_instrs = DYNAMO_OPTION(max_bb_instrs); uint total_writes = 0; /* only used for selfmod */ instr_t *non_cti; /* used if !full_decode */ byte *non_cti_start_pc; /* used if !full_decode */ uint eflags_6 = 0; /* holds arith eflags written so far (in read slots) */ #ifdef HOT_PATCHING_INTERFACE bool hotp_should_inject = false, hotp_injected = false; #endif app_pc page_start_pc = (app_pc)NULL; bool bb_build_nested = false; /* Caller will free objects allocated here so we must use the passed-in * dcontext for allocation; we need separate var for non-global dcontext. */ dcontext_t *my_dcontext = get_thread_private_dcontext(); DEBUG_DECLARE(bool regenerated = false;) bool stop_bb_on_fallthrough = false; ASSERT(bb->initialized); /* note that it's ok for bb->start_pc to be NULL as our check_new_page_start * will catch it */ /* vmlist must start out empty (or N/A) */ ASSERT(bb->vmlist == NULL || !bb->record_vmlist || bb->checked_start_vmarea); ASSERT(!bb->for_cache || bb->record_vmlist); /* for_cache assumes record_vmlist */ #ifdef CUSTOM_TRACES_RET_REMOVAL my_dcontext->num_calls = 0; my_dcontext->num_rets = 0; #endif /* Support bb abort on decode fault */ if (my_dcontext != NULL) { if (bb->for_cache) { /* Caller should have set! */ ASSERT(bb == (build_bb_t *)my_dcontext->bb_build_info); } else if (my_dcontext->bb_build_info == NULL) { my_dcontext->bb_build_info = (void *)bb; } else { /* For nested we leave the original, which should be the only vmlist, * and we give up on freeing dangling instr_t and instrlist_t from this * decode. * We need the original's for_cache so we know to free the bb_building_lock. * FIXME: use TRY to handle decode exceptions locally? Shouldn't have * violation remediations on a !for_cache build.
*/ ASSERT(bb->vmlist == NULL && !bb->for_cache && ((build_bb_t *)my_dcontext->bb_build_info)->for_cache); /* FIXME: add nested as a field so we can have stat on nested faults */ bb_build_nested = true; } } else ASSERT(dynamo_exited); if ((bb->record_translation IF_CLIENT_INTERFACE( &&!INTERNAL_OPTION(fast_client_decode))) || !bb->for_cache /* to split riprel, need to decode every instr */ /* in x86_to_x64, need to translate every x86 instr */ IF_X64(|| DYNAMO_OPTION(coarse_split_riprel) || DYNAMO_OPTION(x86_to_x64)) IF_CLIENT_INTERFACE(|| INTERNAL_OPTION(full_decode))) bb->full_decode = true; else { #if defined(STEAL_REGISTER) || defined(CHECK_RETURNS_SSE2) bb->full_decode = true; #endif } LOG(THREAD, LOG_INTERP, 3, "\ninterp%s: ", IF_X86_64_ELSE(X64_MODE_DC(dcontext) ? "" : " (x86 mode)", "")); BBPRINT(bb, 3, "start_pc = " PFX "\n", bb->start_pc); DOSTATS({ if (bb->app_interp) { if (fragment_lookup_deleted(dcontext, bb->start_pc)) { /* this will look up private 1st, so yes we will get * dup stats if multiple threads have regenerated the * same private tag, or if a shared tag is deleted and * multiple privates created */ regenerated = true; STATS_INC(num_fragments_deja_vu); } } }); /* start converting instructions into IR */ if (!bb->checked_start_vmarea) check_new_page_start(dcontext, bb); #if defined(WINDOWS) && !defined(STANDALONE_DECODER) && defined(CLIENT_INTERFACE) /* i#1632: if `bb->start_pc` points into the middle of a DR intercept hook, change * it so instructions are taken from the intercept instead (note that * `instr_set_translation` will hide this adjustment from the client). N.B.: this * must follow `check_new_page_start()` (above) or `bb.vmlist` will be wrong. */ if (could_be_hook_occluded_pc(bb->start_pc)) { app_pc intercept_pc = get_intercept_pc_from_app_pc( bb->start_pc, true /* occlusions only */, true /* exclude start pc */); if (intercept_pc != NULL) { LOG(THREAD, LOG_INTERP, 3, "Changing start_pc from hook-occluded app pc " PFX " to intercept pc " PFX "\n", bb->start_pc, intercept_pc); bb->start_pc = intercept_pc; } } #endif bb->cur_pc = bb->start_pc; /* for translation in case we break out of loop before decoding any * instructions, (i.e. check_for_stopping_point()) */ bb->instr_start = bb->cur_pc; /* create instrlist after check_new_page_start to avoid memory leak * on unreadable memory -- though we now properly clean up and won't leak * on unreadable on any check_thread_vm_area call */ bb->ilist = instrlist_create(dcontext); bb->instr = NULL; /* avoid discrepancy in finding invalid instructions between fast decode * and the full decode of sandboxing by doing full decode up front */ if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) { bb->full_decode = true; bb->follow_direct = false; } if (TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags)) { bb->full_decode = true; bb->record_translation = true; } if (my_dcontext != NULL && my_dcontext->single_step_addr == bb->start_pc) { /* Decodes only one instruction because of single step exception.
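* (cur_max_bb_instrs is capped at 1 just below, so decoding stops after that single instruction.)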
*/ cur_max_bb_instrs = 1; } KSTART(bb_decoding); while (true) { if (check_for_stopping_point(dcontext, bb)) { BBPRINT(bb, 3, "interp: found DynamoRIO stopping point at " PFX "\n", bb->cur_pc); break; } /* fill in a new instr structure and update bb->cur_pc */ bb->instr = instr_create(dcontext); /* if !full_decode: * All we need to decode are control-transfer instructions * For efficiency, put all non-cti into a single instr_t structure */ non_cti_start_pc = bb->cur_pc; do { /* If the thread's vmareas aren't being added to, indicate the * page that's being decoded. */ if (!bb->record_vmlist && page_start_pc != (app_pc)PAGE_START(bb->cur_pc)) { page_start_pc = (app_pc)PAGE_START(bb->cur_pc); set_thread_decode_page_start(my_dcontext == NULL ? dcontext : my_dcontext, page_start_pc); } bb->instr_start = bb->cur_pc; if (bb->full_decode) { /* only going through this do loop once! */ bb->cur_pc = IF_AARCH64_ELSE(decode_with_ldstex, decode)(dcontext, bb->cur_pc, bb->instr); if (bb->record_translation) instr_set_translation(bb->instr, bb->instr_start); } else { /* must reset, may go through loop multiple times */ instr_reset(dcontext, bb->instr); bb->cur_pc = IF_AARCH64_ELSE(decode_cti_with_ldstex, decode_cti)(dcontext, bb->cur_pc, bb->instr); #if defined(ANNOTATIONS) && !(defined(X64) && defined(WINDOWS)) /* Quickly check whether this may be a Valgrind annotation. */ if (is_encoded_valgrind_annotation_tail(bb->instr_start)) { /* Might be an annotation, so try the (slower) full check. */ if (is_encoded_valgrind_annotation(bb->instr_start, bb->start_pc, (app_pc)PAGE_START(bb->cur_pc))) { /* Valgrind annotation needs full decode; clean up and repeat. */ KSTOP(bb_decoding); instr_destroy(dcontext, bb->instr); instrlist_clear_and_destroy(dcontext, bb->ilist); if (bb->vmlist != NULL) { vm_area_destroy_list(dcontext, bb->vmlist); bb->vmlist = NULL; } bb->full_decode = true; build_bb_ilist(dcontext, bb); return; } } #endif } ASSERT(!bb->check_vm_area || bb->checked_end != NULL); if (bb->check_vm_area && bb->cur_pc != NULL && bb->cur_pc - 1 >= bb->checked_end) { /* We're beyond the vmarea allowed -- so check again. * Ideally we'd want to check BEFORE we decode from the * subsequent page, as it could be inaccessible, but not worth * the time estimating the size from a variable number of bytes * before the page boundary. Instead we rely on other * mechanisms to handle faults while decoding, which we need * anyway to handle racy unmaps by the app. */ uint old_flags = bb->flags; DEBUG_DECLARE(bool is_first_instr = (bb->instr_start == bb->start_pc)); if (!check_new_page_contig(dcontext, bb, bb->cur_pc - 1)) { /* i#989: Stop bb building before falling through to an * incompatible vmarea. */ ASSERT(!is_first_instr); bb->cur_pc = NULL; stop_bb_on_fallthrough = true; break; } if (!TEST(FRAG_SELFMOD_SANDBOXED, old_flags) && TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) { /* Restart the decode loop with full_decode and * !follow_direct, which are needed for sandboxing. This * can't happen more than once because sandboxing is now on. 
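* (FRAG_SELFMOD_SANDBOXED is now set in bb->flags, so the enclosing check cannot fire again.)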
*/ ASSERT(is_first_instr); bb->full_decode = true; bb->follow_direct = false; bb->cur_pc = bb->instr_start; instr_reset(dcontext, bb->instr); continue; } } total_instrs++; DOELOG(3, LOG_INTERP, { disassemble_with_bytes(dcontext, bb->instr_start, THREAD); }); #if defined(INTERNAL) || defined(CLIENT_INTERFACE) if (bb->outf != INVALID_FILE) disassemble_with_bytes(dcontext, bb->instr_start, bb->outf); #endif /* INTERNAL || CLIENT_INTERFACE */ if (!instr_valid(bb->instr)) break; /* before eflags analysis! */ #ifdef X86 /* If the next instruction at bb->cur_pc fires a debug register, * then we should stop this basic block before getting to it. */ if (my_dcontext != NULL && debug_register_fire_on_addr(bb->instr_start)) { stop_bb_on_fallthrough = true; break; } if (!d_r_is_avx512_code_in_use()) { if (ZMM_ENABLED()) { if (instr_get_prefix_flag(bb->instr, PREFIX_EVEX)) { /* For AVX-512 detection in bb builder, we're checking only for * the prefix flag, which for example can be set by decode_cti. In * client_process_bb, post-client instructions are checked with * instr_may_write_zmm_register. */ d_r_set_avx512_code_in_use(true); } } } #endif /* Eflags analysis: * We do this even if -unsafe_ignore_eflags_prefix b/c it doesn't cost that * much and we can use the analysis to detect any bb that reads a flag * prior to writing it. */ if (bb->eflags != EFLAGS_WRITE_ARITH IF_X86(&&bb->eflags != EFLAGS_READ_OF)) bb->eflags = eflags_analysis(bb->instr, bb->eflags, &eflags_6); /* stop decoding at an invalid instr (tested above) or a cti *(== opcode valid) or a possible SEH frame push (if * -process_SEH_push). */ #ifdef WINDOWS if (DYNAMO_OPTION(process_SEH_push) && instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS)) { STATS_INC(num_bb_build_fs); break; } #endif #ifdef X64 if (instr_has_rel_addr_reference(bb->instr)) { /* PR 215397: we need to split these out for re-relativization */ break; } #endif #if defined(UNIX) && defined(X86) if (INTERNAL_OPTION(mangle_app_seg) && instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS | PREFIX_SEG_GS)) { /* These segment prefix flags are not persistent and are * only used as hints just after decoding. * They are not accurate later and can be misleading. * This can only be used right after decoding for quick check, * and a walk of operands should be performed to look for * actual far mem refs. */ /* i#107, mangle reference with segment register */ /* we up-decode the instr when !full_decode to make sure it will * pass the instr_opcode_valid check in mangle and be mangled. */ instr_get_opcode(bb->instr); break; } #endif /* i#107, opcode mov_seg will be set in decode_cti, * so instr_opcode_valid(bb->instr) is true, and terminates the loop. */ } while (!instr_opcode_valid(bb->instr) && total_instrs <= cur_max_bb_instrs); if (bb->cur_pc == NULL) { /* invalid instr or vmarea change: reset bb->cur_pc, will end bb * after updating stats */ bb->cur_pc = bb->instr_start; } /* We need the translation when mangling calls and jecxz/loop*. * May as well set it for all cti's since there's * really no extra overhead in doing so. Note that we go * through the above loop only once for cti's, so it's safe * to set the translation here. */ if (instr_opcode_valid(bb->instr) && (instr_is_cti(bb->instr) || bb->record_translation)) instr_set_translation(bb->instr, bb->instr_start); #ifdef HOT_PATCHING_INTERFACE /* If this lookup succeeds then the current bb needs to be patched. 
* In hotp_inject(), address lookup will be done for each instruction * pc in this bb and patching will be done if an exact match is found. * * Hot patching should be done only for app interp and recreating * pc, not for reproducing app code. Hence we use mangle_ilist. * See case 5981. * * FIXME: this lookup can further be reduced by determining whether or * not the current bb's module needs patching via check_new_page* */ if (DYNAMO_OPTION(hot_patching) && bb->mangle_ilist && !hotp_should_inject) { /* case 8780: we may hold the lock; FIXME: figure out if this can * be avoided - messy to hold hotp_vul_table lock like this for * unnecessary operations. */ bool owns_hotp_lock = self_owns_write_lock(hotp_get_lock()); if (hotp_does_region_need_patch(non_cti_start_pc, bb->cur_pc, owns_hotp_lock)) { BBPRINT(bb, 2, "hotpatch match in " PFX ": " PFX "-" PFX "\n", bb->start_pc, non_cti_start_pc, bb->cur_pc); hotp_should_inject = true; /* Don't elide if we are going to hot patch this bb because * the patch point can be a direct cti; eliding would result * in the patch not being applied. See case 5901. * FIXME: we could make this more efficient by only turning * off follow_direct if the instr is direct cti. */ bb->follow_direct = false; DOSTATS({ if TEST(FRAG_HAS_DIRECT_CTI, bb->flags) STATS_INC(hotp_num_frag_direct_cti); }); } } #endif if (bb->full_decode) { if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags) && instr_valid(bb->instr) && instr_writes_memory(bb->instr)) { /* to allow tailing non-writes, end prior to the write beyond the max */ total_writes++; if (total_writes > DYNAMO_OPTION(selfmod_max_writes)) { BBPRINT(bb, 3, "reached selfmod write limit %d, stopping\n", DYNAMO_OPTION(selfmod_max_writes)); STATS_INC(num_max_selfmod_writes_enforced); bb_stop_prior_to_instr(dcontext, bb, false /*not added to bb->ilist*/); break; } } } else if (bb->instr_start != non_cti_start_pc) { /* instr now holds the cti, so create an instr_t for the non-cti */ non_cti = instr_create(dcontext); IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(bb->instr_start - non_cti_start_pc))); instr_set_raw_bits(non_cti, non_cti_start_pc, (uint)(bb->instr_start - non_cti_start_pc)); if (bb->record_translation) instr_set_translation(non_cti, non_cti_start_pc); /* add non-cti instructions to instruction list */ instrlist_append(bb->ilist, non_cti); } DOSTATS({ /* This routine is also called for recreating state, we only want * to count app code when we build new bbs, which is indicated by * the bb->app_interp parameter */ if (bb->app_interp && !regenerated) { /* avoid double-counting for adaptive working set */ /* FIXME - ubr elision leads to double counting. We also * double count when we have multiple entry points into the * same block of cti free instructions. */ STATS_ADD(app_code_seen, (bb->cur_pc - non_cti_start_pc)); LOG(THREAD, LOG_INTERP, 5, "adding %d bytes to total app code seen\n", bb->cur_pc - non_cti_start_pc); } }); if (!instr_valid(bb->instr)) { bb_process_invalid_instr(dcontext, bb); break; } if (stop_bb_on_fallthrough) { bb_stop_prior_to_instr(dcontext, bb, false /*not appended*/); break; } #ifdef ANNOTATIONS # if !(defined(X64) && defined(WINDOWS)) /* Quickly check whether this may be a Valgrind annotation. */ if (is_decoded_valgrind_annotation_tail(bb->instr)) { /* Might be an annotation, so try the (slower) full check.
*/ if (is_encoded_valgrind_annotation(bb->instr_start, bb->start_pc, (app_pc)PAGE_START(bb->cur_pc))) { instrument_valgrind_annotation(dcontext, bb->ilist, bb->instr, bb->instr_start, bb->cur_pc, total_instrs); continue; } } else /* Top-level annotation recognition is unambiguous (xchg vs. jmp). */ # endif if (is_annotation_jump_over_dead_code(bb->instr)) { instr_t *substitution = NULL; if (instrument_annotation( dcontext, &bb->cur_pc, &substitution _IF_WINDOWS_X64(bb->cur_pc < bb->checked_end))) { instr_destroy(dcontext, bb->instr); if (substitution == NULL) continue; /* ignore annotation if no handlers are registered */ else bb->instr = substitution; } } #endif #ifdef WINDOWS if (DYNAMO_OPTION(process_SEH_push) && instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS)) { DEBUG_DECLARE(ssize_t dbl_count = bb->cur_pc - bb->instr_start); if (!bb_process_fs_ref(dcontext, bb)) { DOSTATS({ if (bb->app_interp) { LOG(THREAD, LOG_INTERP, 3, "stopping bb at fs-using instr @ " PFX "\n", bb->instr_start); STATS_INC(num_process_SEH_bb_early_terminate); /* don't double count the fs instruction itself * since we removed it from this bb */ if (!regenerated) STATS_ADD(app_code_seen, -dbl_count); } }); break; } } #else # ifdef X86 if (instr_get_prefix_flag(bb->instr, (SEG_TLS == SEG_GS) ? PREFIX_SEG_GS : PREFIX_SEG_FS) /* __errno_location is interpreted when global, though it's hidden in TOT */ IF_UNIX(&&!is_in_dynamo_dll(bb->instr_start)) && /* i#107 allows DR/APP using the same segment register. */ !INTERNAL_OPTION(mangle_app_seg)) { /* On linux we use a segment register and do not yet * support the application using the same register! */ CLIENT_ASSERT(false, "no support yet for application using non-NPTL segment"); ASSERT_BUG_NUM(205276, false); } # endif /* X86 */ #endif /* WINDOWS */ if (my_dcontext != NULL && my_dcontext->single_step_addr == bb->instr_start) { bb_process_single_step(dcontext, bb); /* Stops basic block right now. 
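* (We break out of the decode loop immediately so the stepped instr is alone at the end of the block.)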
*/ break; } /* far direct is treated as indirect (i#823) */ if (instr_is_near_ubr(bb->instr)) { if (bb_process_ubr(dcontext, bb)) continue; else { if (bb->instr != NULL) /* else, bb_process_ubr() set exit_type */ bb->exit_type |= instr_branch_type(bb->instr); break; } } else instrlist_append(bb->ilist, bb->instr); #ifdef RETURN_AFTER_CALL if (bb->app_interp && dynamo_options.ret_after_call) { if (instr_is_call(bb->instr)) { /* add after call instruction to valid return targets */ add_return_target(dcontext, bb->instr_start, bb->instr); } } #endif /* RETURN_AFTER_CALL */ #ifdef X64 /* must be prior to mbr check since mbr location could be rip-rel */ if (DYNAMO_OPTION(coarse_split_riprel) && DYNAMO_OPTION(coarse_units) && TEST(FRAG_COARSE_GRAIN, bb->flags) && instr_has_rel_addr_reference(bb->instr)) { if (instrlist_first(bb->ilist) != bb->instr) { /* have ref be in its own bb */ bb_stop_prior_to_instr(dcontext, bb, true /*appended already*/); break; /* stop bb */ } else { /* single-instr fine-grained bb */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_riprel); } } #endif if (instr_is_near_call_direct(bb->instr)) { if (!bb_process_call_direct(dcontext, bb)) { if (bb->instr != NULL) bb->exit_type |= instr_branch_type(bb->instr); break; } } else if (instr_is_mbr(bb->instr) /* including indirect calls */ IF_X86( /* far direct is treated as indirect (i#823) */ || instr_get_opcode(bb->instr) == OP_jmp_far || instr_get_opcode(bb->instr) == OP_call_far) IF_ARM(/* mode-switch direct is treated as indirect */ || instr_get_opcode(bb->instr) == OP_blx)) { /* Manage the case where we don't need to perform 'normal' * indirect branch processing. */ bool normal_indirect_processing = true; bool elide_and_continue_if_converted = true; if (instr_is_return(bb->instr)) { bb->ibl_branch_type = IBL_RETURN; STATS_INC(num_returns); } else if (instr_is_call_indirect(bb->instr)) { STATS_INC(num_all_calls); STATS_INC(num_indirect_calls); if (DYNAMO_OPTION(coarse_split_calls) && DYNAMO_OPTION(coarse_units) && TEST(FRAG_COARSE_GRAIN, bb->flags)) { if (instrlist_first(bb->ilist) != bb->instr) { /* have call be in its own bb */ bb_stop_prior_to_instr(dcontext, bb, true /*appended already*/); break; /* stop bb */ } else { /* single-call fine-grained bb */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); } } /* If the indirect call can be converted into a direct one, * bypass normal indirect call processing. * First, check for a call* that we treat as a syscall. */ if (bb_process_indcall_syscall(dcontext, bb, &elide_and_continue_if_converted)) { normal_indirect_processing = false; } else if (DYNAMO_OPTION(indcall2direct) && bb_process_convertible_indcall(dcontext, bb)) { normal_indirect_processing = false; elide_and_continue_if_converted = true; } else if (DYNAMO_OPTION(IAT_convert) && bb_process_IAT_convertible_indcall( dcontext, bb, &elide_and_continue_if_converted)) { normal_indirect_processing = false; } else bb->ibl_branch_type = IBL_INDCALL; #ifdef X86 } else if (instr_get_opcode(bb->instr) == OP_jmp_far) { /* far direct is treated as indirect (i#823) */ bb->ibl_branch_type = IBL_INDJMP; } else if (instr_get_opcode(bb->instr) == OP_call_far) { /* far direct is treated as indirect (i#823) */ bb->ibl_branch_type = IBL_INDCALL; #elif defined(ARM) } else if (instr_get_opcode(bb->instr) == OP_blx) { /* mode-changing direct call is treated as indirect */ bb->ibl_branch_type = IBL_INDCALL; #endif /* X86 */ } else { /* indirect jump */ /* was prev instr a direct call? 
if so, this is a PLT-style ind call */ instr_t *prev = instr_get_prev(bb->instr); if (prev != NULL && instr_opcode_valid(prev) && instr_is_call_direct(prev)) { bb->exit_type |= INSTR_IND_JMP_PLT_EXIT; /* just because we have a CALL to JMP* makes it only a _likely_ PLT call, we still have to make sure it goes through IAT - see case 4269 */ STATS_INC(num_indirect_jumps_likely_PLT); } elide_and_continue_if_converted = true; if (DYNAMO_OPTION(IAT_convert) && bb_process_IAT_convertible_indjmp(dcontext, bb, &elide_and_continue_if_converted)) { /* Clear the IND_JMP_PLT_EXIT flag since we've converted * the PLT to a direct transition (and possibly elided). * Xref case 7867 for why leaving this flag in the eliding * case can cause later failures. */ bb->exit_type &= ~INSTR_CALL_EXIT; /* leave just JMP */ normal_indirect_processing = false; } else /* FIXME: this can always be set */ bb->ibl_branch_type = IBL_INDJMP; STATS_INC(num_indirect_jumps); } #ifdef CUSTOM_TRACES_RET_REMOVAL if (instr_is_return(bb->instr)) my_dcontext->num_rets++; else if (instr_is_call_indirect(bb->instr)) my_dcontext->num_calls++; #endif /* set exit type since this instruction will get mangled */ if (normal_indirect_processing) { bb->exit_type |= instr_branch_type(bb->instr); bb->exit_target = get_ibl_routine(dcontext, get_ibl_entry_type(bb->exit_type), DEFAULT_IBL_BB(), bb->ibl_branch_type); LOG(THREAD, LOG_INTERP, 4, "mbr exit target = " PFX "\n", bb->exit_target); break; } else { /* decide whether to stop bb here */ if (!elide_and_continue_if_converted) break; /* fall through for -max_bb_instrs check */ } } else if (instr_is_cti(bb->instr) && (!instr_is_call(bb->instr) || instr_is_cbr(bb->instr))) { total_branches++; if (total_branches >= BRANCH_LIMIT) { /* set type of 1st exit cti for cbr (bb->exit_type is for fall-through) */ instr_exit_branch_set_type(bb->instr, instr_branch_type(bb->instr)); break; } } else if (instr_is_syscall(bb->instr)) { if (!bb_process_syscall(dcontext, bb)) break; } /* end syscall */ else if (instr_get_opcode(bb->instr) == IF_X86_ELSE(OP_int, OP_svc)) { /* non-syscall int */ if (!bb_process_interrupt(dcontext, bb)) break; } #ifdef AARCH64 /* OP_isb, when mangled, has a potential side exit. */ else if (instr_get_opcode(bb->instr) == OP_isb) break; #endif #if 0 /*i#1313, i#1314*/ else if (instr_get_opcode(bb->instr) == OP_getsec) { /* XXX i#1313: if we support CPL0 in the future we'll need to * dynamically handle the leaf functions here, which can change eip * and other state. We'll need OP_getsec in decode_cti(). */ } else if (instr_get_opcode(bb->instr) == OP_xend || instr_get_opcode(bb->instr) == OP_xabort) { /* XXX i#1314: support OP_xend failing and setting eip to the * fallback pc recorded by OP_xbegin. We'll need both in decode_cti(). */ } #endif #ifdef CHECK_RETURNS_SSE2 /* There are SSE and SSE2 instrs that operate on MMX instead of XMM, but * we perform a simple coarse-grain check here. */ else if (instr_is_sse_or_sse2(bb->instr)) { FATAL_USAGE_ERROR(CHECK_RETURNS_SSE2_XMM_USED, 2, get_application_name(), get_application_pid()); } #endif #if defined(UNIX) && !defined(DGC_DIAGNOSTICS) && defined(X86) else if (instr_get_opcode(bb->instr) == OP_mov_seg) { if (!bb_process_mov_seg(dcontext, bb)) break; } #endif else if (instr_saves_float_pc(bb->instr)) { bb_process_float_pc(dcontext, bb); break; } if (bb->cur_pc == bb->stop_pc) { /* We only check stop_pc for full_decode, so not in inner loop. 
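* (Only callers that request full_decode, such as state recreation up to a target pc, set a stop_pc.)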
*/ BBPRINT(bb, 3, "reached end pc " PFX ", stopping\n", bb->stop_pc); break; } if (total_instrs > DYNAMO_OPTION(max_bb_instrs)) { /* this could be an enormous basic block, or it could * be some degenerate infinite-loop case like a call * to a function that calls exit() and then calls itself, * so just end it here, we'll pick up where we left off * if it's legit */ BBPRINT(bb, 3, "reached -max_bb_instrs(%d): %d, ", DYNAMO_OPTION(max_bb_instrs), total_instrs); if (bb_safe_to_stop(dcontext, bb->ilist, NULL)) { BBPRINT(bb, 3, "stopping\n"); STATS_INC(num_max_bb_instrs_enforced); break; } else { /* XXX i#1669: cannot stop bb now, what's the best way to handle? * We can either roll-back and find previous safe stop point, or * simply extend the bb with a few more instructions. * We can always lower the -max_bb_instrs to offset the additional * instructions. In contrast, roll-back seems complex and * potentially problematic. */ BBPRINT(bb, 3, "cannot stop, continuing\n"); } } } /* end of while (true) */ KSTOP(bb_decoding); #ifdef DEBUG_MEMORY /* make sure anyone who destroyed also set to NULL */ ASSERT(bb->instr == NULL || (bb->instr->bytes != (byte *)HEAP_UNALLOCATED_PTR_UINT && bb->instr->bytes != (byte *)HEAP_ALLOCATED_PTR_UINT && bb->instr->bytes != (byte *)HEAP_PAD_PTR_UINT)); #endif if (!check_new_page_contig(dcontext, bb, bb->cur_pc - 1)) { ASSERT(false && "Should have checked cur_pc-1 in decode loop"); } bb->end_pc = bb->cur_pc; BBPRINT(bb, 3, "end_pc = " PFX "\n\n", bb->end_pc); /* We could put this in check_new_page_jmp where it already checks * for native_exec overlap, but selfmod ubrs don't even call that routine */ if (DYNAMO_OPTION(native_exec) && DYNAMO_OPTION(native_exec_callcall) && !vmvector_empty(native_exec_areas) && bb->app_interp && bb->instr != NULL && (instr_is_near_ubr(bb->instr) || instr_is_near_call_direct(bb->instr)) && instrlist_first(bb->ilist) == instrlist_last(bb->ilist)) { /* Case 4564/3558: handle .NET COM method table where a call* targets * a call to a native_exec dll -- we need to put the gateway at the * call* to avoid retaddr mangling of the method table call. * As a side effect we can also handle call*, jmp. * We don't actually verify or care that it was specifically a call*, * whatever at_native_exec_gateway() requires to assure itself that we're * at a return-address-clobberable point. */ app_pc tgt = opnd_get_pc(instr_get_target(bb->instr)); if (is_native_pc(tgt) && at_native_exec_gateway(dcontext, tgt, &bb->native_call _IF_DEBUG(true /*xfer tgt*/))) { /* replace this ilist w/ a native exec one */ LOG(THREAD, LOG_INTERP, 2, "direct xfer @gateway @" PFX " to native_exec module " PFX "\n", bb->start_pc, tgt); bb->native_exec = true; /* add this ubr/call to the native_exec_list, both as an optimization * for future entrances and b/c .NET changes its method table call * from targeting a native_exec image to instead target DGC directly, * thwarting our gateway! * FIXME: if heap region de-allocated, we'll remove, but what if re-used * w/o going through syscalls? Just written over w/ something else? * We'll keep it on native_exec_list... 
*/ ASSERT(bb->end_pc == bb->start_pc + DIRECT_XFER_LENGTH); vmvector_add(native_exec_areas, bb->start_pc, bb->end_pc, NULL); DODEBUG({ report_native_module(dcontext, tgt); }); STATS_INC(num_native_module_entrances_callcall); return; } } #ifdef UNIX /* XXX: i#1247: After a call to a native module through the PLT, DR * loses control of the app b/c of _dl_runtime_resolve */ int ret_imm; if (DYNAMO_OPTION(native_exec) && DYNAMO_OPTION(native_exec_opt) && bb->app_interp && bb->instr != NULL && instr_is_return(bb->instr) && at_dl_runtime_resolve_ret(dcontext, bb->start_pc, &ret_imm)) { dr_insert_clean_call(dcontext, bb->ilist, bb->instr, (void *)native_module_at_runtime_resolve_ret, false, 2, opnd_create_reg(REG_XSP), OPND_CREATE_INT32(ret_imm)); } #endif STATS_TRACK_MAX(max_instrs_in_a_bb, total_instrs); #ifdef UNIX if (bb->invalid_instr_hack) { /* turn off selfmod -- we assume bb will hit exception right away */ if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) bb->flags &= ~FRAG_SELFMOD_SANDBOXED; /* decode_fragment() can't handle invalid instrs, so store translations */ bb->flags |= FRAG_HAS_TRANSLATION_INFO; } #endif if (stop_bb_on_fallthrough && TEST(FRAG_HAS_DIRECT_CTI, bb->flags)) { /* If we followed a direct cti to an instruction straddling a vmarea * boundary, we can't actually do the elision. See the * sandbox_last_byte() test case in security-common/sandbox.c. Restart * bb building without follow_direct. Alternatively, we could check the * vmareas of the targeted instruction before performing elision. */ /* FIXME: a better assert is needed because this can trigger if * hot patching turns off follow_direct, the current bb was elided * earlier and is marked as selfmod. hotp_num_frag_direct_cti will * track this for now. */ ASSERT(bb->follow_direct); /* else, infinite loop possible */ BBPRINT(bb, 2, "*** must rebuild bb to avoid following direct cti to " "incompatible vmarea\n"); STATS_INC(num_bb_end_early); instrlist_clear_and_destroy(dcontext, bb->ilist); if (bb->vmlist != NULL) { vm_area_destroy_list(dcontext, bb->vmlist); bb->vmlist = NULL; } /* Remove FRAG_HAS_DIRECT_CTI, since we're turning off follow_direct. * Try to keep the known flags. We stopped the bb before merging in any * incompatible flags. */ bb->flags &= ~FRAG_HAS_DIRECT_CTI; bb->follow_direct = false; bb->exit_type = 0; /* i#577 */ bb->exit_target = NULL; /* i#928 */ /* overlap info will be reset by check_new_page_start */ build_bb_ilist(dcontext, bb); return; } if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) { ASSERT(bb->full_decode); ASSERT(!bb->follow_direct); ASSERT(!TEST(FRAG_HAS_DIRECT_CTI, bb->flags)); } #ifdef HOT_PATCHING_INTERFACE /* CAUTION: This can't be moved below client interface as the basic block * can be changed by the client. This will mess up hot patching. * The same is true for mangling. */ if (hotp_should_inject) { ASSERT(DYNAMO_OPTION(hot_patching)); hotp_injected = hotp_inject(dcontext, bb->ilist); /* Fix for 5272. Hot patch injection uses dr clean call api which * accesses dcontext fields directly, so the injected bbs can't be * shared until that is changed or the clean call mechanism is replaced * with bb termination to execute hot patches. * Case 9995 assumes that hotp fragments are fine-grained, which we * achieve today by being private; if we make shared we must explicitly * prevent from being coarse-grained.
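* (Hence FRAG_SHARED is cleared just below whenever a patch was actually injected.)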
*/ if (hotp_injected) { bb->flags &= ~FRAG_SHARED; bb->flags |= FRAG_CANNOT_BE_TRACE; } } #endif /* Until we're more confident in our decoder/encoder consistency this is * at the default debug build -checklevel 2. */ IF_ARM(DOCHECK(2, check_encode_decode_consistency(dcontext, bb->ilist);)); #ifdef DR_APP_EXPORTS /* changes by DR that are visible to clients */ mangle_pre_client(dcontext, bb); #endif /* DR_APP_EXPORTS */ #ifdef DEBUG /* This is a special debugging feature */ if (bb->for_cache && INTERNAL_OPTION(go_native_at_bb_count) > 0 && debug_bb_count++ >= INTERNAL_OPTION(go_native_at_bb_count)) { SYSLOG_INTERNAL_INFO("thread " TIDFMT " is going native @%d bbs to " PFX, d_r_get_thread_id(), debug_bb_count - 1, bb->start_pc); /* we leverage the existing native_exec mechanism */ dcontext->native_exec_postsyscall = bb->start_pc; dcontext->next_tag = BACK_TO_NATIVE_AFTER_SYSCALL; dynamo_thread_not_under_dynamo(dcontext); /* i#1582: required for now on ARM */ IF_UNIX(os_swap_context_go_native(dcontext, DR_STATE_GO_NATIVE)); /* i#1921: for now we do not support re-attach, so remove handlers */ os_process_not_under_dynamorio(dcontext); bb_build_abort(dcontext, true /*free vmlist*/, false /*don't unlock*/); return; } #endif #ifdef CLIENT_INTERFACE if (!client_process_bb(dcontext, bb)) { bb_build_abort(dcontext, true /*free vmlist*/, false /*don't unlock*/); return; } /* i#620: provide API to set fall-through and retaddr targets at end of bb */ if (instrlist_get_return_target(bb->ilist) != NULL || instrlist_get_fall_through_target(bb->ilist) != NULL) { CLIENT_ASSERT(instr_is_cbr(instrlist_last(bb->ilist)) || instr_is_call(instrlist_last(bb->ilist)), "instr_set_return_target/instr_set_fall_through_target" " can only be used in a bb ending with call/cbr"); /* the bb cannot be added to a trace */ bb->flags |= FRAG_CANNOT_BE_TRACE; } if (bb->unmangled_ilist != NULL) *bb->unmangled_ilist = instrlist_clone(dcontext, bb->ilist); #endif if (bb->instr != NULL && instr_opcode_valid(bb->instr) && instr_is_far_cti(bb->instr)) { /* Simplify far_ibl (i#823) vs trace_cmp ibl as well as * cross-mode direct stubs varying in a trace by disallowing * far cti in middle of trace */ bb->flags |= FRAG_MUST_END_TRACE; /* Simplify coarse by not requiring extra prefix stubs */ bb->flags &= ~FRAG_COARSE_GRAIN; } /* create a final instruction that will jump to the exit stub * corresponding to the fall-through of the conditional branch or * the target of the final indirect branch (the indirect branch itself * will get mangled into a non-cti) */ if (bb->exit_target == NULL) { /* not set by ind branch, etc. */ /* fall-through pc */ #ifdef CLIENT_INTERFACE /* i#620: provide API to set fall-through target at end of bb */ bb->exit_target = instrlist_get_fall_through_target(bb->ilist); #endif /* CLIENT_INTERFACE */ if (bb->exit_target == NULL) bb->exit_target = (cache_pc)bb->cur_pc; #ifdef CLIENT_INTERFACE else { LOG(THREAD, LOG_INTERP, 3, "set fall-through target " PFX " by client\n", bb->exit_target); } #endif /* CLIENT_INTERFACE */ if (bb->instr != NULL && instr_opcode_valid(bb->instr) && instr_is_cbr(bb->instr) && (int)(bb->exit_target - bb->start_pc) <= SHRT_MAX && (int)(bb->exit_target - bb->start_pc) >= SHRT_MIN && /* rule out jecxz, etc.
*/ !instr_is_cti_loop(bb->instr)) bb->flags |= FRAG_CBR_FALLTHROUGH_SHORT; } /* we share all basic blocks except selfmod (since want no-synch quick deletion) * or syscall-containing ones (to bound delay on threads exiting shared cache, * for cache management, both consistency and capacity) * bbs injected with hot patches are also not shared (see case 5272). */ if (DYNAMO_OPTION(shared_bbs) && !TEST(FRAG_SELFMOD_SANDBOXED, bb->flags) && !TEST(FRAG_TEMP_PRIVATE, bb->flags) #ifdef HOT_PATCHING_INTERFACE && !hotp_injected #endif && (my_dcontext == NULL || my_dcontext->single_step_addr != bb->instr_start)) { /* If the fragment doesn't have a syscall or contains a * non-ignorable one -- meaning that the frag will exit the cache * to execute the syscall -- it can be shared. * We don't support ignorable syscalls in shared fragments, as they * don't set at_syscall and so are incompatible w/ -syscalls_synch_flush. */ if (!TEST(FRAG_HAS_SYSCALL, bb->flags) || TESTANY(LINK_NI_SYSCALL_ALL, bb->exit_type) || TEST(LINK_SPECIAL_EXIT, bb->exit_type)) bb->flags |= FRAG_SHARED; #ifdef WINDOWS /* A fragment can be shared if it contains a syscall that will be * executed via the version of shared syscall that can be targeted by * shared frags. */ else if (TEST(FRAG_HAS_SYSCALL, bb->flags) && DYNAMO_OPTION(shared_fragment_shared_syscalls) && bb->exit_target == shared_syscall_routine(dcontext)) bb->flags |= FRAG_SHARED; else { ASSERT((TEST(FRAG_HAS_SYSCALL, bb->flags) && (DYNAMO_OPTION(ignore_syscalls) || (!DYNAMO_OPTION(shared_fragment_shared_syscalls) && bb->exit_target == shared_syscall_routine(dcontext)))) && "BB not shared for unknown reason"); } #endif } else if (my_dcontext != NULL && my_dcontext->single_step_addr == bb->instr_start) { /* Field exit_type might have been cleared by client_process_bb.
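* (We re-add LINK_SPECIAL_EXIT just below so the single-stepped block still takes its special exit.)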
*/ bb->exit_type |= LINK_SPECIAL_EXIT; } if (TEST(FRAG_COARSE_GRAIN, bb->flags) && (!TEST(FRAG_SHARED, bb->flags) || /* Ignorable syscalls on linux are mangled w/ intra-fragment jmps, which * decode_fragment() cannot handle -- and on win32 this overlaps w/ * FRAG_MUST_END_TRACE and LINK_NI_SYSCALL */ TEST(FRAG_HAS_SYSCALL, bb->flags) || TEST(FRAG_MUST_END_TRACE, bb->flags) || TEST(FRAG_CANNOT_BE_TRACE, bb->flags) || TEST(FRAG_SELFMOD_SANDBOXED, bb->flags) || /* PR 214142: coarse units does not support storing translations */ TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags) || /* FRAG_HAS_DIRECT_CTI: we never elide (assert is below); * not-inlined call/jmp: we turn off FRAG_COARSE_GRAIN up above */ #ifdef WINDOWS TEST(LINK_CALLBACK_RETURN, bb->exit_type) || #endif TESTANY(LINK_NI_SYSCALL_ALL, bb->exit_type))) { /* Currently not supported in a coarse unit */ STATS_INC(num_fine_in_coarse); DOSTATS({ if (!TEST(FRAG_SHARED, bb->flags)) STATS_INC(coarse_prevent_private); else if (TEST(FRAG_HAS_SYSCALL, bb->flags)) STATS_INC(coarse_prevent_syscall); else if (TEST(FRAG_MUST_END_TRACE, bb->flags)) STATS_INC(coarse_prevent_end_trace); else if (TEST(FRAG_CANNOT_BE_TRACE, bb->flags)) STATS_INC(coarse_prevent_no_trace); else if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) STATS_INC(coarse_prevent_selfmod); else if (TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags)) STATS_INC(coarse_prevent_translation); else if (IF_WINDOWS_ELSE_0(TEST(LINK_CALLBACK_RETURN, bb->exit_type))) STATS_INC(coarse_prevent_cbret); else if (TESTANY(LINK_NI_SYSCALL_ALL, bb->exit_type)) STATS_INC(coarse_prevent_syscall); else ASSERT_NOT_REACHED(); }); bb->flags &= ~FRAG_COARSE_GRAIN; } ASSERT(!TEST(FRAG_COARSE_GRAIN, bb->flags) || !TEST(FRAG_HAS_DIRECT_CTI, bb->flags)); /* now that we know whether shared, ensure we have the right ibl routine */ if (!TEST(FRAG_SHARED, bb->flags) && TEST(LINK_INDIRECT, bb->exit_type)) { ASSERT(bb->exit_target == get_ibl_routine(dcontext, get_ibl_entry_type(bb->exit_type), DEFAULT_IBL_BB(), bb->ibl_branch_type)); bb->exit_target = get_ibl_routine(dcontext, get_ibl_entry_type(bb->exit_type), IBL_BB_PRIVATE, bb->ibl_branch_type); } if (bb->mangle_ilist && (bb->instr == NULL || !instr_opcode_valid(bb->instr) || !instr_is_near_ubr(bb->instr) || instr_is_meta(bb->instr))) { instr_t *exit_instr = XINST_CREATE_jump(dcontext, opnd_create_pc(bb->exit_target)); if (bb->record_translation) { app_pc translation = NULL; if (bb->instr == NULL || !instr_opcode_valid(bb->instr)) { /* we removed (or mangle will remove) the last instruction * for special handling (invalid/syscall/int 2b) or there were * no instructions added (i.e. 
check_stopping_point in which * case instr_start == cur_pc), use last instruction's start * address for the translation */ translation = bb->instr_start; } else if (instr_is_cti(bb->instr)) { /* last instruction is a cti, consider the exit jmp part of * the mangling of the cti (since we might not know the target * if, for ex., it's indirect) */ translation = instr_get_translation(bb->instr); } else { /* target is the instr after the last instr in the list */ translation = bb->cur_pc; ASSERT(bb->cur_pc == bb->exit_target); } ASSERT(translation != NULL); instr_set_translation(exit_instr, translation); } /* PR 214962: we need this jmp to be marked as "our mangling" so that * we won't relocate a thread there and re-do a ret pop or call push */ instr_set_our_mangling(exit_instr, true); /* here we need to set exit_type */ LOG(THREAD, LOG_EMIT, 3, "exit_branch_type=0x%x bb->exit_target=" PFX "\n", bb->exit_type, bb->exit_target); instr_exit_branch_set_type(exit_instr, bb->exit_type); instrlist_append(bb->ilist, exit_instr); #ifdef ARM if (bb->svc_pred != DR_PRED_NONE) { /* we have a conditional syscall, add predicate to current exit */ instr_set_predicate(exit_instr, bb->svc_pred); /* add another ubr exit as the fall-through */ exit_instr = XINST_CREATE_jump(dcontext, opnd_create_pc(bb->exit_target)); if (bb->record_translation) instr_set_translation(exit_instr, bb->cur_pc); instr_set_our_mangling(exit_instr, true); instr_exit_branch_set_type(exit_instr, LINK_DIRECT | LINK_JMP); instrlist_append(bb->ilist, exit_instr); /* XXX i#1734: instr svc.cc will be deleted later in mangle_syscall, * so we need to reset encode state to avoid holding a dangling pointer. */ encode_reset_it_block(dcontext); } #endif } /* set flags */ #ifdef DGC_DIAGNOSTICS /* no traces in dyngen code, that would mess up our exit tracking */ if (TEST(FRAG_DYNGEN, bb->flags)) bb->flags |= FRAG_CANNOT_BE_TRACE; #endif if (!INTERNAL_OPTION(unsafe_ignore_eflags_prefix) IF_X64(|| !INTERNAL_OPTION(unsafe_ignore_eflags_trace))) { bb->flags |= instr_eflags_to_fragment_eflags(bb->eflags); if (TEST(FRAG_WRITES_EFLAGS_OF, bb->flags)) { LOG(THREAD, LOG_INTERP, 4, "fragment writes OF prior to reading it!\n"); STATS_INC(bbs_eflags_writes_of); } else if (TEST(FRAG_WRITES_EFLAGS_6, bb->flags)) { IF_X86(ASSERT(TEST(FRAG_WRITES_EFLAGS_OF, bb->flags))); LOG(THREAD, LOG_INTERP, 4, "fragment writes all 6 flags prior to reading any\n"); STATS_INC(bbs_eflags_writes_6); } else { DOSTATS({ if (bb->eflags == EFLAGS_READ_ARITH) { /* Reads a flag before writing any. Won't get here if * reads one flag and later writes OF, or writes OF and * later reads one flag before writing that flag. */ STATS_INC(bbs_eflags_reads); } else { STATS_INC(bbs_eflags_writes_none); if (TEST(LINK_INDIRECT, bb->exit_type)) STATS_INC(bbs_eflags_writes_none_ind); } }); } } /* can only have proactive translation info if flag was set from the beginning */ if (TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags) && (!bb->record_translation || !bb->full_decode)) bb->flags &= ~FRAG_HAS_TRANSLATION_INFO; /* if for_cache, caller must clear once done emitting (emitting can deref * app memory so we wait until all done) */ if (!bb_build_nested && !bb->for_cache && my_dcontext != NULL) { ASSERT(my_dcontext->bb_build_info == (void *)bb); my_dcontext->bb_build_info = NULL; } bb->instr = NULL; /* mangle the instruction list */ if (!bb->mangle_ilist) { /* do not mangle!
* caller must use full_decode to find invalid instrs and avoid * a discrepancy w/ for_cache case that aborts b/c of selfmod sandbox * returning false (in code below) */ return; } if (!mangle_bb_ilist(dcontext, bb)) { /* have to rebuild bb w/ new bb flags set by mangle_bb_ilist */ build_bb_ilist(dcontext, bb); return; } } /* Call when about to throw exception or other drastic action in the * middle of bb building, in order to free resources */ void bb_build_abort(dcontext_t *dcontext, bool clean_vmarea, bool unlock) { ASSERT(dcontext->bb_build_info != NULL); /* caller should check */ if (dcontext->bb_build_info != NULL) { build_bb_t *bb = (build_bb_t *)dcontext->bb_build_info; /* free instr memory */ if (bb->instr != NULL && bb->ilist != NULL && instrlist_last(bb->ilist) != bb->instr) instr_destroy(dcontext, bb->instr); /* not added to bb->ilist yet */ DODEBUG({ bb->instr = NULL; }); if (bb->ilist != NULL) { instrlist_clear_and_destroy(dcontext, bb->ilist); DODEBUG({ bb->ilist = NULL; }); } if (clean_vmarea) { /* Free the vmlist and any locks held (we could have been in * the middle of check_thread_vm_area and had a decode fault * during code origins checking!) */ check_thread_vm_area_abort(dcontext, &bb->vmlist, bb->flags); } /* else we were presumably called from vmarea so caller does cleanup */ if (unlock) { /* Assumption: bb building lock is held iff bb->for_cache, * and on a nested app bb build where !bb->for_cache we do keep the * original bb info in dcontext (see build_bb_ilist()). */ if (bb->has_bb_building_lock) { ASSERT_OWN_MUTEX(USE_BB_BUILDING_LOCK(), &bb_building_lock); SHARED_BB_UNLOCK(); KSTOP_REWIND(bb_building); } else ASSERT_DO_NOT_OWN_MUTEX(USE_BB_BUILDING_LOCK(), &bb_building_lock); } dcontext->bb_build_info = NULL; } } bool expand_should_set_translation(dcontext_t *dcontext) { if (dcontext->bb_build_info != NULL) { build_bb_t *bb = (build_bb_t *)dcontext->bb_build_info; /* Expanding to a higher level should set the translation to * the raw bytes if we're building a bb where we can assume * the raw byte pointer is the app pc. */ return bb->record_translation; } return false; } /* returns false if need to rebuild bb: in that case this routine will * set the bb flags needed to ensure successful mangling 2nd time around */ static bool mangle_bb_ilist(dcontext_t *dcontext, build_bb_t *bb) { #ifdef X86 if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) { byte *selfmod_start, *selfmod_end; /* sandbox requires that bb have no direct cti followings! * check_thread_vm_area should have ensured this for us */ ASSERT(!TEST(FRAG_HAS_DIRECT_CTI, bb->flags)); LOG(THREAD, LOG_INTERP, 2, "fragment overlaps selfmod area, inserting sandboxing\n"); /* only reason can't be trace is don't have mechanism set up * to store app code for each trace bb and update sandbox code * to point there */ bb->flags |= FRAG_CANNOT_BE_TRACE; if (bb->pretend_pc != NULL) { selfmod_start = bb->pretend_pc; selfmod_end = bb->pretend_pc + (bb->cur_pc - bb->start_pc); } else { selfmod_start = bb->start_pc; selfmod_end = bb->cur_pc; } if (!insert_selfmod_sandbox(dcontext, bb->ilist, bb->flags, selfmod_start, selfmod_end, bb->record_translation, bb->for_cache)) { /* have to rebuild bb using full decode -- it has invalid instrs * in middle, which we don't want to deal w/ for sandboxing! */ ASSERT(!bb->full_decode); /* else, how did we get here??? 
*/ LOG(THREAD, LOG_INTERP, 2, "*** must rebuild bb to avoid invalid instr in middle ***\n"); STATS_INC(num_bb_end_early); instrlist_clear_and_destroy(dcontext, bb->ilist); if (bb->vmlist != NULL) { vm_area_destroy_list(dcontext, bb->vmlist); bb->vmlist = NULL; } bb->flags = FRAG_SELFMOD_SANDBOXED; /* lose all other flags */ bb->full_decode = true; /* full decode this time! */ bb->follow_direct = false; bb->exit_type = 0; /* i#577 */ bb->exit_target = NULL; /* i#928 */ /* overlap info will be reset by check_new_page_start */ return false; } STATS_INC(num_sandboxed_fragments); } #endif /* X86 */ DOLOG(4, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 4, "bb ilist before mangling:\n"); instrlist_disassemble(dcontext, bb->start_pc, bb->ilist, THREAD); }); d_r_mangle(dcontext, bb->ilist, &bb->flags, true, bb->record_translation); DOLOG(4, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 4, "bb ilist after mangling:\n"); instrlist_disassemble(dcontext, bb->start_pc, bb->ilist, THREAD); }); return true; } /* Interprets the application's instructions until the end of a basic * block is found, following all the rules that build_bb_ilist follows * with regard to terminating the block. Does no mangling or anything of * the app code, though -- this routine is intended only for building the * original code! * Caller is responsible for freeing the list and its instrs! * If outf != INVALID_FILE, does full disassembly with comments to outf. */ instrlist_t * build_app_bb_ilist(dcontext_t *dcontext, byte *start_pc, file_t outf) { build_bb_t bb; init_build_bb(&bb, start_pc, false /*not interp*/, false /*not for cache*/, false /*do not mangle*/, false /*no translation*/, outf, 0 /*no pre flags*/, NULL /*no overlap*/); build_bb_ilist(dcontext, &bb); return bb.ilist; } #ifdef CLIENT_INTERFACE /* Client routine to decode instructions at an arbitrary app address, * following all the rules that DynamoRIO follows internally for * terminating basic blocks. Note that DynamoRIO does not validate * that start_pc is actually the first instruction of a basic block. * \note Caller is responsible for freeing the list and its instrs! */ instrlist_t * decode_as_bb(void *drcontext, byte *start_pc) { build_bb_t bb; /* Case 10009: When we hook ntdll functions, we hide the jump to * the interception buffer from the client BB callback. If the * client asks to decode that address here, we need to decode the * instructions in the interception buffer instead so that we * again hide our hooking. * We will have the jmp from the buffer back to after the hooked * app code visible to the client (just like it is for the * real bb built there, so at least we're consistent). */ # ifdef WINDOWS byte *real_pc; if (is_intercepted_app_pc((app_pc)start_pc, &real_pc)) start_pc = real_pc; # endif init_build_bb(&bb, start_pc, false /*not interp*/, false /*not for cache*/, false /*do not mangle*/, true /* translation; xref case 10070 where this * currently turns on full decode; today we * provide no way to turn that off, as IR * expansion routines are not exported (PR 200409). */ , INVALID_FILE, 0 /*no pre flags*/, NULL /*no overlap*/); build_bb_ilist((dcontext_t *)drcontext, &bb); return bb.ilist; } /* Client routine to decode a trace. We return the instructions in * the original app code, i.e., no client modifications.
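* * A minimal usage sketch from a client (variable names illustrative): * instrlist_t *ilist = decode_trace(drcontext, tag); * if (ilist != NULL) { * for (instr_t *in = instrlist_first(ilist); in != NULL; * in = instr_get_next(in)) { * ... examine the original app instrs ... * } * instrlist_clear_and_destroy(drcontext, ilist); * }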
*/ instrlist_t * decode_trace(void *drcontext, void *tag) { dcontext_t *dcontext = (dcontext_t *)drcontext; fragment_t *frag = fragment_lookup(dcontext, tag); /* We don't support asking about other threads, for synch purposes * (see recreate_fragment_ilist() synch notes) */ if (get_thread_private_dcontext() != dcontext) return NULL; if (frag != NULL && TEST(FRAG_IS_TRACE, frag->flags)) { instrlist_t *ilist; bool alloc_res; /* Support being called from bb/trace hook (couldbelinking) or * from cache clean call (nolinking). We disallow asking about * another thread's private traces. */ if (!is_couldbelinking(dcontext)) d_r_mutex_lock(&thread_initexit_lock); ilist = recreate_fragment_ilist(dcontext, NULL, &frag, &alloc_res, false /*no mangling*/ _IF_CLIENT(false /*do not re-call client*/)); ASSERT(!alloc_res); if (!is_couldbelinking(dcontext)) d_r_mutex_unlock(&thread_initexit_lock); return ilist; } return NULL; } #endif app_pc find_app_bb_end(dcontext_t *dcontext, byte *start_pc, uint flags) { build_bb_t bb; init_build_bb(&bb, start_pc, false /*not interp*/, false /*not for cache*/, false /*do not mangle*/, false /*no translation*/, INVALID_FILE, flags, NULL /*no overlap*/); build_bb_ilist(dcontext, &bb); instrlist_clear_and_destroy(dcontext, bb.ilist); return bb.end_pc; } bool app_bb_overlaps(dcontext_t *dcontext, byte *start_pc, uint flags, byte *region_start, byte *region_end, overlap_info_t *info_res) { build_bb_t bb; overlap_info_t info; info.region_start = region_start; info.region_end = region_end; init_build_bb(&bb, start_pc, false /*not interp*/, false /*not for cache*/, false /*do not mangle*/, false /*no translation*/, INVALID_FILE, flags, &info); build_bb_ilist(dcontext, &bb); instrlist_clear_and_destroy(dcontext, bb.ilist); info.bb_end = bb.end_pc; if (info_res != NULL) *info_res = info; return info.overlap; } #ifdef DEBUG static void report_native_module(dcontext_t *dcontext, app_pc modpc) { char name[MAX_MODNAME_INTERNAL]; const char *modname = name; if (os_get_module_name_buf(modpc, name, BUFFER_SIZE_ELEMENTS(name)) == 0) { /* for native_exec_callcall we do end up putting DGC on native_exec_list */ ASSERT(DYNAMO_OPTION(native_exec_callcall)); modname = "<DGC>"; } LOG(THREAD, LOG_INTERP | LOG_VMAREAS, 2, "module %s is on native list, executing natively\n", modname); STATS_INC(num_native_module_entrances); SYSLOG_INTERNAL_WARNING_ONCE("module %s set up for native execution", modname); } #endif /* WARNING: breaks all kinds of rules, like ret addr transparency and * assuming app stack and not doing calls out of the cache and not having * control during dll loads, etc... */ static void build_native_exec_bb(dcontext_t *dcontext, build_bb_t *bb) { instr_t *in; opnd_t jmp_tgt; #if defined(X86) && defined(X64) bool reachable = rel32_reachable_from_vmcode(bb->start_pc); #endif DEBUG_DECLARE(bool ok;) /* if we ever protect from simultaneous thread attacks then this will * be a hole -- for now should work, all protected while native until * another thread goes into DR */ /* Create a bb that changes the return address on the app stack such that we * will take control when coming back, and then goes native. * N.B.: we ASSUME we reached this module via a call -- * build_basic_block_fragment needs to make sure, since we can't verify here * w/o trying to decode backward from retaddr, and if we're wrong we'll * clobber the stack and never regain control! * We also assume this bb is never reached later through a non-call.
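* * Roughly, the emitted gateway looks like (a sketch, eliding the dcontext * spill/restore of the scratch reg): * <clean call -> call_to_native or return_to_native, which saves the app * retaddr and replaces it so we regain control on the way back> * jmp bb->start_pc # continue natively in the target module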
*/ ASSERT(bb->initialized); ASSERT(bb->app_interp); ASSERT(!bb->record_translation); ASSERT(bb->start_pc != NULL); /* vmlist must start out empty (or N/A). For clients it may have started early. */ ASSERT(bb->vmlist == NULL || !bb->record_vmlist || bb->checked_start_vmarea); if (TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags)) bb->flags &= ~FRAG_HAS_TRANSLATION_INFO; bb->native_exec = true; BBPRINT(bb, IF_DGCDIAG_ELSE(1, 2), "build_native_exec_bb @" PFX "\n", bb->start_pc); DOLOG(2, LOG_INTERP, { dump_mcontext(get_mcontext(dcontext), THREAD, DUMP_NOT_XML); }); if (!bb->checked_start_vmarea) check_new_page_start(dcontext, bb); /* create instrlist after check_new_page_start to avoid memory leak * on unreadable memory * WARNING: do not add any app instructions to this ilist! * If you do you must enable selfmod below. */ bb->ilist = instrlist_create(dcontext); /* FIXME PR 303413: we won't properly translate a fault in our app * stack references here. We mark as our own mangling so we'll at * least return failure from our translate routine. */ instrlist_set_our_mangling(bb->ilist, true); /* get dcontext to xdi, for prot-dcontext, xsi holds upcontext too */ insert_shared_get_dcontext(dcontext, bb->ilist, NULL, true /*save xdi*/); instrlist_append(bb->ilist, instr_create_save_to_dc_via_reg(dcontext, REG_NULL /*default*/, SCRATCH_REG0, SCRATCH_REG0_OFFS)); /* need some cleanup prior to native: turn off asynch, clobber trace, etc. * Now that we have a stack of native retaddrs, we save the app retaddr in C * code. */ if (bb->native_call) { dr_insert_clean_call_ex(dcontext, bb->ilist, NULL, (void *)call_to_native, DR_CLEANCALL_RETURNS_TO_NATIVE, 1, opnd_create_reg(REG_XSP)); } else { if (DYNAMO_OPTION(native_exec_opt)) { insert_return_to_native(dcontext, bb->ilist, NULL, REG_NULL /* default */, SCRATCH_REG0); } else { dr_insert_clean_call_ex(dcontext, bb->ilist, NULL, (void *)return_to_native, DR_CLEANCALL_RETURNS_TO_NATIVE, 0); } } #if defined(X86) && defined(X64) if (!reachable) { /* best to store the target at the end of the bb, to keep it readonly, * but that requires a post-pass to patch its value: since native_exec * is already hacky we just go through TLS and ignore multi-thread selfmod. */ instrlist_append( bb->ilist, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(SCRATCH_REG0), OPND_CREATE_INTPTR((ptr_int_t)bb->start_pc))); if (X64_CACHE_MODE_DC(dcontext) && !X64_MODE_DC(dcontext) && DYNAMO_OPTION(x86_to_x64_ibl_opt)) { jmp_tgt = opnd_create_reg(REG_R9); } else { jmp_tgt = opnd_create_tls_slot(os_tls_offset(MANGLE_XCX_SPILL_SLOT)); } instrlist_append( bb->ilist, INSTR_CREATE_mov_st(dcontext, jmp_tgt, opnd_create_reg(REG_XAX))); } else #endif { jmp_tgt = opnd_create_pc(bb->start_pc); } instrlist_append(bb->ilist, instr_create_restore_from_dc_via_reg(dcontext, REG_NULL /*default*/, SCRATCH_REG0, SCRATCH_REG0_OFFS)); insert_shared_restore_dcontext_reg(dcontext, bb->ilist, NULL); #ifdef AARCH64 ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */ #else /* this is the jump to native code */ instrlist_append(bb->ilist, opnd_is_pc(jmp_tgt) ? XINST_CREATE_jump(dcontext, jmp_tgt) : XINST_CREATE_jump_mem(dcontext, jmp_tgt)); #endif /* mark all as do-not-mangle, so selfmod, etc. 
will leave alone (in absence * of selfmod only really needed for the jmp to native code) */ for (in = instrlist_first(bb->ilist); in != NULL; in = instr_get_next(in)) instr_set_meta(in); /* this is a jump for a dummy exit cti */ instrlist_append(bb->ilist, XINST_CREATE_jump(dcontext, opnd_create_pc(bb->start_pc))); if (DYNAMO_OPTION(shared_bbs) && !TEST(FRAG_TEMP_PRIVATE, bb->flags)) bb->flags |= FRAG_SHARED; /* Can't be coarse-grain since has non-exit cti */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_native_exec); /* We exclude the bb from trace to avoid going native in the process of * building a trace for simplicity. * XXX i#1239: DR needs to be able to unlink native exec gateway bbs for * proper cache consistency and signal handling, in which case we could * use FRAG_MUST_END_TRACE here instead. */ bb->flags |= FRAG_CANNOT_BE_TRACE; /* We support mangling here, though currently we don't need it as we don't * include any app code (although we mark this bb as belonging to the start * pc, so we'll get flushed if this region does), and even if target is * selfmod we're running it natively no matter how it modifies itself. We * only care that transition to target is via a call or call* so we can * clobber the retaddr and regain control, and that no retaddr mangling * happens while native before coming back out. While the former does not * depend on the target at all, unfortunately we cannot verify the latter. */ if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) bb->flags &= ~FRAG_SELFMOD_SANDBOXED; DEBUG_DECLARE(ok =) mangle_bb_ilist(dcontext, bb); ASSERT(ok); #ifdef DEBUG DOLOG(3, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 3, "native_exec_bb @" PFX "\n", bb->start_pc); instrlist_disassemble(dcontext, bb->start_pc, bb->ilist, THREAD); }); #endif } static bool at_native_exec_gateway(dcontext_t *dcontext, app_pc start, bool *is_call _IF_DEBUG(bool xfer_target)) { /* ASSUMPTION: transfer to another module will always be by indirect call * or non-inlined direct call from a fragment that will not be flushed. * For now we will only go native if last_exit was * a call, a true call*, or a PLT-style call,jmp* (and we detect the latter only * if the call is inlined, so if the jmp* table is in a DGC-marked region * or if -no_inline_calls we will miss these: FIXME). * FIXME: what if have PLT-style but no GOT indirection: call,jmp ?!? * * We try to identify funky call* constructions (like * call*,...,jmp* in case 4269) by examining TOS to see whether it's a * retaddr -- we do this if last_exit is a jmp* or is unknown (for the * target_delete ibl path). * * FIXME: we will fail to identify a delay-loaded indirect xfer! * Need to know dynamic link patchup code to look for. * * FIXME: we will fail to take over w/ non-call entrances to a dll, like * NtContinue or direct jmp from DGC. * we could try to take the top-of-stack value and see if it's a retaddr by * decoding the prev instr to see if it's a call. decode backwards may have * issues, and if really want everything will have to do this on every bb, * not just if lastexit is ind xfer. * * We count up easy-to-identify cases we've missed in the DOSTATS below. */ bool native_exec_bb = false; /* We can get here if we start interpreting native modules. */ ASSERT(start != (app_pc)back_from_native && start != (app_pc)native_module_callout && "interpreting return from native module?"); ASSERT(is_call != NULL); *is_call = false; if (DYNAMO_OPTION(native_exec) && !vmvector_empty(native_exec_areas)) { /* do we KNOW that we came from an indirect call? 
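* (i.e., did last_exit record LINK_CALL, which covers ind calls and PLT-style * call;jmp* -- direct calls count only under -native_exec_dircalls)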
*/ if (TEST(LINK_CALL /*includes IND_JMP_PLT*/, dcontext->last_exit->flags) && /* only check direct calls if native_exec_dircalls is on */ (DYNAMO_OPTION(native_exec_dircalls) || LINKSTUB_INDIRECT(dcontext->last_exit->flags))) { STATS_INC(num_native_entrance_checks); /* we do the overlap check last since it's more costly */ if (is_native_pc(start)) { native_exec_bb = true; *is_call = true; DOSTATS({ if (EXIT_IS_CALL(dcontext->last_exit->flags)) { if (LINKSTUB_INDIRECT(dcontext->last_exit->flags)) STATS_INC(num_native_module_entrances_indcall); else STATS_INC(num_native_module_entrances_call); } else STATS_INC(num_native_module_entrances_plt); }); } } /* can we GUESS that we came from an indirect call? */ else if (DYNAMO_OPTION(native_exec_guess_calls) && (/* FIXME: require jmp* be in separate module? */ (LINKSTUB_INDIRECT(dcontext->last_exit->flags) && EXIT_IS_JMP(dcontext->last_exit->flags)) || LINKSTUB_FAKE(dcontext->last_exit))) { /* if unknown last exit, or last exit was jmp*, examine TOS and guess * whether it's a retaddr */ app_pc *tos = (app_pc *)get_mcontext(dcontext)->xsp; STATS_INC(num_native_entrance_TOS_checks); /* vector check cheaper than is_readable syscall, etc. so do it before them, * but after last_exit checks above since overlap is more costly */ if (is_native_pc(start) && is_readable_without_exception((app_pc)tos, sizeof(app_pc))) { enum { MAX_CALL_CONSIDER = 6 /* ignore prefixes */ }; app_pc retaddr = *tos; LOG(THREAD, LOG_INTERP | LOG_VMAREAS, 2, "at native_exec target: checking TOS " PFX " => " PFX " for retaddr\n", tos, retaddr); #ifdef RETURN_AFTER_CALL if (DYNAMO_OPTION(ret_after_call)) { native_exec_bb = is_observed_call_site(dcontext, retaddr); *is_call = true; LOG(THREAD, LOG_INTERP | LOG_VMAREAS, 2, "native_exec: *TOS is %sa call site in ret-after-call table\n", native_exec_bb ? "" : "NOT "); } else { #endif /* try to decode backward -- make sure readable for decoding */ if (is_readable_without_exception(retaddr - MAX_CALL_CONSIDER, MAX_CALL_CONSIDER + MAX_INSTR_LENGTH)) { /* ind calls have variable length and form so we decode * each byte rather than searching for ff and guessing length */ app_pc pc, next_pc; instr_t instr; instr_init(dcontext, &instr); for (pc = retaddr - MAX_CALL_CONSIDER; pc < retaddr; pc++) { LOG(THREAD, LOG_INTERP | LOG_VMAREAS, 3, "native_exec: decoding @" PFX " looking for call\n", pc); instr_reset(dcontext, &instr); next_pc = IF_AARCH64_ELSE(decode_cti_with_ldstex, decode_cti)(dcontext, pc, &instr); STATS_INC(num_native_entrance_TOS_decodes); if (next_pc == retaddr && instr_is_call(&instr)) { native_exec_bb = true; *is_call = true; LOG(THREAD, LOG_INTERP | LOG_VMAREAS, 2, "native_exec: found call @ pre-*TOS " PFX "\n", pc); break; } } instr_free(dcontext, &instr); } #ifdef RETURN_AFTER_CALL } #endif DOSTATS({ if (native_exec_bb) { if (LINKSTUB_FAKE(dcontext->last_exit)) STATS_INC(num_native_module_entrances_TOS_unknown); else STATS_INC(num_native_module_entrances_TOS_jmp); } }); } } /* i#2381: Only now can we check things that might preempt the * "guess" code above. */ /* Is this a return from a non-native module into a native module? */ if (!native_exec_bb && DYNAMO_OPTION(native_exec_retakeover) && LINKSTUB_INDIRECT(dcontext->last_exit->flags) && TEST(LINK_RETURN, dcontext->last_exit->flags)) { if (is_native_pc(start)) { /* XXX: check that this is the return address of a known native * callsite where we took over on a module transition. 
*/ STATS_INC(num_native_module_entrances_ret); native_exec_bb = true; *is_call = false; } } #ifdef UNIX /* Is this the entry point of a native ELF executable? The entry point * (usually _start) cannot return as there is no retaddr. */ else if (!native_exec_bb && DYNAMO_OPTION(native_exec_retakeover) && LINKSTUB_INDIRECT(dcontext->last_exit->flags) && start == get_image_entry()) { if (is_native_pc(start)) { native_exec_bb = true; *is_call = false; } } #endif DOSTATS({ /* did we reach a native dll w/o going through an ind call caught above? */ if (!xfer_target /* else we'll re-check at the target itself */ && !native_exec_bb && is_native_pc(start)) { LOG(THREAD, LOG_INTERP | LOG_VMAREAS, 2, "WARNING: pc " PFX " is on native list but reached bypassing " "gateway!\n", start); STATS_INC(num_native_entrance_miss); /* do-once since once get into dll past gateway may xfer * through a bunch of lastexit-null or indjmp to same dll */ ASSERT_CURIOSITY_ONCE(false && "inside native_exec dll"); } }); } return native_exec_bb; } /* Use when calling build_bb_ilist with for_cache = true. * Must hold bb_building_lock. */ static inline void init_interp_build_bb(dcontext_t *dcontext, build_bb_t *bb, app_pc start, uint initial_flags _IF_CLIENT(bool for_trace) _IF_CLIENT(instrlist_t **unmangled_ilist)) { ASSERT_OWN_MUTEX(USE_BB_BUILDING_LOCK() && !TEST(FRAG_TEMP_PRIVATE, initial_flags), &bb_building_lock); /* We need to set up for abort prior to native exec and other checks * that can crash */ ASSERT(dcontext->bb_build_info == NULL); /* This won't make us be nested b/c for bb.for_cache caller is supposed * to set this up */ dcontext->bb_build_info = (void *)bb; init_build_bb( bb, start, true /*real interp*/, true /*for cache*/, true /*mangle*/, false /* translation: set below for clients */, INVALID_FILE, initial_flags | (INTERNAL_OPTION(store_translations) ? FRAG_HAS_TRANSLATION_INFO : 0), NULL /*no overlap*/); if (!TEST(FRAG_TEMP_PRIVATE, initial_flags)) bb->has_bb_building_lock = true; #ifdef CLIENT_INTERFACE /* We avoid races where there is no hook when we start building a * bb (and hence we don't record translation or do full decode) yet * a hook when we're ready to call one by storing whether there is a * hook at translation/decode decision time: now. */ if (dr_bb_hook_exists()) { /* i#805: Don't instrument code on the null instru list. * Because the module load event is now on 1st exec, we need to trigger * it now so the client can adjust the null instru list: */ check_new_page_start(dcontext, bb); bb->checked_start_vmarea = true; if (!os_module_get_flag(bb->start_pc, MODULE_NULL_INSTRUMENT)) bb->pass_to_client = true; } /* PR 299808: even if no bb hook, for a trace hook we need to * record translation and do full decode. It's racy to check * dr_trace_hook_exists() here so we rely on trace building having * set unmangled_ilist. */ if (bb->pass_to_client || unmangled_ilist != NULL) { /* case 10009/214444: For client interface builds, store the translation. * by default. This ensures clients can get the correct app address * of any instruction. We also rely on this for allowing the client * to return DR_EMIT_STORE_TRANSLATIONS and setting the * FRAG_HAS_TRANSLATION_INFO flag after decoding the app code. * * FIXME: xref case 10070/214505. Currently this means that all * instructions are fully decoded for client interface builds. */ bb->record_translation = true; /* PR 200409: If a bb hook exists, we always do a full decode. 
* Note that we currently do this anyway to get * translation fields, but once we fix case 10070 it * won't be that way. * We do not let the client turn this off (the runtime * option is not dynamic, and off by default anyway), as we * do not export level-handling instr_t routines like *_expand * for walking instrlists and instr_decode(). */ bb->full_decode = !INTERNAL_OPTION(fast_client_decode); /* PR 299808: we give client chance to re-add instrumentation */ bb->for_trace = for_trace; } /* we need to clone the ilist pre-mangling */ bb->unmangled_ilist = unmangled_ilist; #endif } static inline void exit_interp_build_bb(dcontext_t *dcontext, build_bb_t *bb) { ASSERT(dcontext->bb_build_info == (void *)bb); /* Caller's responsibility to clean up since bb.for_cache */ dcontext->bb_build_info = NULL; /* free the instrlist_t elements */ instrlist_clear_and_destroy(dcontext, bb->ilist); } /* Interprets the application's instructions until the end of a basic * block is found, and then creates a fragment for the basic block. * DOES NOT look in the hashtable to see if such a fragment already exists! */ fragment_t * build_basic_block_fragment(dcontext_t *dcontext, app_pc start, uint initial_flags, bool link, bool visible _IF_CLIENT(bool for_trace) _IF_CLIENT(instrlist_t **unmangled_ilist)) { fragment_t *f; build_bb_t bb; dr_where_am_i_t wherewasi = dcontext->whereami; bool image_entry; KSTART(bb_building); dcontext->whereami = DR_WHERE_INTERP; /* Neither thin_client nor hotp_only should be building any bbs. */ ASSERT(!RUNNING_WITHOUT_CODE_CACHE()); /* ASSUMPTION: image entry is reached via indirect transfer and * so will be the start of a bb */ image_entry = check_for_image_entry(start); init_interp_build_bb(dcontext, &bb, start, initial_flags _IF_CLIENT(for_trace) _IF_CLIENT(unmangled_ilist)); if (at_native_exec_gateway(dcontext, start, &bb.native_call _IF_DEBUG(false /*not xfer tgt*/))) { DODEBUG({ report_native_module(dcontext, bb.start_pc); }); #ifdef CLIENT_INTERFACE /* PR 232617 - build_native_exec_bb doesn't support setting translation * info, but it also doesn't pass the built bb to the client (it * contains no app code) so we don't need it. */ bb.record_translation = false; #endif build_native_exec_bb(dcontext, &bb); } else { build_bb_ilist(dcontext, &bb); if (dcontext->bb_build_info == NULL) { /* going native */ f = NULL; goto build_basic_block_fragment_done; } if (bb.native_exec) { /* change bb to be a native_exec gateway */ bool is_call = bb.native_call; LOG(THREAD, LOG_INTERP, 2, "replacing built bb with native_exec bb\n"); instrlist_clear_and_destroy(dcontext, bb.ilist); vm_area_destroy_list(dcontext, bb.vmlist); dcontext->bb_build_info = NULL; init_interp_build_bb(dcontext, &bb, start, initial_flags _IF_CLIENT(for_trace) _IF_CLIENT(unmangled_ilist)); #ifdef CLIENT_INTERFACE /* PR 232617 - build_native_exec_bb doesn't support setting * translation info, but it also doesn't pass the built bb to the * client (it contains no app code) so we don't need it. 
*/ bb.record_translation = false; #endif bb.native_call = is_call; build_native_exec_bb(dcontext, &bb); } } /* case 9652: we do not want to persist the image entry point, so we keep * it fine-grained */ if (image_entry) bb.flags &= ~FRAG_COARSE_GRAIN; if (DYNAMO_OPTION(opt_jit) && visible && is_jit_managed_area(bb.start_pc)) { ASSERT(bb.overlap_info == NULL || bb.overlap_info->contiguous); jitopt_add_dgc_bb(bb.start_pc, bb.end_pc, TEST(FRAG_IS_TRACE_HEAD, bb.flags)); } /* emit fragment into fcache */ KSTART(bb_emit); f = emit_fragment_ex(dcontext, start, bb.ilist, bb.flags, bb.vmlist, link, visible); KSTOP(bb_emit); #ifdef CUSTOM_TRACES_RET_REMOVAL f->num_calls = dcontext->num_calls; f->num_rets = dcontext->num_rets; #endif #ifdef DGC_DIAGNOSTICS if ((f->flags & FRAG_DYNGEN)) { LOG(THREAD, LOG_INTERP, 1, "new bb is DGC:\n"); DOLOG(1, LOG_INTERP, { disassemble_app_bb(dcontext, start, THREAD); }); DOLOG(3, LOG_INTERP, { disassemble_fragment(dcontext, f, false); }); } #endif DOLOG(2, LOG_INTERP, { disassemble_fragment(dcontext, f, d_r_stats->loglevel <= 3); }); DOLOG(4, LOG_INTERP, { if (TEST(FRAG_SELFMOD_SANDBOXED, f->flags)) { LOG(THREAD, LOG_INTERP, 4, "\nXXXX sandboxed fragment! original code:\n"); disassemble_app_bb(dcontext, f->tag, THREAD); LOG(THREAD, LOG_INTERP, 4, "code cache code:\n"); disassemble_fragment(dcontext, f, false); } }); #if defined(INTERNAL) || defined(DEBUG) || defined(CLIENT_INTERFACE) if (INTERNAL_OPTION(bbdump_tags)) { disassemble_fragment_header(dcontext, f, bbdump_file); } #endif #ifdef INTERNAL DODEBUG({ if (INTERNAL_OPTION(stress_recreate_pc)) { /* verify recreation */ stress_test_recreate(dcontext, f, bb.ilist); } }); #endif exit_interp_build_bb(dcontext, &bb); build_basic_block_fragment_done: dcontext->whereami = wherewasi; KSTOP(bb_building); return f; } /* Builds an instrlist_t as though building a bb from pretend_pc, but decodes * from pc. * Use recreate_fragment_ilist() for building an instrlist_t for a fragment. * If check_vm_area is false, Does NOT call check_thread_vm_area()! * Make sure you know it will terminate at the right spot. It does * check selfmod and native_exec for elision, but otherwise will * follow ubrs to the limit. Currently used for * record_translation_info() (case 3559). * If vmlist!=NULL and check_vm_area, returns the vmlist, which the * caller must free by calling vm_area_destroy_list. */ instrlist_t * recreate_bb_ilist(dcontext_t *dcontext, byte *pc, byte *pretend_pc, app_pc stop_pc, uint flags, uint *res_flags OUT, uint *res_exit_type OUT, bool check_vm_area, bool mangle, void **vmlist_out OUT _IF_CLIENT(bool call_client) _IF_CLIENT(bool for_trace)) { build_bb_t bb; /* don't know full range -- just do simple check now */ if (!is_readable_without_exception(pc, 4)) { LOG(THREAD, LOG_INTERP, 3, "recreate_bb_ilist: cannot read memory at " PFX "\n", pc); return NULL; } LOG(THREAD, LOG_INTERP, 3, "\nbuilding bb instrlist now *********************\n"); init_build_bb(&bb, pc, false /*not interp*/, false /*not for cache*/, mangle, true /*translation*/, INVALID_FILE, flags, NULL /*no overlap*/); /* We support a stop pc to ensure selfmod matches how it was originally built, * w/o having to include the next instr which might have triggered the bb * termination but not been included in the bb (i#1441). * It only applies to full_decode. 
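* (e.g., rebuilding a selfmod bb whose build stopped just before an instr * that triggered termination without being included: stop_pc lets the * rebuild stop at the same point)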
bb.stop_pc = stop_pc; bb.check_vm_area = check_vm_area; if (check_vm_area && vmlist_out != NULL) bb.record_vmlist = true; #ifdef CLIENT_INTERFACE if (check_vm_area && !bb.record_vmlist) bb.record_vmlist = true; /* for xl8 region checks */ /* PR 214962: we call bb hook again, unless the client told us * DR_EMIT_STORE_TRANSLATIONS, in which case we shouldn't come here, * except for traces (see below): */ bb.pass_to_client = (DYNAMO_OPTION(code_api) && call_client && /* i#843: This flag cannot be changed dynamically, so * its current value should match the value used at * ilist building time. Alternatively, we could store * bb->pass_to_client in the fragment. */ !os_module_get_flag(pc, MODULE_NULL_INSTRUMENT)); /* PR 299808: we call bb hook again when translating a trace that * didn't have DR_EMIT_STORE_TRANSLATIONS on itself (or on any * for_trace bb if there was no trace hook). */ bb.for_trace = for_trace; /* instrument_basic_block, called by build_bb_ilist, verifies that all * non-meta instrs have translation fields */ #endif if (pretend_pc != pc) bb.pretend_pc = pretend_pc; build_bb_ilist(dcontext, &bb); LOG(THREAD, LOG_INTERP, 3, "\ndone building bb instrlist *********************\n\n"); if (res_flags != NULL) *res_flags = bb.flags; if (res_exit_type != NULL) *res_exit_type = bb.exit_type; if (check_vm_area && vmlist_out != NULL) *vmlist_out = bb.vmlist; else if (bb.record_vmlist) vm_area_destroy_list(dcontext, bb.vmlist); return bb.ilist; } /* Re-creates an ilist of the fragment that currently contains the * passed-in code cache pc, also returns the fragment. * * Exactly one of pc and (f_res or *f_res) must be NULL: * If pc==NULL, assumes that *f_res is the fragment to use; * else, looks up the fragment, allocating it if necessary. * If f_res!=NULL, the fragment is returned and whether it was allocated * is returned in the alloc_res param. * If f_res==NULL, if the fragment was allocated it is freed here. * * NOTE: does not add prefix instructions to the created ilist; if we change * this to add them, be sure to check recreate_app_* for compatibility (for ex. * adding them and setting their translation to pc would break the current * implementation, and setting translation to NULL would trigger an assert) * * Returns NULL if unable to recreate the fragment ilist (fragment not found * or fragment is pending deletion and app memory might have changed). * In that case f_res is still pointed at the fragment if it was found, and * alloc is valid.
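* * A typical pclookup-based call is (a sketch; decode_trace() above shows the * pre-looked-up-fragment variant): * fragment_t *f = NULL; * bool alloc; * instrlist_t *ilist = * recreate_fragment_ilist(dcontext, pc, &f, &alloc, true _IF_CLIENT(true)); * ... use ilist, then instrlist_clear_and_destroy(dcontext, ilist) ... * if (alloc) * fragment_free(dcontext, f);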
* * For proper synchronization : * If caller is the dcontext's owner then needs to be couldbelinking, otherwise * the dcontext's owner should be suspended and the callers should own the * thread_initexit_lock */ instrlist_t * recreate_fragment_ilist(dcontext_t *dcontext, byte *pc, /*IN/OUT*/ fragment_t **f_res, /*OUT*/ bool *alloc_res, bool mangle _IF_CLIENT(bool call_client)) { fragment_t *f; uint flags = 0; instrlist_t *ilist; bool alloc = false, ok; monitor_data_t md = { 0, }; dr_isa_mode_t old_mode = DEFAULT_ISA_MODE; /* check synchronization, we need to make sure no one flushes the * fragment we just looked up while we are recreating it, if it's the * caller's dcontext then just need to be couldbelinking, otherwise need * the thread_initexit_lock since then we are looking up in someone else's * table (the dcontext's owning thread would also need to be suspended) */ ASSERT((dcontext != GLOBAL_DCONTEXT && d_r_get_thread_id() == dcontext->owning_thread && is_couldbelinking(dcontext)) || (ASSERT_OWN_MUTEX(true, &thread_initexit_lock), true)); STATS_INC(num_recreated_fragments); if (pc == NULL) { ASSERT(f_res != NULL && *f_res != NULL); f = *f_res; } else { /* Ensure callers don't give us both valid f and valid pc */ ASSERT(f_res == NULL || *f_res == NULL); LOG(THREAD, LOG_INTERP, 3, "recreate_fragment_ilist: looking up pc " PFX "\n", pc); f = fragment_pclookup_with_linkstubs(dcontext, pc, &alloc); LOG(THREAD, LOG_INTERP, 3, "\tfound F%d\n", f == NULL ? -1 : f->id); if (f_res != NULL) *f_res = f; /* ref case 3559, others, we won't be able to reliably recreate if * target is pending flush, original memory might no longer be there or * the memory might have changed. caller should use the stored * translation info instead. */ if (f == NULL || TEST(FRAG_WAS_DELETED, f->flags)) { ASSERT(f != NULL || !alloc); /* alloc shouldn't be set if no f */ ilist = NULL; goto recreate_fragment_done; } } /* Recreate in same mode as original fragment */ ok = dr_set_isa_mode(dcontext, FRAG_ISA_MODE(f->flags), &old_mode); ASSERT(ok); if ((f->flags & FRAG_IS_TRACE) == 0) { /* easy case: just a bb */ ilist = recreate_bb_ilist( dcontext, (byte *)f->tag, (byte *)f->tag, NULL /*default stop*/, 0 /*no pre flags*/, &flags, NULL, true /*check vm area*/, mangle, NULL _IF_CLIENT(call_client) _IF_CLIENT(false /*not for_trace*/)); ASSERT(ilist != NULL); if (ilist == NULL) /* a race */ goto recreate_fragment_done; if (PAD_FRAGMENT_JMPS(f->flags)) nop_pad_ilist(dcontext, f, ilist, false /* set translation */); goto recreate_fragment_done; } else { /* build trace up one bb at a time */ instrlist_t *bb; byte *apc; trace_only_t *t = TRACE_FIELDS(f); uint i; instr_t *last; bool mangle_at_end = mangle_trace_at_end(); if (mangle_at_end) { /* we need an md for mangle_trace */ md.trace_tag = f->tag; /* be sure we ask for translation fields */ md.trace_flags = f->flags | FRAG_HAS_TRANSLATION_INFO; md.num_blks = t->num_bbs; md.blk_info = (trace_bb_build_t *)HEAP_ARRAY_ALLOC( dcontext, trace_bb_build_t, md.num_blks, ACCT_TRACE, true); #ifdef CLIENT_INTERFACE md.pass_to_client = true; #endif } ilist = instrlist_create(dcontext); STATS_INC(num_recreated_traces); ASSERT(t->bbs != NULL); for (i = 0; i < t->num_bbs; i++) { void *vmlist = NULL; apc = (byte *)t->bbs[i].tag; bb = recreate_bb_ilist(dcontext, apc, apc, NULL /*default stop*/, 0 /*no pre flags*/, &flags, &md.final_exit_flags, true /*check vm area*/, !mangle_at_end, (mangle_at_end ? 
&vmlist : NULL)_IF_CLIENT(call_client) _IF_CLIENT(true /*for_trace*/)); ASSERT(bb != NULL); if (bb == NULL) { instrlist_clear_and_destroy(dcontext, ilist); vm_area_destroy_list(dcontext, vmlist); ilist = NULL; goto recreate_fragment_done; } if (mangle_at_end) md.blk_info[i].info = t->bbs[i]; last = instrlist_last(bb); ASSERT(last != NULL); #ifdef CLIENT_INTERFACE if (mangle_at_end) { md.blk_info[i].vmlist = vmlist; md.blk_info[i].final_cti = instr_is_cti(instrlist_last(bb)); } #endif /* PR 299808: we need to duplicate what we did when we built the trace. * While if there's no client trace hook we could mangle and fixup as we * go, for simplicity we mangle at the end either way (in either case our * code here is not exactly what we did when we made it anyway) * PR 333597: we can't use mangle_trace if we have elision on. */ if (mangle && !mangle_at_end) { /* To duplicate the trace-building logic: * - call fixup_last_cti() * - retarget the ibl routine just like extend_trace() does */ app_pc target = (last != NULL) ? opnd_get_pc(instr_get_target(last)) : NULL; /* FIXME: is it always safe */ /* convert a basic block IBL, and retarget it to IBL_TRACE* */ if (target != NULL && is_indirect_branch_lookup_routine(dcontext, target)) { target = get_alternate_ibl_routine(dcontext, target, f->flags); ASSERT(target != NULL); LOG(THREAD, LOG_MONITOR, 3, "recreate_fragment_ilist: replacing ibl_routine to target=" PFX "\n", target); instr_set_target(last, opnd_create_pc(target)); } if (DYNAMO_OPTION(pad_jmps) && !INTERNAL_OPTION(pad_jmps_shift_bb)) { /* FIXME - hack, but pad_jmps_shift_bb will be on by * default. Synchronize changes here with recreate_fragment_ilist. * This hack is protected by asserts in nop_pad_ilist() (that * we never add nops to a bb if -pad_jmps_shift_bb) and in * extend_trace_pad_bytes (that we only add bbs to traces). 
*/ /* FIXME - on linux the signal fence exit can trigger the * protective assert in nop_pad_ilist() */ remove_nops_from_ilist(dcontext, bb _IF_DEBUG(true)); } if (instrlist_last(ilist) != NULL) { fixup_last_cti(dcontext, ilist, (app_pc)apc, flags, f->flags, NULL, NULL, true /* record translation */, NULL, NULL, NULL); } } instrlist_append(ilist, instrlist_first(bb)); instrlist_init(bb); /* to clear fields to make destroy happy */ instrlist_destroy(dcontext, bb); } #ifdef CLIENT_INTERFACE /* PR 214962: re-apply client changes, this time storing translation * info for modified instrs */ if (call_client) /* else it's decode_trace() who is not really recreating */ instrument_trace(dcontext, f->tag, ilist, true); /* instrument_trace checks that all non-meta instrs have translation fields */ #endif if (mangle) { if (mangle_at_end) { if (!mangle_trace(dcontext, ilist, &md)) { instrlist_clear_and_destroy(dcontext, ilist); ilist = NULL; goto recreate_fragment_done; } } /* else we mangled one bb at a time up above */ #ifdef INTERNAL /* we only optimize traces */ if (dynamo_options.optimize) { /* re-apply all optimizations to ilist * assumption: all optimizations are deterministic and stateless, * so we can exactly replicate their results */ LOG(THREAD_GET, LOG_INTERP, 2, "\tre-applying optimizations to F%d\n", f->id); # ifdef SIDELINE if (dynamo_options.sideline) { if (!TEST(FRAG_DO_NOT_SIDELINE, f->flags)) optimize_trace(dcontext, f->tag, ilist); /* else, never optimized */ } else # endif optimize_trace(dcontext, f->tag, ilist); } #endif /* FIXME: case 4718 append_trace_speculate_last_ibl(true) * should be called as well */ if (PAD_FRAGMENT_JMPS(f->flags)) nop_pad_ilist(dcontext, f, ilist, false /* set translation */); } } recreate_fragment_done: if (md.blk_info != NULL) { uint i; for (i = 0; i < md.num_blks; i++) { vm_area_destroy_list(dcontext, md.blk_info[i].vmlist); md.blk_info[i].vmlist = NULL; } HEAP_ARRAY_FREE(dcontext, md.blk_info, trace_bb_build_t, md.num_blks, ACCT_TRACE, true); } if (alloc_res != NULL) *alloc_res = alloc; if (f_res == NULL && alloc) fragment_free(dcontext, f); ok = dr_set_isa_mode(dcontext, old_mode, NULL); ASSERT(ok); return ilist; } /*** TRACE BUILDING ROUTINES *****************************************************/ static void process_nops_for_trace(dcontext_t *dcontext, instrlist_t *ilist, uint flags _IF_DEBUG(bool recreating)) { if (PAD_FRAGMENT_JMPS(flags) && !INTERNAL_OPTION(pad_jmps_shift_bb)) { /* FIXME - hack, but pad_jmps_shift_bb will be on by * default. Synchronize changes here with recreate_fragment_ilist. * This hack is protected by asserts in nop_pad_ilist() (that * we never add nops to a bb if -pad_jmps_shift_bb) and in * extend_trace_pad_bytes (that we only add bbs to traces). */ /* FIXME - on linux the signal fence exit can trigger the * protective assert in nop_pad_ilist() */ remove_nops_from_ilist(dcontext, ilist _IF_DEBUG(recreating)); } } #ifdef CUSTOM_EXIT_STUBS /* * Builds custom exit stub instrlist for exit_cti, whose stub is l * Assumes that intra-fragment cti's in the custom stub only target other * instructions in the same stub, never in the body of the fragment or * in other stubs. FIXME: is this too restrictive? If change this, * change the comment in instr_set_exit_stub_code's declaration. 
*/ static void regenerate_custom_exit_stub(dcontext_t *dcontext, instr_t *exit_cti, linkstub_t *l, fragment_t *f) { /* need to decode and restore custom stub instrlist */ byte *cspc = EXIT_STUB_PC(dcontext, f, l); byte *stop = EXIT_FIXED_STUB_PC(dcontext, f, l); instr_t *in, *cti; instrlist_t intra_ctis; instrlist_t *cil = instrlist_create(dcontext); cache_pc start_pc = FCACHE_ENTRY_PC(f); ASSERT(DYNAMO_OPTION(indirect_stubs)); if (l->fixed_stub_offset == 0) return; /* has no custom exit stub */ LOG(THREAD, LOG_INTERP, 3, "in regenerate_custom_exit_stub\n"); instrlist_init(&intra_ctis); while (cspc < stop) { in = instr_create(dcontext); cspc = decode(dcontext, cspc, in); ASSERT(cspc != NULL); /* our own code! */ if (instr_is_cti(in)) { if (!instr_is_return(in) && opnd_is_near_pc(instr_get_target(in)) && (opnd_get_pc(instr_get_target(in)) < start_pc || opnd_get_pc(instr_get_target(in)) > start_pc + f->size)) { d_r_loginst(dcontext, 3, in, "\tcti has off-fragment target"); /* indicate that relative target must be * re-encoded, and that it is not an exit cti */ instr_set_meta(in); instr_set_raw_bits_valid(in, false); } else if (opnd_is_near_pc(instr_get_target(in))) { /* intra-fragment target: we'll change its target operand * from pc to instr_t in second pass, so remember it here */ instr_t *clone = instr_clone(dcontext, in); /* HACK: use note field! */ instr_set_note(clone, (void *)in); instrlist_append(&intra_ctis, clone); } } instrlist_append(cil, in); } /* must fix up intra-ilist cti's to have instr_t targets * assumption: they only target other instrs in custom stub * FIXME: allow targeting other instrs? */ for (in = instrlist_first(cil); in != NULL; in = instr_get_next(in)) { for (cti = instrlist_first(&intra_ctis); cti != NULL; cti = instr_get_next(cti)) { if (opnd_get_pc(instr_get_target(cti)) == instr_get_raw_bits(in)) { /* cti targets this instr */ instr_t *real_cti = (instr_t *)instr_get_note(cti); /* Do not preserve raw bits just in case instrlist changes * and the instr target moves (xref PR 333691) */ instr_set_target(real_cti, opnd_create_instr(in)); d_r_loginst(dcontext, 3, real_cti, "\tthis cti: "); d_r_loginst(dcontext, 3, in, "\t targets intra-stub instr"); break; } } } instrlist_clear(dcontext, &intra_ctis); instr_set_exit_stub_code(exit_cti, cil); } #endif /* Combines instrlist_preinsert to ilist and the size calculation of the addition */ static inline int tracelist_add(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where, instr_t *inst) { /* when we emit the trace we're going to call instr_length() on all instrs * anyway, and we'll re-use any memory allocated here for an encoding */ int size; #if defined(X86) && defined(X64) if (!X64_CACHE_MODE_DC(dcontext)) { instr_set_x86_mode(inst, true /*x86*/); instr_shrink_to_32_bits(inst); } #endif size = instr_length(dcontext, inst); instrlist_preinsert(ilist, where, inst); return size; } /* FIXME i#1668, i#2974: NYI on ARM/AArch64 */ #ifdef X86 /* Combines instrlist_postinsert to ilist and the size calculation of the addition */ static inline int tracelist_add_after(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where, instr_t *inst) { /* when we emit the trace we're going to call instr_length() on all instrs * anyway, and we'll re-use any memory allocated here for an encoding */ int size; # ifdef X64 if (!X64_CACHE_MODE_DC(dcontext)) { instr_set_x86_mode(inst, true /*x86*/); instr_shrink_to_32_bits(inst); } # endif size = instr_length(dcontext, inst); instrlist_postinsert(ilist, where, inst); return size; } #endif /* 
X86 */ #ifdef HASHTABLE_STATISTICS /* increments a given counter - assuming XCX/R2 is dead */ int insert_increment_stat_counter(dcontext_t *dcontext, instrlist_t *trace, instr_t *next, uint *counter_address) { int added_size = 0; /* incrementing a branch-type specific thread private counter */ opnd_t private_branchtype_counter = OPND_CREATE_ABSMEM(counter_address, OPSZ_4); /* using LEA to avoid clobbering eflags in a simple load-increment-store */ /*>>> movl counter, %ecx */ /*>>> lea 1(%ecx), %ecx */ /*>>> movl %ecx, counter */ /* x64: the counter is still 32 bits */ added_size += tracelist_add(dcontext, trace, next, XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG2), private_branchtype_counter)); added_size += tracelist_add( dcontext, trace, next, XINST_CREATE_add(dcontext, opnd_create_reg(SCRATCH_REG2), OPND_CREATE_INT8(1))); added_size += tracelist_add(dcontext, trace, next, XINST_CREATE_store(dcontext, private_branchtype_counter, opnd_create_reg(SCRATCH_REG2))); return added_size; } #endif /* HASHTABLE_STATISTICS */ /* inserts proper instruction(s) to restore XCX spilled on indirect branch mangling * assumes target instrlist is a trace! * returns size to be added to trace */ static inline int insert_restore_spilled_xcx(dcontext_t *dcontext, instrlist_t *trace, instr_t *next) { int added_size = 0; if (DYNAMO_OPTION(private_ib_in_tls)) { #ifdef X86 if (X64_CACHE_MODE_DC(dcontext) && !X64_MODE_DC(dcontext) && IF_X64_ELSE(DYNAMO_OPTION(x86_to_x64_ibl_opt), false)) { added_size += tracelist_add(dcontext, trace, next, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XCX), opnd_create_reg(REG_R9))); } else #endif { added_size += tracelist_add( dcontext, trace, next, XINST_CREATE_load( dcontext, opnd_create_reg(SCRATCH_REG2), opnd_create_tls_slot(os_tls_offset(MANGLE_XCX_SPILL_SLOT)))); } } else { /* We need to restore XCX from TLS for shared fragments, but from * mcontext for private fragments, and all traces are private */ added_size += tracelist_add(dcontext, trace, next, instr_create_restore_from_dcontext( dcontext, SCRATCH_REG2, SCRATCH_REG2_OFFS)); } return added_size; } bool instr_is_trace_cmp(dcontext_t *dcontext, instr_t *inst) { if (!instr_is_our_mangling(inst)) return false; #ifdef X86 return # ifdef X64 instr_get_opcode(inst) == OP_mov_imm || /* mov %rax -> xbx-tls-spill-slot */ instr_get_opcode(inst) == OP_mov_st || instr_get_opcode(inst) == OP_lahf || instr_get_opcode(inst) == OP_seto || instr_get_opcode(inst) == OP_cmp || instr_get_opcode(inst) == OP_jnz || instr_get_opcode(inst) == OP_add || instr_get_opcode(inst) == OP_sahf # else instr_get_opcode(inst) == OP_lea || instr_get_opcode(inst) == OP_jecxz || instr_get_opcode(inst) == OP_jmp # endif ; #elif defined(AARCHXX) /* FIXME i#1668, i#2974: NYI on ARM/AArch64 */ ASSERT_NOT_IMPLEMENTED(DYNAMO_OPTION(disable_traces)); return false; #endif } /* 32-bit only: inserts a comparison to speculative_tag with no side effects; * if the value matches, the continue target is assumed to be immediately * after targeter (which must be < 127 bytes away).
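* (jecxz encodes only an 8-bit displacement, hence the distance limit and the * local continue label rather than a branch to a possibly-far stub)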
* returns size to be added to trace */ static int insert_transparent_comparison(dcontext_t *dcontext, instrlist_t *trace, instr_t *targeter, /* exit CTI */ app_pc speculative_tag) { int added_size = 0; #ifdef X86 instr_t *jecxz; instr_t *continue_label = INSTR_CREATE_label(dcontext); /* instead of: * cmp ecx,const * we use: * lea -const(ecx) -> ecx * jecxz continue * lea const(ecx) -> ecx * jmp exit # usual targeter for stay on trace comparison * continue: # if match, we target post-targeter * * we have to use the landing pad b/c we don't know whether the * stub will be <128 away */ /* lea requires OPSZ_lea operand */ added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_ECX), opnd_create_base_disp(REG_ECX, REG_NULL, 0, -((int)(ptr_int_t)speculative_tag), OPSZ_lea))); jecxz = INSTR_CREATE_jecxz(dcontext, opnd_create_instr(continue_label)); /* do not treat jecxz as exit cti! */ instr_set_meta(jecxz); added_size += tracelist_add(dcontext, trace, targeter, jecxz); /* need to recover address in ecx */ IF_X64(ASSERT_NOT_IMPLEMENTED(!X64_MODE_DC(dcontext))); added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_ECX), opnd_create_base_disp( REG_ECX, REG_NULL, 0, ((int)(ptr_int_t)speculative_tag), OPSZ_lea))); added_size += tracelist_add_after(dcontext, trace, targeter, continue_label); #elif defined(ARM) /* FIXME i#1551: NYI on ARM */ ASSERT_NOT_IMPLEMENTED(false); #endif return added_size; } #if defined(X86) && defined(X64) static int mangle_x64_ib_in_trace(dcontext_t *dcontext, instrlist_t *trace, instr_t *targeter, app_pc next_tag) { int added_size = 0; if (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) { added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_mov_st( dcontext, opnd_create_tls_slot(os_tls_offset(PREFIX_XAX_SPILL_SLOT)), opnd_create_reg(REG_XAX))); added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XAX), OPND_CREATE_INTPTR((ptr_int_t)next_tag))); } else { ASSERT(X64_CACHE_MODE_DC(dcontext)); added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_R8), opnd_create_reg(REG_XAX))); added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_R10), OPND_CREATE_INTPTR((ptr_int_t)next_tag))); } /* saving in the trace and restoring in ibl means that * -unsafe_ignore_eflags_{trace,ibl} must be equivalent */ if (!INTERNAL_OPTION(unsafe_ignore_eflags_trace)) { if (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) { added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_mov_st( dcontext, opnd_create_tls_slot(os_tls_offset(INDIRECT_STUB_SPILL_SLOT)), opnd_create_reg(REG_XAX))); } /* FIXME: share w/ insert_save_eflags() */ added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_lahf(dcontext)); if (!INTERNAL_OPTION(unsafe_ignore_overflow)) { /* OF needs saving */ /* Move OF flags into the OF flag spill slot. 
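* (lahf above captures SF/ZF/AF/PF/CF in %ah but not OF, so OF must be * materialized separately via seto into %al)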
*/ added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_setcc(dcontext, OP_seto, opnd_create_reg(REG_AL))); } if (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) { added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_cmp( dcontext, opnd_create_reg(REG_XCX), opnd_create_tls_slot(os_tls_offset(INDIRECT_STUB_SPILL_SLOT)))); } else { added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XCX), opnd_create_reg(REG_R10))); } } else { added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XCX), (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) ? opnd_create_reg(REG_XAX) : opnd_create_reg(REG_R10))); } /* change jmp into jne to trace cmp entry of ibl routine (special entry * that is after the eflags save) */ instr_set_opcode(targeter, OP_jnz); added_size++; /* jcc is 6 bytes, jmp is 5 bytes */ ASSERT(opnd_is_pc(instr_get_target(targeter))); instr_set_target(targeter, opnd_create_pc(get_trace_cmp_entry( dcontext, opnd_get_pc(instr_get_target(targeter))))); /* since the target gets lost we need to OR in this flag */ instr_exit_branch_set_type(targeter, instr_exit_branch_type(targeter) | INSTR_TRACE_CMP_EXIT); return added_size; } #endif /* Mangles an indirect branch in a trace where a basic block with tag "tag" * is being added as the next block beyond the indirect branch. * Returns the size of instructions added to trace. */ static int mangle_indirect_branch_in_trace(dcontext_t *dcontext, instrlist_t *trace, instr_t *targeter, app_pc next_tag, uint next_flags, instr_t **delete_after /*OUT*/, instr_t *end_instr) { int added_size = 0; #ifdef X86 instr_t *next = instr_get_next(targeter); /* all indirect branches should be ubrs */ ASSERT(instr_is_ubr(targeter)); /* expecting basic blocks only */ ASSERT((end_instr != NULL && targeter == end_instr) || targeter == instrlist_last(trace)); ASSERT(delete_after != NULL); *delete_after = (next == NULL || (end_instr != NULL && targeter == end_instr)) ? 
NULL : instr_get_prev(next); STATS_INC(trace_ib_cmp); /* Change jump to indirect_branch_lookup to a conditional jump * based on indirect target not equaling next block in trace * * the bb has already done: * spill xcx to xcx-tls-spill-slot * mov curtarget, xcx * <any other side effects of ind branch, like ret xsp adjust> * * and we now want to accomplish: * cmp ecx,const * * on 32-bit we use: * lea -const(ecx) -> ecx * jecxz continue * lea const(ecx) -> ecx * jmp exit # usual targeter for stay on trace comparison * continue: # if match, we target post-targeter * restore ecx * we have to use the landing pad b/c we don't know whether the * stub will be <128 away * * on 64-bit we use (PR 245832): * mov xax, xax-tls-spill-slot * mov $staytarget, xax * if !INTERNAL_OPTION(unsafe_ignore_eflags_{trace,ibl}) * mov xax, xbx-tls-spill-slot * lahf * seto al * cmp xcx, xbx-tls-spill-slot * else * cmp xcx, xax * jne exit * if xcx live: * mov xcx-tls-spill-slot, xcx * if flags live && unsafe options not on: * add 7f, al * sahf * if xax live: * mov xax-tls-spill-slot, xax */ # ifdef CUSTOM_TRACES_RET_REMOVAL IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* try to remove ret * FIXME: also handle ret imm => prev instr is add */ inst = instr_get_prev(targeter); if (dcontext->call_depth >= 0 && instr_raw_bits_valid(inst)) { byte *b = inst->bytes + inst->length - 1; /* 0x40538115 89 0d ec 68 06 40 mov %ecx -> 0x400668ec 0x4053811b 59 pop %esp (%esp) -> %ecx %esp 0x4053811c 83 c4 04 add $0x04 %esp -> %esp */ LOG(THREAD, LOG_MONITOR, 4, "ret removal: *b=0x%x, prev=" PFX ", dcontext=" PFX ", 0x%x\n", *b, *((int *)(b - 4)), dcontext, XCX_OFFSET); if ((*b == 0x59 && *((int *)(b - 4)) == ((uint)dcontext) + XCX_OFFSET) || (*(b - 3) == 0x59 && *((int *)(b - 7)) == ((uint)dcontext) + XCX_OFFSET && *(b - 2) == 0x83 && *(b - 1) == 0xc4)) { uint esp_add; /* already added calls & rets to call depth * if not negative, the call for this ret is earlier in this trace! 
*/ LOG(THREAD, LOG_MONITOR, 4, "fixup_last_cti: removing ret!\n"); /* delete save ecx and pop */ if (*b == 0x59) { instr_set_raw_bits(inst, inst->bytes, inst->length - 7); esp_add = 4; } else { /* delete add too */ instr_set_raw_bits(inst, inst->bytes, inst->length - 10); esp_add = 4 + (uint)(*b); LOG(THREAD, LOG_MONITOR, 4, "*b=0x%x, esp_add=%d\n", *b, esp_add); } # ifdef DEBUG num_rets_removed++; # endif removed_ret = true; added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_ESP), opnd_create_base_disp(REG_ESP, REG_NULL, 0, esp_add, OPSZ_lea))); } } if (removed_ret) { *delete_after = instr_get_prev(targeter); return added_size; } # endif /* CUSTOM_TRACES_RET_REMOVAL */ # ifdef X64 if (X64_CACHE_MODE_DC(dcontext)) { added_size += mangle_x64_ib_in_trace(dcontext, trace, targeter, next_tag); } else { # endif if (!INTERNAL_OPTION(unsafe_ignore_eflags_trace)) { /* if equal follow to the next instruction after the exit CTI */ added_size += insert_transparent_comparison(dcontext, trace, targeter, next_tag); /* leave jmp as it is, a jmp to exit stub (thence to ind br * lookup) */ } else { /* assume eflags don't need to be saved across ind branches, * so go ahead and use cmp, jne */ /* FIXME: no way to cmp w/ 64-bit immed */ IF_X64(ASSERT_NOT_IMPLEMENTED(!X64_MODE_DC(dcontext))); added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_ECX), OPND_CREATE_INT32((int)(ptr_int_t)next_tag))); /* Change jmp into jne indirect_branch_lookup */ /* CHECK: is that also going to exit stub */ instr_set_opcode(targeter, OP_jnz); added_size++; /* jcc is 6 bytes, jmp is 5 bytes */ } # ifdef X64 } # endif /* X64 */ /* PR 214962: our spill restoration needs this whole sequence marked mangle */ instr_set_our_mangling(targeter, true); LOG(THREAD, LOG_MONITOR, 3, "fixup_last_cti: added cmp vs. 
" PFX " for ind br\n", next_tag); # ifdef HASHTABLE_STATISTICS /* If we do stay on the trace, increment a counter using dead XCX */ if (INTERNAL_OPTION(stay_on_trace_stats)) { ibl_type_t ibl_type; /* FIXME: see if can test the instr flags instead */ DEBUG_DECLARE(bool ok =) get_ibl_routine_type(dcontext, opnd_get_pc(instr_get_target(targeter)), &ibl_type); ASSERT(ok); added_size += insert_increment_stat_counter( dcontext, trace, next, &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type) ->ib_stay_on_trace_stat); } # endif /* HASHTABLE_STATISTICS */ /* If we do stay on the trace, must restore xcx * TODO optimization: check if xcx is live or not in next bb */ added_size += insert_restore_spilled_xcx(dcontext, trace, next); # ifdef X64 if (X64_CACHE_MODE_DC(dcontext)) { LOG(THREAD, LOG_INTERP, 4, "next_flags for post-ibl-cmp: 0x%x\n", next_flags); if (!TEST(FRAG_WRITES_EFLAGS_6, next_flags) && !INTERNAL_OPTION(unsafe_ignore_eflags_trace)) { if (!TEST(FRAG_WRITES_EFLAGS_OF, next_flags) && /* OF was saved */ !INTERNAL_OPTION(unsafe_ignore_overflow)) { /* restore OF using add that overflows if OF was on when we did seto */ added_size += tracelist_add(dcontext, trace, next, INSTR_CREATE_add(dcontext, opnd_create_reg(REG_AL), OPND_CREATE_INT8(0x7f))); } added_size += tracelist_add(dcontext, trace, next, INSTR_CREATE_sahf(dcontext)); } else STATS_INC(trace_ib_no_flag_restore); /* TODO optimization: check if xax is live or not in next bb */ if (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) { added_size += tracelist_add( dcontext, trace, next, INSTR_CREATE_mov_ld( dcontext, opnd_create_reg(REG_XAX), opnd_create_tls_slot(os_tls_offset(PREFIX_XAX_SPILL_SLOT)))); } else { added_size += tracelist_add(dcontext, trace, next, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XAX), opnd_create_reg(REG_R8))); } } # endif #elif defined(ARM) /* FIXME i#1551: NYI on ARM */ ASSERT_NOT_IMPLEMENTED(false); #endif /* X86/ARM */ return added_size; } /* This routine handles the mangling of the cti at the end of the * previous block when adding a new block (f) to the trace fragment. * If prev_l is not NULL, matches the ordinal of prev_l to the nth * exit cti in the trace instrlist_t. * * If prev_l is NULL: WARNING: this routine assumes that the previous * block can only have a single indirect branch -- otherwise there is * no way to determine which indirect exit targeted the new block! No * assumptions are made about direct exits -- we can walk through them * all to find the one that targeted the new block. * * Returns an upper bound on the size added to the trace with inserted * instructions. * If we change this to add a substantial # of instrs, should update * TRACE_CTI_MANGLE_SIZE_UPPER_BOUND (assert at bottom should notify us) * * If you want to re-add the ability to add the front end of a trace, * revive the now-removed CUSTOM_TRACES_ADD_TRACE define from the attic. */ static int fixup_last_cti(dcontext_t *dcontext, instrlist_t *trace, app_pc next_tag, uint next_flags, uint trace_flags, fragment_t *prev_f, linkstub_t *prev_l, bool record_translation, uint *num_exits_deleted /*OUT*/, /* If non-NULL, only looks inside trace between these two */ instr_t *start_instr, instr_t *end_instr) { app_pc target_tag; instr_t *inst, *targeter = NULL; /* at end of routine we will delete all instrs after this one: */ instr_t *delete_after = NULL; bool is_indirect = false; /* Added size for transformations done here. * Use tracelist_add to automate adding inserted instr sizes. 
*/ int added_size = 0; uint exits_deleted = 0; /* count exit stubs to get the ordinal of the exit that targeted us * start at prev_l, and count up extraneous exits and blks until end */ uint nth_exit = 0, cur_exit; #ifdef CUSTOM_TRACES_RET_REMOVAL bool removed_ret = false; #endif bool have_ordinal = false; if (prev_l != NULL && prev_l == get_deleted_linkstub(dcontext)) { int last_ordinal = get_last_linkstub_ordinal(dcontext); if (last_ordinal != -1) { nth_exit = last_ordinal; have_ordinal = true; } } if (!have_ordinal && prev_l != NULL && !LINKSTUB_FAKE(prev_l)) { linkstub_t *stub = FRAGMENT_EXIT_STUBS(prev_f); while (stub != prev_l) stub = LINKSTUB_NEXT_EXIT(stub); /* if prev_l is cbr followed by ubr, we'll get 1 for ubr, * but we want 0, so we count prev_l itself, then decrement */ stub = LINKSTUB_NEXT_EXIT(stub); while (stub != NULL) { nth_exit++; stub = LINKSTUB_NEXT_EXIT(stub); } } /* else, we assume it's the final exit */ LOG(THREAD, LOG_MONITOR, 4, "fixup_last_cti: looking for %d-th exit cti from bottom\n", nth_exit); if (start_instr != NULL) { ASSERT(end_instr != NULL); } else { start_instr = instrlist_first(trace); end_instr = instrlist_last(trace); } start_instr = instr_get_prev(start_instr); /* get open-ended bound */ cur_exit = nth_exit; /* now match the ordinal to the instrs. * we don't have any way to find boundary with previous-previous block * to make sure we didn't go backwards too far -- does it matter? */ for (inst = end_instr; inst != NULL && inst != start_instr; inst = instr_get_prev(inst)) { if (instr_is_exit_cti(inst)) { if (cur_exit == 0) { ibl_type_t ibl_type; /* exit cti is guaranteed to have pc target */ target_tag = opnd_get_pc(instr_get_target(inst)); is_indirect = get_ibl_routine_type(dcontext, target_tag, &ibl_type); if (is_indirect) { /* this should be a trace exit stub therefore it cannot be IBL_BB* */ ASSERT(IS_IBL_TRACE(ibl_type.source_fragment_type)); targeter = inst; break; } else { if (prev_l != NULL) { /* direct jmp, better point to us */ ASSERT(target_tag == next_tag); targeter = inst; break; } else { /* need to search for targeting jmp */ DOLOG(4, LOG_MONITOR, { d_r_loginst(dcontext, 4, inst, "exit==targeter?"); }); LOG(THREAD, LOG_MONITOR, 4, "target_tag = " PFX ", next_tag = " PFX "\n", target_tag, next_tag); if (target_tag == next_tag) { targeter = inst; break; } } } } else if (prev_l != NULL) { LOG(THREAD, LOG_MONITOR, 4, "counting backwards: %d == target_tag = " PFX "\n", cur_exit, opnd_get_pc(instr_get_target(inst))); cur_exit--; } } /* is exit cti */ } ASSERT(targeter != NULL); if (record_translation) instrlist_set_translation_target(trace, instr_get_translation(targeter)); instrlist_set_our_mangling(trace, true); /* PR 267260 */ DOLOG(4, LOG_MONITOR, { d_r_loginst(dcontext, 4, targeter, "\ttargeter"); }); if (is_indirect) { added_size += mangle_indirect_branch_in_trace( dcontext, trace, targeter, next_tag, next_flags, &delete_after, end_instr); } else { /* direct jump or conditional branch */ instr_t *next = targeter->next; if (instr_is_cbr(targeter)) { LOG(THREAD, LOG_MONITOR, 4, "fixup_last_cti: inverted logic of cbr\n"); if (next != NULL && instr_is_ubr(next)) { /* cbr followed by ubr: if cbr got us here, reverse cbr and * remove ubr */ instr_invert_cbr(targeter); instr_set_target(targeter, instr_get_target(next)); ASSERT(next == end_instr); delete_after = targeter; LOG(THREAD, LOG_MONITOR, 4, "\tremoved ubr following cbr\n"); } else { ASSERT_NOT_REACHED(); } } else if (instr_is_ubr(targeter)) { #ifndef CUSTOM_TRACES ASSERT(targeter == 
end_instr); #endif /* remove unnecessary ubr at end of block */ delete_after = instr_get_prev(targeter); if (delete_after != NULL) { LOG(THREAD, LOG_MONITOR, 4, "fixup_last_cti: removed ubr\n"); } } else ASSERT_NOT_REACHED(); } /* remove all instrs after this cti -- but what if internal * control flow jumps ahead and then comes back? * too expensive to check for such all the time. * FIXME: what to do? * * ifdef CUSTOM_TRACES: * FIXME: rather than adding entire trace on and then chopping off where * we exited, why not add after we know where to stop? */ if (delete_after != NULL) { ASSERT(delete_after != end_instr); delete_after = instr_get_next(delete_after); while (delete_after != NULL) { inst = delete_after; if (delete_after == end_instr) delete_after = NULL; else delete_after = instr_get_next(delete_after); if (instr_is_exit_cti(inst)) { /* assumption: passing in cache target to exit_stub_size works * just as well as linkstub_t target, since only cares whether * targeting ibl */ app_pc target = opnd_get_pc(instr_get_target(inst)); /* we already added all the stub size differences to the trace, * so we subtract the trace size of the stub here */ added_size -= local_exit_stub_size(dcontext, target, trace_flags); exits_deleted++; } else if (instr_opcode_valid(inst) && instr_is_cti(inst)) { LOG(THREAD, LOG_MONITOR, 3, "WARNING: deleting non-exit cti in unused tail of frag added to " "trace\n"); } d_r_loginst(dcontext, 4, inst, "\tdeleting"); instrlist_remove(trace, inst); added_size -= instr_length(dcontext, inst); instr_destroy(dcontext, inst); } } if (num_exits_deleted != NULL) *num_exits_deleted = exits_deleted; if (record_translation) instrlist_set_translation_target(trace, NULL); instrlist_set_our_mangling(trace, false); /* PR 267260 */ #if defined(X86) && defined(X64) DOCHECK(1, { if (FRAG_IS_32(trace_flags)) { instr_t *in; /* in case we missed any in tracelist_add() */ for (in = instrlist_first(trace); in != NULL; in = instr_get_next(in)) { if (instr_is_our_mangling(in)) ASSERT(instr_get_x86_mode(in)); } } }); #endif ASSERT(added_size < TRACE_CTI_MANGLE_SIZE_UPPER_BOUND); return added_size; } /* Add a speculative counter on last IBL exit * Returns additional size to add to trace estimate. 
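 *
 * Schematically (a sketch of the result, not the exact encoding -- see
 * the byte-level example inside the body):
 *     <transparent compare of app XCX vs. speculate_next_tag>
 *     jmp <IBL exit stub>            # taken only on a mismatch
 *   continue:
 *     <restore spilled app XCX>
 *     jmp speculate_next_tag         # new pseudo-direct exit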
 */
int
append_trace_speculate_last_ibl(dcontext_t *dcontext, instrlist_t *trace,
                                app_pc speculate_next_tag, bool record_translation)
{
    /* unlike fixup_last_cti() here we are about to go directly to the IBL routine */
    /* spill XCX in a scratch slot - note always using TLS */
    int added_size = 0;
    ibl_type_t ibl_type;

    instr_t *inst = instrlist_last(trace); /* currently only relevant to last CTI */
    instr_t *where = inst;                 /* preinsert before last CTI */

    instr_t *next = instr_get_next(inst);
    DEBUG_DECLARE(bool ok;)
    ASSERT(speculate_next_tag != NULL);

    ASSERT(inst != NULL);
    ASSERT(instr_is_exit_cti(inst));

    /* FIXME: see if can test the instr flags instead */
    DEBUG_DECLARE(ok =)
    get_ibl_routine_type(dcontext, opnd_get_pc(instr_get_target(inst)), &ibl_type);
    ASSERT(ok);

    if (record_translation)
        instrlist_set_translation_target(trace, instr_get_translation(inst));
    instrlist_set_our_mangling(trace, true); /* PR 267260 */

    STATS_INC(num_traces_end_at_ibl_speculative_link);

#ifdef HASHTABLE_STATISTICS
    DOSTATS({
        if (INTERNAL_OPTION(speculate_last_exit_stats)) {
            int tls_stat_scratch_slot = os_tls_offset(HTABLE_STATS_SPILL_SLOT);

            added_size += tracelist_add(
                dcontext, trace, where,
                XINST_CREATE_store(dcontext,
                                   opnd_create_tls_slot(tls_stat_scratch_slot),
                                   opnd_create_reg(SCRATCH_REG2)));
            added_size += insert_increment_stat_counter(
                dcontext, trace, where,
                &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type)
                     ->ib_trace_last_ibl_exit);
            added_size += tracelist_add(
                dcontext, trace, where,
                XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG2),
                                  opnd_create_tls_slot(tls_stat_scratch_slot)));
        }
    });
#endif
    /* preinsert comparison before exit CTI, but increment of success
     * statistics after it
     */

    /* we need to compare to speculate_next_tag now */
    /* XCX holds value to match */

    /* should use similar eflags-clobbering scheme to inline cmp */
    IF_X64(ASSERT_NOT_IMPLEMENTED(false));
    /*
     *    8d 89 76 9b bf ff    lea    -tag(%ecx) -> %ecx
     *    e3 0b                jecxz  continue
     *    8d 89 8a 64 40 00    lea    tag(%ecx) -> %ecx
     *    e9 17 00 00 00       jmp    <exit stub 1: IBL>
     *
     * continue:
     *    <increment stats>
     *    # see FIXME whether to go to prefix or do here
     *    <restore app ecx>
     *    e9 cc aa dd 00       jmp    speculate_next_tag
     *
     */

    /* leave jmp as it is, a jmp to exit stub (thence to ind br lookup) */
    added_size +=
        insert_transparent_comparison(dcontext, trace, where, speculate_next_tag);

#ifdef HASHTABLE_STATISTICS
    DOSTATS({
        reg_id_t reg = IF_X86_ELSE(REG_XCX, DR_REG_R2);
        if (INTERNAL_OPTION(speculate_last_exit_stats)) {
            int tls_stat_scratch_slot = os_tls_offset(HTABLE_STATS_SPILL_SLOT);
            /* XCX already saved */

            added_size += insert_increment_stat_counter(
                dcontext, trace, next,
                &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type)
                     ->ib_trace_last_ibl_speculate_success);
            /* restore XCX to app IB target */
            added_size += tracelist_add(
                dcontext, trace, next,
                XINST_CREATE_load(dcontext, opnd_create_reg(reg),
                                  opnd_create_tls_slot(tls_stat_scratch_slot)));
        }
    });
#endif
    /* Adding a new CTI for a speculative target that is a pseudo
     * direct exit. Although we could have used the indirect stub
     * to be the unlinked path, with a new CTI we can unlink a
     * speculated fragment without affecting any other targets
     * reached by the IBL. Also in general we could decide to add
     * multiple speculative comparisons and to chain them we'd
     * need new CTIs for them.
     */

    /* Ensure all register state is properly preserved on both linked
     * and unlinked paths - currently only XCX is in use. 
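 * (Per the byte-level sketch above: on the unlinked/IBL path the second
 * lea re-derives XCX to hold the app indirect-branch target, as the IBL
 * routine expects, while the on-trace path below restores XCX from its
 * TLS spill slot.)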
* * * Preferably we should be targeting prefix of target to * save some space for recovering XCX from hot path. We'd * restore XCX in the exit stub when unlinked. * So it would act like a direct CTI when linked and like indirect * when unlinked. It could just be an unlinked indirect stub, if * we haven't modified any other registers or flags. * * For simplicity, we currently restore XCX here and use a plain * direct exit stub that goes to target start_pc instead of * prefixes. * * FIXME: (case 5085) the problem with the current scheme is that * when we exit unlinked the source will be marked as a DIRECT * exit - therefore no security policies will be enforced. * * FIXME: (case 4718) should add speculated target to current list * in case of RCT policy that needs to be invalidated if target is * flushed */ /* must restore xcx to app value, FIXME: see above for doing this in prefix+stub */ added_size += insert_restore_spilled_xcx(dcontext, trace, next); /* add a new direct exit stub */ added_size += tracelist_add(dcontext, trace, next, XINST_CREATE_jump(dcontext, opnd_create_pc(speculate_next_tag))); LOG(THREAD, LOG_INTERP, 3, "append_trace_speculate_last_ibl: added cmp vs. " PFX " for ind br\n", speculate_next_tag); if (record_translation) instrlist_set_translation_target(trace, NULL); instrlist_set_our_mangling(trace, false); /* PR 267260 */ return added_size; } #ifdef HASHTABLE_STATISTICS /* Add a counter on last IBL exit * if speculate_next_tag is not NULL then check case 4817's possible success */ /* FIXME: remove this routine once append_trace_speculate_last_ibl() * currently useful only to see statistics without side effects of * adding exit stub */ int append_ib_trace_last_ibl_exit_stat(dcontext_t *dcontext, instrlist_t *trace, app_pc speculate_next_tag) { /* unlike fixup_last_cti() here we are about to go directly to the IBL routine */ /* spill XCX in a scratch slot - note always using TLS */ int tls_stat_scratch_slot = os_tls_offset(HTABLE_STATS_SPILL_SLOT); int added_size = 0; ibl_type_t ibl_type; instr_t *inst = instrlist_last(trace); /* currently only relevant to last CTI */ instr_t *where = inst; /* preinsert before exit CTI */ reg_id_t reg = IF_X86_ELSE(REG_XCX, DR_REG_R2); DEBUG_DECLARE(bool ok;) /* should use similar eflags-clobbering scheme to inline cmp */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); ASSERT(inst != NULL); ASSERT(instr_is_exit_cti(inst)); /* FIXME: see if can test the instr flags instead */ ok = get_ibl_routine_type(dcontext, opnd_get_pc(instr_get_target(inst)), &ibl_type); ASSERT(ok); added_size += tracelist_add( dcontext, trace, where, XINST_CREATE_store(dcontext, opnd_create_tls_slot(tls_stat_scratch_slot), opnd_create_reg(reg))); added_size += insert_increment_stat_counter( dcontext, trace, where, &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type) ->ib_trace_last_ibl_exit); added_size += tracelist_add(dcontext, trace, where, XINST_CREATE_load(dcontext, opnd_create_reg(reg), opnd_create_tls_slot(tls_stat_scratch_slot))); if (speculate_next_tag != NULL) { instr_t *next = instr_get_next(inst); reg_id_t reg = IF_X86_ELSE(REG_ECX, DR_REG_R2); /* preinsert comparison before exit CTI, but increment goes after it */ /* we need to compare to speculate_next_tag now - just like * fixup_last_cti() would do later. 
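         * (Unlike append_trace_speculate_last_ibl() above, this stats-only
         * variant adds no new direct exit: on a match it merely bumps the
         * speculate-success counter, restores ECX, and jumps back to the
         * original exit cti.)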
*/ /* ECX holds value to match here */ /* leave jmp as it is, a jmp to exit stub (thence to ind br lookup) */ /* continue: * increment success counter * jmp targeter * * FIXME: now the last instruction is no longer the exit_cti - see if that * breaks any assumptions, using a short jump to see if anyone erroneously * uses this */ added_size += insert_transparent_comparison(dcontext, trace, where, speculate_next_tag); /* we'll kill again although ECX restored unnecessarily by comparison routine */ added_size += insert_increment_stat_counter( dcontext, trace, next, &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type) ->ib_trace_last_ibl_speculate_success); /* restore ECX */ added_size += tracelist_add(dcontext, trace, next, XINST_CREATE_load(dcontext, opnd_create_reg(reg), opnd_create_tls_slot(tls_stat_scratch_slot))); /* jmp where */ added_size += tracelist_add(dcontext, trace, next, IF_X86_ELSE(INSTR_CREATE_jmp_short, XINST_CREATE_jump)( dcontext, opnd_create_instr(where))); } return added_size; } #endif /* HASHTABLE_STATISTICS */ /* Add the fragment f to the end of the trace instrlist_t kept in dcontext * * Note that recreate_fragment_ilist() is making assumptions about its operation * synchronize changes * * Returns the size change in the trace from mangling the previous block * (assumes the caller has already calculated the size from adding the new block) */ uint extend_trace(dcontext_t *dcontext, fragment_t *f, linkstub_t *prev_l) { monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field; fragment_t *prev_f = NULL; instrlist_t *trace = &(md->trace); instrlist_t *ilist; uint size; uint prev_mangle_size = 0; uint num_exits_deleted = 0; uint new_exits_dir = 0, new_exits_indir = 0; #ifdef X64 ASSERT((!!FRAG_IS_32(md->trace_flags) == !X64_MODE_DC(dcontext)) || (!FRAG_IS_32(md->trace_flags) && !X64_MODE_DC(dcontext) && DYNAMO_OPTION(x86_to_x64))); #endif STATS_INC(num_traces_extended); /* if you want to re-add the ability to add traces, revive * CUSTOM_TRACES_ADD_TRACE from the attic */ ASSERT(!TEST(FRAG_IS_TRACE, f->flags)); /* expecting block fragments */ if (prev_l != NULL) { ASSERT(!LINKSTUB_FAKE(prev_l) || /* we track the ordinal of the del linkstub so it's ok */ prev_l == get_deleted_linkstub(dcontext)); prev_f = linkstub_fragment(dcontext, prev_l); LOG(THREAD, LOG_MONITOR, 4, "prev_l = owned by F%d, branch pc " PFX "\n", prev_f->id, EXIT_CTI_PC(prev_f, prev_l)); } else { LOG(THREAD, LOG_MONITOR, 4, "prev_l is NULL\n"); } /* insert code to optimize last branch based on new fragment */ if (instrlist_last(trace) != NULL) { prev_mangle_size = fixup_last_cti(dcontext, trace, f->tag, f->flags, md->trace_flags, prev_f, prev_l, false, &num_exits_deleted, NULL, NULL); } #ifdef CUSTOM_TRACES_RET_REMOVAL /* add now, want fixup to operate on depth before adding new blk */ dcontext->call_depth += f->num_calls; dcontext->call_depth -= f->num_rets; #endif LOG(THREAD, LOG_MONITOR, 4, "\tadding block %d == " PFX "\n", md->num_blks, f->tag); size = md->trace_buf_size - md->trace_buf_top; LOG(THREAD, LOG_MONITOR, 4, "decoding F%d into trace buf @" PFX " + 0x%x = " PFX "\n", f->id, md->trace_buf, md->trace_buf_top, md->trace_buf + md->trace_buf_top); /* FIXME: PR 307388: if md->pass_to_client, much of this is a waste of time as * we're going to re-mangle and re-fixup after passing our unmangled list to the * client. We do want to keep the size estimate, which requires having the last * cti at least, so for now we keep all the work. 
Of course the size estimate is * less valuable when the client may add a ton of instrumentation. */ /* decode_fragment will convert f's ibl routines into those appropriate for * our trace, whether f and the trace are shared or private */ ilist = decode_fragment(dcontext, f, md->trace_buf + md->trace_buf_top, &size, md->trace_flags, &new_exits_dir, &new_exits_indir); md->blk_info[md->num_blks].info.tag = f->tag; #if defined(RETURN_AFTER_CALL) || defined(RCT_IND_BRANCH) if (md->num_blks > 0) md->blk_info[md->num_blks - 1].info.num_exits -= num_exits_deleted; md->blk_info[md->num_blks].info.num_exits = new_exits_dir + new_exits_indir; #endif md->num_blks++; /* We need to remove any nops we added for -pad_jmps (we don't expect there * to be any in a bb if -pad_jmps_shift_bb) to avoid screwing up * fixup_last_cti etc. */ process_nops_for_trace(dcontext, ilist, f->flags _IF_DEBUG(false /*!recreating*/)); DOLOG(5, LOG_MONITOR, { LOG(THREAD, LOG_MONITOR, 5, "post-trace-ibl-fixup, ilist is:\n"); instrlist_disassemble(dcontext, f->tag, ilist, THREAD); }); ASSERT(!instrlist_get_our_mangling(ilist)); instrlist_append(trace, instrlist_first(ilist)); instrlist_init(ilist); /* clear fields so destroy won't kill instrs on trace list */ instrlist_destroy(dcontext, ilist); md->trace_buf_top += size; ASSERT(md->trace_buf_top < md->trace_buf_size); LOG(THREAD, LOG_MONITOR, 4, "post-extend_trace, trace buf + 0x%x => " PFX "\n", md->trace_buf_top, md->trace_buf); DOLOG(4, LOG_MONITOR, { LOG(THREAD, LOG_MONITOR, 4, "\nafter extending trace:\n"); instrlist_disassemble(dcontext, md->trace_tag, trace, THREAD); }); return prev_mangle_size; } /* If branch_type is 0, sets it to the type of a ubr */ static instr_t * create_exit_jmp(dcontext_t *dcontext, app_pc target, app_pc translation, uint branch_type) { instr_t *jmp = XINST_CREATE_jump(dcontext, opnd_create_pc(target)); instr_set_translation(jmp, translation); if (branch_type == 0) instr_exit_branch_set_type(jmp, instr_branch_type(jmp)); else instr_exit_branch_set_type(jmp, branch_type); instr_set_our_mangling(jmp, true); return jmp; } /* Given an ilist with no mangling or stitching together, this routine does those * things. This is used both for CLIENT_INTERFACE and for recreating traces * for state translation. * It assumes the ilist abides by client rules: single-mbr bbs, no * changes in source app code. Else, it returns false. * Elision is supported. * * Our docs disallow removal of an entire block, changing inter-block ctis, and * changing the ordering of the blocks, which is what allows us to correctly * mangle the inter-block ctis here. * * Reads the following fields from md: * - trace_tag * - trace_flags * - num_blks * - blk_info * - final_exit_flags */ bool mangle_trace(dcontext_t *dcontext, instrlist_t *ilist, monitor_data_t *md) { instr_t *inst, *next_inst, *start_instr, *jmp; uint blk, num_exits_deleted; app_pc fallthrough = NULL; bool found_syscall = false, found_int = false; #ifdef CLIENT_INTERFACE /* We don't assert that mangle_trace_at_end() is true b/c the client * can unregister its bb and trace hooks if it really wants to, * though we discourage it. */ ASSERT(md->pass_to_client); #endif LOG(THREAD, LOG_MONITOR, 2, "mangle_trace " PFX "\n", md->trace_tag); DOLOG(4, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 4, "ilist passed to mangle_trace:\n"); instrlist_disassemble(dcontext, md->trace_tag, ilist, THREAD); }); /* We make 3 passes. 
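     * In outline:
     *   (1) find bb boundaries and append the exit jmps fixup_last_cti expects;
     *   (2) mangle the whole list;
     *   (3) stitch the delineated bbs together, re-pointing each bb's final
     *       exit at the next bb via fixup_last_cti.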
* 1st walk: find bb boundaries */ blk = 0; for (inst = instrlist_first(ilist); inst != NULL; inst = next_inst) { app_pc xl8 = instr_get_translation(inst); next_inst = instr_get_next(inst); if (instr_is_meta(inst)) continue; DOLOG(5, LOG_INTERP, { LOG(THREAD, LOG_MONITOR, 4, "transl " PFX " ", xl8); d_r_loginst(dcontext, 4, inst, "considering non-meta"); }); /* Skip blocks that don't end in ctis (except final) */ while (blk < md->num_blks - 1 && !md->blk_info[blk].final_cti) { LOG(THREAD, LOG_MONITOR, 4, "skipping fall-through bb #%d\n", blk); md->blk_info[blk].end_instr = NULL; blk++; } #ifdef CLIENT_INTERFACE /* Ensure non-ignorable syscall/int2b terminates trace */ if (md->pass_to_client && !client_check_syscall(ilist, inst, &found_syscall, &found_int)) return false; /* Clients should not add new source code regions, which would mess us up * here, as well as mess up our cache consistency (both page prot and * selfmod). */ if (md->pass_to_client && (!vm_list_overlaps(dcontext, md->blk_info[blk].vmlist, xl8, xl8 + 1) && !(instr_is_ubr(inst) && opnd_is_pc(instr_get_target(inst)) && xl8 == opnd_get_pc(instr_get_target(inst)))) IF_WINDOWS(&&!vmvector_overlap(landing_pad_areas, md->blk_info[blk].info.tag, md->blk_info[blk].info.tag + 1))) { LOG(THREAD, LOG_MONITOR, 2, "trace error: out-of-bounds transl " PFX " vs block w/ start " PFX "\n", xl8, md->blk_info[blk].info.tag); CLIENT_ASSERT(false, "trace's app sources (instr_set_translation() targets) " "must remain within original bounds"); return false; } #endif /* in case no exit ctis in last block, find last non-meta fall-through */ if (blk == md->num_blks - 1) { /* Do not call instr_length() on this inst: use length * of translation! (i#509) */ fallthrough = decode_next_pc(dcontext, xl8); } /* PR 299808: identify bb boundaries. We can't go by translations alone, as * ubrs can point at their targets and theoretically the entire trace could * be ubrs: so we have to go by exits, and limit what the client can do. We * can assume that each bb should not violate the bb callback rules (PR * 215217): if has cbr or mbr, that must end bb. If it has a call, that * could be elided; if not, its target should match the start of the next * block. We also want to * impose the can't-be-trace rules (PR 215219), which are not documented for * bbs: if more than one exit cti or if code beyond last exit cti then can't * be in a trace. We can soften a little and allow extra ubrs if they do not * target the subsequent block. FIXME: we could have stricter translation * reqts for ubrs: make them point at corresponding app ubr (but what if * really correspond to app cbr?): then can handle code past exit ubr. */ if (instr_will_be_exit_cti(inst) && ((!instr_is_ubr(inst) && !instr_is_near_call_direct(inst)) || (inst == instrlist_last(ilist) || (blk + 1 < md->num_blks && /* client is disallowed from changing bb exits and sequencing in trace * hook; if they change in bb for_trace, will be reflected here. 
*/ opnd_get_pc(instr_get_target(inst)) == md->blk_info[blk + 1].info.tag)))) { DOLOG(4, LOG_INTERP, { d_r_loginst(dcontext, 4, inst, "end of bb"); }); /* Add jump that fixup_last_cti expects */ if (!instr_is_ubr(inst) IF_X86(|| instr_get_opcode(inst) == OP_jmp_far)) { app_pc target; if (instr_is_mbr(inst) IF_X86(|| instr_get_opcode(inst) == OP_jmp_far)) { target = get_ibl_routine( dcontext, get_ibl_entry_type(instr_branch_type(inst)), DEFAULT_IBL_TRACE(), get_ibl_branch_type(inst)); } else if (instr_is_cbr(inst)) { /* Do not call instr_length() on this inst: use length * of translation! (i#509) */ target = decode_next_pc(dcontext, xl8); } else { target = opnd_get_pc(instr_get_target(inst)); } ASSERT(target != NULL); jmp = create_exit_jmp(dcontext, target, xl8, instr_branch_type(inst)); instrlist_postinsert(ilist, inst, jmp); /* we're now done w/ vmlist: switch to end instr. * d_r_mangle() shouldn't remove the exit cti. */ vm_area_destroy_list(dcontext, md->blk_info[blk].vmlist); md->blk_info[blk].vmlist = NULL; md->blk_info[blk].end_instr = jmp; } else md->blk_info[blk].end_instr = inst; blk++; DOLOG(4, LOG_INTERP, { if (blk < md->num_blks) { LOG(THREAD, LOG_MONITOR, 4, "starting next bb " PFX "\n", md->blk_info[blk].info.tag); } }); if (blk >= md->num_blks && next_inst != NULL) { CLIENT_ASSERT(false, "unsupported trace modification: too many exits"); return false; } } #if defined(RETURN_AFTER_CALL) || defined(RCT_IND_BRANCH) /* PR 306761: we need to re-calculate md->blk_info[blk].info.num_exits, * and then adjust after fixup_last_cti. */ if (instr_will_be_exit_cti(inst)) md->blk_info[blk].info.num_exits++; #endif } if (blk < md->num_blks) { ASSERT(!instr_is_ubr(instrlist_last(ilist))); if (blk + 1 < md->num_blks) { CLIENT_ASSERT(false, "unsupported trace modification: too few exits"); return false; } /* must have been no final exit cti: add final fall-through jmp */ jmp = create_exit_jmp(dcontext, fallthrough, fallthrough, 0); /* FIXME PR 307284: support client modifying, replacing, or adding * syscalls and ints: need to re-analyze. Then we wouldn't * need the md->final_exit_flags field anymore. * For now we disallow. */ if (found_syscall || found_int) { instr_exit_branch_set_type(jmp, md->final_exit_flags); #ifdef WINDOWS /* For INSTR_SHARED_SYSCALL, we set it pre-mangling, and it * survives to here if the instr is not clobbered, * and does not come from md->final_exit_flags */ if (TEST(INSTR_SHARED_SYSCALL, instrlist_last(ilist)->flags)) { instr_set_target(jmp, opnd_create_pc(shared_syscall_routine(dcontext))); instr_set_our_mangling(jmp, true); /* undone by target set */ } /* FIXME: test for linux too, but allowing ignorable syscalls */ if (!TESTANY(LINK_NI_SYSCALL_ALL IF_WINDOWS(| LINK_CALLBACK_RETURN), md->final_exit_flags) && !TEST(INSTR_SHARED_SYSCALL, instrlist_last(ilist)->flags)) { CLIENT_ASSERT(false, "client modified or added a syscall or int: unsupported"); return false; } #endif } instrlist_append(ilist, jmp); md->blk_info[blk].end_instr = jmp; } else { CLIENT_ASSERT((!found_syscall && !found_int) /* On linux we allow ignorable syscalls in middle. * FIXME PR 307284: see notes above. 
*/ IF_UNIX(|| !TEST(LINK_NI_SYSCALL, md->final_exit_flags)), "client changed exit target where unsupported\n" "check if trace ends in a syscall or int"); } ASSERT(instr_is_ubr(instrlist_last(ilist))); if (found_syscall) md->trace_flags |= FRAG_HAS_SYSCALL; else md->trace_flags &= ~FRAG_HAS_SYSCALL; /* 2nd walk: mangle */ DOLOG(4, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 4, "trace ilist before mangling:\n"); instrlist_disassemble(dcontext, md->trace_tag, ilist, THREAD); }); /* We do not need to remove nops since we never emitted */ d_r_mangle(dcontext, ilist, &md->trace_flags, true /*mangle calls*/, /* we're post-client so we don't need translations unless storing */ TEST(FRAG_HAS_TRANSLATION_INFO, md->trace_flags)); DOLOG(4, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 4, "trace ilist after mangling:\n"); instrlist_disassemble(dcontext, md->trace_tag, ilist, THREAD); }); /* 3rd walk: stitch together delineated bbs */ for (blk = 0; blk < md->num_blks && md->blk_info[blk].end_instr == NULL; blk++) ; /* nothing */ start_instr = instrlist_first(ilist); for (inst = instrlist_first(ilist); inst != NULL; inst = next_inst) { next_inst = instr_get_next(inst); if (inst == md->blk_info[blk].end_instr) { /* Chain exit to point to next bb */ if (blk + 1 < md->num_blks) { /* We must do proper analysis so that state translation matches * created traces in whether eflags are restored post-cmp */ uint next_flags = forward_eflags_analysis(dcontext, ilist, instr_get_next(inst)); next_flags = instr_eflags_to_fragment_eflags(next_flags); LOG(THREAD, LOG_INTERP, 4, "next_flags for fixup_last_cti: 0x%x\n", next_flags); fixup_last_cti(dcontext, ilist, md->blk_info[blk + 1].info.tag, next_flags, md->trace_flags, NULL, NULL, TEST(FRAG_HAS_TRANSLATION_INFO, md->trace_flags), &num_exits_deleted, /* Only walk ilist between these instrs */ start_instr, inst); #if defined(RETURN_AFTER_CALL) || defined(RCT_IND_BRANCH) md->blk_info[blk].info.num_exits -= num_exits_deleted; #endif } blk++; /* skip fall-throughs */ while (blk < md->num_blks && md->blk_info[blk].end_instr == NULL) blk++; if (blk >= md->num_blks && next_inst != NULL) { CLIENT_ASSERT(false, "unsupported trace modification: exits modified"); return false; } start_instr = next_inst; } } if (blk < md->num_blks) { CLIENT_ASSERT(false, "unsupported trace modification: cannot find all exits"); return false; } return true; } /**************************************************************************** * UTILITIES */ /* Converts instr_t EFLAGS_ flags to corresponding fragment_t FRAG_ flags, * assuming that the instr_t flags correspond to the start of the fragment_t. * Assumes instr_eflags has already accounted for predication. */ uint instr_eflags_to_fragment_eflags(uint instr_eflags) { uint frag_eflags = 0; #ifdef X86 if (instr_eflags == EFLAGS_WRITE_OF) { /* this fragment writes OF before reading it * May still read other flags before writing them. 
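         * (OF gets a separate flag on x86 because the lahf/sahf-based flag
         * save scheme does not cover OF -- hence the dedicated seto/add-0x7f
         * handling visible in the trace ib-exit code earlier in this file.)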
*/ frag_eflags |= FRAG_WRITES_EFLAGS_OF; return frag_eflags; } #endif if (instr_eflags == EFLAGS_WRITE_ARITH) { /* fragment writes all 6 prior to reading */ frag_eflags |= FRAG_WRITES_EFLAGS_ARITH; #ifdef X86 frag_eflags |= FRAG_WRITES_EFLAGS_OF; #endif } return frag_eflags; } /* Returns one of these flags, defined in instr.h: * EFLAGS_WRITE_ARITH = writes all arith flags before reading any * EFLAGS_WRITE_OF = writes OF before reading it (x86-only) * EFLAGS_READ_ARITH = reads some of arith flags before writing * EFLAGS_READ_OF = reads OF before writing OF (x86-only) * 0 = no information before 1st cti */ uint forward_eflags_analysis(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr) { instr_t *in; uint eflags_6 = 0; /* holds flags written so far (in read slots) */ int eflags_result = 0; for (in = instr; in != NULL; in = instr_get_next_expanded(dcontext, ilist, in)) { if (!instr_valid(in) || instr_is_cti(in)) { /* give up */ break; } if (eflags_result != EFLAGS_WRITE_ARITH IF_X86(&&eflags_result != EFLAGS_READ_OF)) eflags_result = eflags_analysis(in, eflags_result, &eflags_6); DOLOG(4, LOG_INTERP, { d_r_loginst(dcontext, 4, in, "forward_eflags_analysis"); LOG(THREAD, LOG_INTERP, 4, "\tinstr %x => %x\n", instr_get_eflags(in, DR_QUERY_DEFAULT), eflags_result); }); } return eflags_result; } /* This translates f's code into an instrlist_t and returns it. * If buf is NULL: * The Instrs returned point into f's raw bits, so encode them * before you delete f! * Else, f's raw bits are copied into buf, and *bufsz is modified to * contain the total bytes copied * FIXME: should have release build checks and not just asserts where * we rely on caller to have big-enough buffer? * If target_flags differ from f->flags in sharing and/or in trace-ness, * converts ibl and tls usage in f to match the desired target_flags. * FIXME: converting from private to shared tls is not yet * implemented: we rely on -private_ib_in_tls for adding normal * private bbs to shared traces, and disallow any extensive mangling * (native_exec, selfmod) from becoming shared traces. * The caller is responsible for destroying the instrlist and its instrs. * If the fragment ends in an elided jmp, a new jmp instr is created, though * its bits field is NULL, allowing the caller to set it to do-not-emit if * trying to exactly duplicate or calculate the size, though most callers * will want to emit that jmp. See decode_fragment_exact(). */ static void instr_set_raw_bits_trace_buf(instr_t *instr, byte *buf_writable_addr, uint length) { /* The trace buffer is a writable address, so we need to translate to an * executable address for pointing at bits. */ instr_set_raw_bits(instr, vmcode_get_executable_addr(buf_writable_addr), length); } /* We want to avoid low-loglevel disassembly when we're in the middle of disassembly */ #define DF_LOGLEVEL(dc) (((dc) != GLOBAL_DCONTEXT && (dc)->in_opnd_disassemble) ? 
6U : 4U) instrlist_t * decode_fragment(dcontext_t *dcontext, fragment_t *f, byte *buf, /*IN/OUT*/ uint *bufsz, uint target_flags, /*OUT*/ uint *dir_exits, /*OUT*/ uint *indir_exits) { linkstub_t *l; cache_pc start_pc, stop_pc, pc, prev_pc = NULL, raw_start_pc; instr_t *instr, *cti = NULL, *raw_instr; instrlist_t *ilist = instrlist_create(dcontext); byte *top_buf = NULL, *cur_buf = NULL; app_pc target_tag; uint num_bytes, offset; uint num_dir = 0, num_indir = 0; bool tls_to_dc; bool shared_to_private = TEST(FRAG_SHARED, f->flags) && !TEST(FRAG_SHARED, target_flags); #ifdef WINDOWS /* The fragment could contain an ignorable sysenter instruction if * the following conditions are satisfied. */ bool possible_ignorable_sysenter = DYNAMO_OPTION(ignore_syscalls) && (get_syscall_method() == SYSCALL_METHOD_SYSENTER) && TEST(FRAG_HAS_SYSCALL, f->flags); #endif instrlist_t intra_ctis; coarse_info_t *info = NULL; bool coarse_elided_ubrs = false; dr_isa_mode_t old_mode; /* for decoding and get_ibl routines we need the dcontext mode set */ bool ok = dr_set_isa_mode(dcontext, FRAG_ISA_MODE(f->flags), &old_mode); /* i#1494: Decoding a code fragment from code cache, decode_fragment * may mess up the 32-bit/64-bit mode in -x86_to_x64 because 32-bit * application code is encoded as 64-bit code fragments into the code cache. * Thus we currently do not support using decode_fragment with -x86_to_x64, * including trace and coarse_units (coarse-grain code cache management) */ IF_X86_64(ASSERT(!DYNAMO_OPTION(x86_to_x64))); instrlist_init(&intra_ctis); /* Now we need to go through f and make cti's for each of its exit cti's and * non-exit cti's with off-fragment targets that need to be re-pc-relativized. * The rest of the instructions can be lumped into raw instructions. */ start_pc = FCACHE_ENTRY_PC(f); pc = start_pc; raw_start_pc = start_pc; if (buf != NULL) { cur_buf = buf; top_buf = cur_buf; ASSERT(bufsz != NULL); } /* Handle code after last exit but before stubs by allowing l to be NULL. * Handle coarse-grain fake fragment_t by discovering exits as we go, with * l being NULL the whole time. */ if (TEST(FRAG_FAKE, f->flags)) { ASSERT(TEST(FRAG_COARSE_GRAIN, f->flags)); info = get_fragment_coarse_info(f); ASSERT(info != NULL); coarse_elided_ubrs = (info->persisted && TEST(PERSCACHE_ELIDED_UBR, info->flags)) || (!info->persisted && DYNAMO_OPTION(coarse_freeze_elide_ubr)); /* Assumption: coarse-grain fragments have no ctis w/ off-fragment targets * that are not exit ctis */ l = NULL; } else l = FRAGMENT_EXIT_STUBS(f); while (true) { uint l_flags; cti = NULL; if (l != NULL) { stop_pc = EXIT_CTI_PC(f, l); } else if (TEST(FRAG_FAKE, f->flags)) { /* we don't know the size of f */ stop_pc = (cache_pc)UNIVERSAL_REGION_END; } else { /* fake fragment_t, or code between last exit but before stubs or padding */ stop_pc = fragment_body_end_pc(dcontext, f); if (PAD_FRAGMENT_JMPS(f->flags) && stop_pc != raw_start_pc) { /* We need to adjust stop_pc to account for any padding, only * way any code could get here is via client interface, * and there really is no nice way to distinguish it * from any padding we added. * PR 213005: we do not support decode_fragment() for bbs * that have code added beyond the last exit cti (we turn * off FRAG_COARSE_GRAIN and set FRAG_CANNOT_BE_TRACE). 
* Sanity check, make sure it at least looks like there is no * code here */ ASSERT(IS_SET_TO_DEBUG(raw_start_pc, stop_pc - raw_start_pc)); stop_pc = raw_start_pc; } } IF_X64(ASSERT(TEST(FRAG_FAKE, f->flags) /* no copy made */ || CHECK_TRUNCATE_TYPE_uint((stop_pc - raw_start_pc)))); num_bytes = (uint)(stop_pc - raw_start_pc); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "decoding fragment from " PFX " to " PFX "\n", raw_start_pc, stop_pc); if (num_bytes > 0) { if (buf != NULL) { if (TEST(FRAG_FAKE, f->flags)) { /* we don't know the size of f, so we copy later, though * we do point instrs into buf before we copy! */ } else { /* first copy entire sequence up to exit cti into buf * so we don't have to copy it in pieces if we find cti's, if we don't * find any we want one giant piece anyway */ ASSERT(cur_buf + num_bytes < buf + *bufsz); memcpy(cur_buf, raw_start_pc, num_bytes); top_buf = cur_buf + num_bytes; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "decode_fragment: copied " PFX "-" PFX " to " PFX "-" PFX "\n", raw_start_pc, raw_start_pc + num_bytes, cur_buf, cur_buf + num_bytes); /* cur_buf is incremented later -- it always points to start * of raw bytes for next-to-add-to-ilist instr, while * top_buf points to top of copied-to-buf data */ } } else { /* point at bits in code cache */ cur_buf = raw_start_pc; } /* now, we can't make a single raw instr for all that, there may * be calls with off-fragment targets in there that need to be * re-pc-relativized (instrumentation, etc. insert calls), or * we may not even know where the exit ctis are (coarse-grain fragments), * so walk through (original bytes!) and decode, looking for cti's */ instr = instr_create(dcontext); pc = raw_start_pc; /* do we have to translate the store of xcx from tls to dcontext? * be careful -- there can be private bbs w/ indirect branches, so * must see if this is a shared fragment we're adding */ tls_to_dc = (shared_to_private && !DYNAMO_OPTION(private_ib_in_tls) && /* if l==NULL (coarse src) we'll check for xcx every time */ (l == NULL || LINKSTUB_INDIRECT(l->flags))); do { #ifdef WINDOWS cache_pc prev_decode_pc = prev_pc; /* store the address of the * previous decode, the instr * before the one 'pc' * currently points to *before* * the call to decode() just * below */ #endif /* For frozen coarse fragments, ubr eliding forces us to check * every instr for a potential next fragment start. This is * expensive so users are advised to decode from app code if * possible (case 9325 -- need exact re-mangle + re-instrument), * though -coarse_pclookup_table helps. */ if (info != NULL && info->frozen && coarse_elided_ubrs && pc != start_pc) { /* case 6532: check for ib stubs as we elide the jmp there too */ bool stop = false; if (coarse_is_indirect_stub(pc)) { stop = true; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "\thit ib stub @" PFX "\n", pc); } else { app_pc tag = fragment_coarse_entry_pclookup(dcontext, info, pc); if (tag != NULL) { stop = true; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "\thit frozen tgt: " PFX "." PFX "\n", tag, pc); } } if (stop) { /* Add the ubr ourselves */ ASSERT(cti == NULL); cti = XINST_CREATE_jump(dcontext, opnd_create_pc(pc)); /* It's up to the caller to decide whether to mark this * as do-not-emit or not */ /* Process as an exit cti */ stop_pc = pc; pc = stop_pc; break; } } instr_reset(dcontext, instr); prev_pc = pc; pc = IF_AARCH64_ELSE(decode_cti_with_ldstex, decode_cti)(dcontext, pc, instr); #ifdef WINDOWS /* Perform fixups for ignorable syscalls on XP & 2003. 
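         * Roughly, the mangled sequence in the cache looks like (a sketch):
         *     mov $post_sysenter_pc -> (%xsp)     <- prev_decode_pc
         *     sysenter                            <- prev_pc
         *     <post-sysenter instr>
         * Since we are materializing the code at a new location, the mov's
         * immediate must be re-pointed at the copied post-sysenter instr,
         * which the code below does via an opnd_create_instr() reference.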
*/ if (possible_ignorable_sysenter && instr_opcode_valid(instr) && instr_is_syscall(instr)) { /* We want to find the instr preceding the sysenter and have * it point to the post-sysenter instr in the trace, rather than * remain pointing to the post-sysenter instr in the BB. */ instr_t *sysenter_prev; instr_t *sysenter_post; ASSERT(prev_decode_pc != NULL); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "decode_fragment: sysenter found @" PFX "\n", instr_get_raw_bits(instr)); /* create single raw instr for instructions up to the * sysenter EXCEPT for the immediately preceding instruction */ offset = (int)(prev_decode_pc - raw_start_pc); ASSERT(offset > 0); raw_instr = instr_create(dcontext); /* point to buffer bits */ instr_set_raw_bits_trace_buf(raw_instr, cur_buf, offset); instrlist_append(ilist, raw_instr); cur_buf += offset; /* Get the "mov" instr just before the sysenter. We know that * it's there because mangle put it there, so we can safely * decode at prev_decode_pc. */ sysenter_prev = instr_create(dcontext); decode(dcontext, prev_decode_pc, sysenter_prev); ASSERT(instr_valid(instr) && instr_is_mov_imm_to_tos(sysenter_prev)); instrlist_append(ilist, sysenter_prev); cur_buf += instr_length(dcontext, sysenter_prev); /* Append the sysenter. */ instr_set_raw_bits_trace_buf(instr, cur_buf, (int)(pc - prev_pc)); instrlist_append(ilist, instr); instr_set_meta(instr); /* skip current instr -- the sysenter */ cur_buf += (int)(pc - prev_pc); /* Decode the next instr -- the one after the sysenter. */ sysenter_post = instr_create(dcontext); prev_decode_pc = pc; prev_pc = pc; pc = decode(dcontext, pc, sysenter_post); if (DYNAMO_OPTION(ignore_syscalls_follow_sysenter)) ASSERT(!instr_is_cti(sysenter_post)); raw_start_pc = pc; /* skip the post-sysenter instr */ cur_buf += (int)(pc - prev_pc); instrlist_append(ilist, sysenter_post); /* Point the pre-sysenter mov to the post-sysenter instr. */ instr_set_src(sysenter_prev, 0, opnd_create_instr(sysenter_post)); instr_set_meta(sysenter_prev); instr_set_meta(sysenter_post); DOLOG(DF_LOGLEVEL(dcontext), LOG_INTERP, { LOG(THREAD, LOG_INTERP, DF_LOGLEVEL(dcontext), "Post-sysenter -- F%d (" PFX ") into:\n", f->id, f->tag); instrlist_disassemble(dcontext, f->tag, ilist, THREAD); }); /* Set all local state so that we can fall-thru and correctly * process the post-sysenter instruction. Point instr to the * already decoded instruction, sysenter_post. At this point, * pc and raw_start_pc point to just after sysenter_post, * prev_pc points to sysenter_post, prev_decode_pc points to * the sysenter itself, and cur_buf points to post_sysenter. */ instr = sysenter_post; } #endif /* look for a cti with an off-fragment target */ if (instr_opcode_valid(instr) && instr_is_cti(instr)) { bool separate_cti = false; bool re_relativize = false; bool intra_target = true; DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { d_r_loginst(dcontext, 4, instr, "decode_fragment: found non-exit cti"); }); if (TEST(FRAG_FAKE, f->flags)) { /* Case 8711: we don't know the size so we can't even * distinguish off-fragment from intra-fragment targets. * Thus we have to assume that any cti is an exit cti, and * make all fragments for which that is not true into * fine-grained. * Except that we want to support intra-fragment ctis for * clients (i#665), so we use some heuristics. */ if (instr_is_cti_short_rewrite(instr, prev_pc)) { /* Pull in the two short jmps for a "short-rewrite" instr. 
* We must do this before asking whether it's an * intra-fragment so we don't just look at the * first part of the sequence. */ pc = remangle_short_rewrite(dcontext, instr, prev_pc, 0 /*same target*/); } if (!coarse_cti_is_intra_fragment(dcontext, info, instr, start_pc)) { /* Process this cti as an exit cti. FIXME: we will then * re-copy the raw bytes from this cti to the end of the * fragment at the top of the next loop iter, but for * coarse-grain bbs that should be just one instr for cbr bbs * or none for others, so not worth doing anything about. */ DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { d_r_loginst(dcontext, DF_LOGLEVEL(dcontext), instr, "\tcoarse exit cti"); }); intra_target = false; stop_pc = prev_pc; pc = stop_pc; break; } else { /* we'll make it to intra_target if() below */ DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { d_r_loginst(dcontext, DF_LOGLEVEL(dcontext), instr, "\tcoarse intra-fragment cti"); }); } } else if (instr_is_return(instr) || !opnd_is_near_pc(instr_get_target(instr))) { /* just leave it undecoded */ intra_target = false; } else if (instr_is_cti_short_rewrite(instr, prev_pc)) { /* Cti-short should only occur as exit ctis, which are * separated out unless we're decoding a fake fragment. We * include this case for future use, as otherwise we'll * decode just the short cti and think it is an * intra-fragment cti. */ ASSERT_NOT_REACHED(); separate_cti = true; re_relativize = true; intra_target = false; } else if (opnd_get_pc(instr_get_target(instr)) < start_pc || opnd_get_pc(instr_get_target(instr)) > start_pc + f->size) { separate_cti = true; re_relativize = true; intra_target = false; DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { d_r_loginst(dcontext, 4, instr, "\tcti has off-fragment target"); }); } if (intra_target) { /* intra-fragment target: we'll change its target operand * from pc to instr_t in second pass, so remember it here */ instr_t *clone = instr_clone(dcontext, instr); /* HACK: use note field! */ instr_set_note(clone, (void *)instr); /* we leave the clone pointing at valid original raw bits */ instrlist_append(&intra_ctis, clone); /* intra-fragment target */ DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { d_r_loginst(dcontext, 4, instr, "\tcti has intra-fragment target"); }); /* since the resulting instrlist could be manipulated, * we need to change the target operand from pc to instr_t. * that requires having this instr separated out now so * our clone-in-note-field hack above works. 
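                     * (The clone keeps the original raw bits, so the final
                     * intra-cti fixup pass below can match its pc target
                     * against a running offset over the new ilist and then
                     * use the note field to recover the real instr whose
                     * target operand needs re-pointing.)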
*/ separate_cti = true; re_relativize = false; } if (separate_cti) { /* create single raw instr for instructions up to the cti */ offset = (int)(prev_pc - raw_start_pc); if (offset > 0) { raw_instr = instr_create(dcontext); /* point to buffer bits */ instr_set_raw_bits_trace_buf(raw_instr, cur_buf, offset); instrlist_append(ilist, raw_instr); cur_buf += offset; raw_start_pc = prev_pc; } /* now append cti, indicating that relative target must be * re-encoded, and that it is not an exit cti */ instr_set_meta(instr); if (re_relativize) instr_set_raw_bits_valid(instr, false); else if (!instr_is_cti_short_rewrite(instr, NULL)) { instr_set_raw_bits_trace_buf(instr, cur_buf, (int)(pc - prev_pc)); } instrlist_append(ilist, instr); /* include buf for off-fragment cti, to simplify assert below */ cur_buf += (int)(pc - prev_pc); raw_start_pc = pc; /* create new instr for future fast decodes */ instr = instr_create(dcontext); } } /* is cti */ /* instr_is_tls_xcx_spill won't upgrade from level 1 */ else if (tls_to_dc && instr_is_tls_xcx_spill(instr)) { /* shouldn't get here for x64, where everything uses tls */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "mangling xcx save from tls to dcontext\n"); /* create single raw instr for instructions up to the xcx save */ offset = (int)(prev_pc - raw_start_pc); if (offset > 0) { raw_instr = instr_create(dcontext); /* point to buffer bits */ instr_set_raw_bits_trace_buf(raw_instr, cur_buf, offset); instrlist_append(ilist, raw_instr); cur_buf += offset; raw_start_pc = prev_pc; } /* now append our new xcx save */ instrlist_append(ilist, instr_create_save_to_dcontext( dcontext, IF_X86_ELSE(REG_XCX, DR_REG_R2), IF_X86_ELSE(XCX_OFFSET, R2_OFFSET))); /* make sure skip current instr */ cur_buf += (int)(pc - prev_pc); raw_start_pc = pc; } #if defined(X86) && defined(X64) else if (instr_has_rel_addr_reference(instr)) { /* We need to re-relativize, which is done automatically only for * level 1 instrs (PR 251479), and only when raw bits point to * their original location. We assume that all the if statements * above end up creating a high-level instr, so a cti w/ a * rip-rel operand is already covered. 
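             * (What remains here are non-cti rip-rel instrs, e.g. data
             * references; when a copy buffer was supplied they are re-encoded
             * into it below so the displacement remains valid at the new
             * location.)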
*/ /* create single raw instr for instructions up to this one */ offset = (int)(prev_pc - raw_start_pc); if (offset > 0) { raw_instr = instr_create(dcontext); /* point to buffer bits */ instr_set_raw_bits_trace_buf(raw_instr, cur_buf, offset); instrlist_append(ilist, raw_instr); cur_buf += offset; raw_start_pc = prev_pc; } /* should be valid right now since pointing at original bits */ ASSERT(instr_rip_rel_valid(instr)); if (buf != NULL) { /* re-relativize into the new buffer */ DEBUG_DECLARE(byte *nxt =) instr_encode_to_copy(dcontext, instr, cur_buf, vmcode_get_executable_addr(cur_buf)); instr_set_raw_bits_trace_buf(instr, vmcode_get_executable_addr(cur_buf), (int)(pc - prev_pc)); instr_set_rip_rel_valid(instr, true); ASSERT(nxt != NULL); } instrlist_append(ilist, instr); cur_buf += (int)(pc - prev_pc); raw_start_pc = pc; /* create new instr for future fast decodes */ instr = instr_create(dcontext); } #endif } while (pc < stop_pc); DODEBUG({ if (pc != stop_pc) { LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "PC " PFX ", stop_pc " PFX "\n", pc, stop_pc); } }); ASSERT(pc == stop_pc); /* create single raw instr for rest of instructions up to exit cti */ if (pc > raw_start_pc) { instr_reset(dcontext, instr); /* point to buffer bits */ offset = (int)(pc - raw_start_pc); if (offset > 0) { instr_set_raw_bits_trace_buf(instr, cur_buf, offset); instrlist_append(ilist, instr); cur_buf += offset; } if (buf != NULL && TEST(FRAG_FAKE, f->flags)) { /* Now that we know the size we can copy into buf. * We have been incrementing cur_buf all along, though * we didn't have contents there. */ ASSERT(top_buf < cur_buf); IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint((cur_buf - top_buf)))); num_bytes = (uint)(cur_buf - top_buf); ASSERT(cur_buf + num_bytes < buf + *bufsz); memcpy(cur_buf, raw_start_pc, num_bytes); top_buf = cur_buf + num_bytes; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "decode_fragment: copied " PFX "-" PFX " to " PFX "-" PFX "\n", raw_start_pc, raw_start_pc + num_bytes, cur_buf, cur_buf + num_bytes); } ASSERT(buf == NULL || cur_buf == top_buf); } else { /* will reach here if had a processed instr (off-fragment target, etc.) * immediately prior to exit cti, so now don't need instr -- an * example (in absence of clients) is trampoline to interception code */ instr_destroy(dcontext, instr); } } if (l == NULL && !TEST(FRAG_FAKE, f->flags)) break; /* decode the exit branch */ if (cti != NULL) { /* already created */ instr = cti; ASSERT(info != NULL && info->frozen && instr_is_ubr(instr)); raw_start_pc = pc; } else { instr = instr_create(dcontext); raw_start_pc = decode(dcontext, stop_pc, instr); ASSERT(raw_start_pc != NULL); /* our own code! */ /* pc now points into fragment! */ } ASSERT(instr_is_ubr(instr) || instr_is_cbr(instr)); /* replace fcache target with target_tag and add to fragment */ if (l == NULL) { app_pc instr_tgt; /* Ensure we get proper target for short cti sequence */ if (instr_is_cti_short_rewrite(instr, stop_pc)) remangle_short_rewrite(dcontext, instr, stop_pc, 0 /*same target*/); instr_tgt = opnd_get_pc(instr_get_target(instr)); ASSERT(TEST(FRAG_COARSE_GRAIN, f->flags)); if (cti == NULL && coarse_is_entrance_stub(instr_tgt)) { target_tag = entrance_stub_target_tag(instr_tgt, info); l_flags = LINK_DIRECT; /* FIXME; try to get LINK_JMP vs LINK_CALL vs fall-through? 
*/ LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "\tstub tgt: " PFX " => " PFX "\n", instr_tgt, target_tag); } else if (instr_tgt == raw_start_pc /*target next instr*/ /* could optimize by not checking for stub if * coarse_elided_ubrs but we need to know whether ALL * ubrs were elided, which we don't know as normally * entire-bb-ubrs are not elided (case 9677). * plus now that we elide jmp-to-ib-stub we must check. */ && coarse_is_indirect_stub(instr_tgt)) { ibl_type_t ibl_type; DEBUG_DECLARE(bool is_ibl;) target_tag = coarse_indirect_stub_jmp_target(instr_tgt); l_flags = LINK_INDIRECT; DEBUG_DECLARE(is_ibl =) get_ibl_routine_type_ex(dcontext, target_tag, &ibl_type _IF_X86_64(NULL)); ASSERT(is_ibl); l_flags |= ibltype_to_linktype(ibl_type.branch_type); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "\tind stub tgt: " PFX " => " PFX "\n", instr_tgt, target_tag); } else { target_tag = fragment_coarse_entry_pclookup(dcontext, info, instr_tgt); /* Only frozen units don't jump through stubs */ ASSERT(info != NULL && info->frozen); ASSERT(target_tag != NULL); l_flags = LINK_DIRECT; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "\tfrozen tgt: " PFX "." PFX "\n", target_tag, instr_tgt); } } else { target_tag = EXIT_TARGET_TAG(dcontext, f, l); l_flags = l->flags; } if (LINKSTUB_DIRECT(l_flags)) num_dir++; else num_indir++; ASSERT(target_tag != NULL); if (instr_is_cti_short_rewrite(instr, stop_pc)) { raw_start_pc = remangle_short_rewrite(dcontext, instr, stop_pc, target_tag); } else { app_pc new_target = target_tag; /* don't point to fcache bits */ instr_set_raw_bits_valid(instr, false); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "decode_fragment exit_cti: pc=" PFX " l->target_tag=" PFX " l->flags=0x%x\n", stop_pc, target_tag, l_flags); /* need to propagate exit branch type flags, * instr_t flag copied from old fragment linkstub * TODO: when ibl targets are different this won't be necessary */ instr_exit_branch_set_type(instr, linkstub_propagatable_flags(l_flags)); /* convert to proper ibl */ if (is_indirect_branch_lookup_routine(dcontext, target_tag)) { DEBUG_DECLARE(app_pc old_target = new_target;) new_target = get_alternate_ibl_routine(dcontext, target_tag, target_flags); ASSERT(new_target != NULL); /* for stats on traces, we assume if target_flags contains * FRAG_IS_TRACE then we are extending a trace */ DODEBUG({ LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "%s: %s ibl_routine " PFX " with %s_target=" PFX "\n", TEST(FRAG_IS_TRACE, target_flags) ? "extend_trace" : "decode_fragment", new_target == old_target ? "maintaining" : "replacing", old_target, new_target == old_target ? "old" : "new", new_target); STATS_INC(num_traces_ibl_extended); }); #ifdef WINDOWS DOSTATS({ if (TEST(FRAG_IS_TRACE, target_flags) && old_target == shared_syscall_routine(dcontext)) STATS_INC(num_traces_shared_syscall_extended); }); #endif } instr_set_target(instr, opnd_create_pc(new_target)); if (instr_is_cti_short(instr)) { /* make sure non-mangled short ctis, which are generated by * us and never left there from apps, are not marked as exit ctis */ instr_set_meta(instr); } } instrlist_append(ilist, instr); #ifdef CUSTOM_EXIT_STUBS if (l != NULL && l->fixed_stub_offset > 0) regenerate_custom_exit_stub(dcontext, instr, l, f); #endif if (TEST(FRAG_FAKE, f->flags)) { /* Assumption: coarse-grain bbs have 1 ind exit or 2 direct, * and no code beyond the last exit! Of course frozen bbs * can have their final jmp elided, which we handle above. 
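             * (Thus once we have appended a ubr exit for a fake fragment
             * there is nothing further to discover, so we stop here.)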
*/ if (instr_is_ubr(instr)) { break; } } if (l != NULL) /* if NULL keep going: discovering exits as we go */ l = LINKSTUB_NEXT_EXIT(l); } /* end while(true) loop through exit stubs */ /* now fix up intra-trace cti targets */ if (instrlist_first(&intra_ctis) != NULL) { /* We have to undo all of our level 0 blocks by expanding. * Any instrs that need re-relativization should already be * separate, so this should not affect rip-rel instrs. */ int offs = 0; for (instr = instrlist_first_expanded(dcontext, ilist); instr != NULL; instr = instr_get_next_expanded(dcontext, ilist, instr)) { for (cti = instrlist_first(&intra_ctis); cti != NULL; cti = instr_get_next(cti)) { /* The clone we put in intra_ctis has raw bits equal to the * original bits, so its target will be in original fragment body. * We can't rely on the raw bits of the new instrs (since the * non-level-0 ones may have allocated raw bits) so we * calculate a running offset as we go. */ if (opnd_get_pc(instr_get_target(cti)) - start_pc == offs) { /* cti targets this instr */ instr_t *real_cti = (instr_t *)instr_get_note(cti); /* PR 333691: do not preserve raw bits of real_cti, since * instrlist may change (e.g., inserted nops). Must re-encode * once instrlist is finalized. */ instr_set_target(real_cti, opnd_create_instr(instr)); DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { d_r_loginst(dcontext, 4, real_cti, "\tre-set intra-fragment target"); }); break; } } offs += instr_length(dcontext, instr); } } instrlist_clear(dcontext, &intra_ctis); DOLOG(DF_LOGLEVEL(dcontext), LOG_INTERP, { LOG(THREAD, LOG_INTERP, DF_LOGLEVEL(dcontext), "Decoded F%d (" PFX "." PFX ") into:\n", f->id, f->tag, FCACHE_ENTRY_PC(f)); instrlist_disassemble(dcontext, f->tag, ilist, THREAD); }); ok = dr_set_isa_mode(dcontext, old_mode, NULL); ASSERT(ok); if (dir_exits != NULL) *dir_exits = num_dir; if (indir_exits != NULL) *indir_exits = num_indir; if (buf != NULL) { IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint((top_buf - buf)))); *bufsz = (uint)(top_buf - buf); } return ilist; } #undef DF_LOGLEVEL /* Just like decode_fragment() but marks any instrs missing in the cache * as do-not-emit */ instrlist_t * decode_fragment_exact(dcontext_t *dcontext, fragment_t *f, byte *buf, /*IN/OUT*/ uint *bufsz, uint target_flags, /*OUT*/ uint *dir_exits, /*OUT*/ uint *indir_exits) { instrlist_t *ilist = decode_fragment(dcontext, f, buf, bufsz, target_flags, dir_exits, indir_exits); /* If the final jmp was elided we do NOT want to count it in the size! */ if (instr_get_raw_bits(instrlist_last(ilist)) == NULL) { instr_set_ok_to_emit(instrlist_last(ilist), false); } return ilist; } /* Makes a new copy of fragment f * If replace is true, * removes f from the fcache and adds the new copy in its place * Else * creates f as an invisible fragment (caller is responsible for linking * the new fragment!) */ fragment_t * copy_fragment(dcontext_t *dcontext, fragment_t *f, bool replace) { instrlist_t *trace = instrlist_create(dcontext); instr_t *instr; uint *trace_buf; int trace_buf_top; /* index of next free location in trace_buf */ linkstub_t *l; byte *p; cache_pc start_pc; int num_bytes; fragment_t *new_f; void *vmlist = NULL; app_pc target_tag; DEBUG_DECLARE(bool ok;) trace_buf = heap_alloc(dcontext, f->size * 2 HEAPACCT(ACCT_FRAGMENT)); start_pc = FCACHE_ENTRY_PC(f); trace_buf_top = 0; p = ((byte *)trace_buf) + trace_buf_top; IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* must re-relativize when copying! 
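     * (rip-relative operands in the copied bytes would still point relative
     * to the old location; the plain byte-copy below does not fix them up,
     * hence the x64 ASSERT_NOT_IMPLEMENTED just above.)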
*/ for (l = FRAGMENT_EXIT_STUBS(f); l; l = LINKSTUB_NEXT_EXIT(l)) { /* Copy the instruction bytes up to (but not including) the first * control-transfer instruction. ***WARNING*** This code assumes * that the first link stub corresponds to the first exit branch * in the body. */ IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint((EXIT_CTI_PC(f, l) - start_pc)))); num_bytes = (uint)(EXIT_CTI_PC(f, l) - start_pc); if (num_bytes > 0) { memcpy(p, (byte *)start_pc, num_bytes); trace_buf_top += num_bytes; start_pc += num_bytes; /* build a mongo instruction corresponding to the copied instructions */ instr = instr_create(dcontext); instr_set_raw_bits(instr, p, num_bytes); instrlist_append(trace, instr); } /* decode the exit branch */ instr = instr_create(dcontext); p = decode(dcontext, (byte *)EXIT_CTI_PC(f, l), instr); ASSERT(p != NULL); /* our own code! */ /* p now points into fragment! */ ASSERT(instr_is_ubr(instr) || instr_is_cbr(instr)); /* Replace cache_pc target with target_tag and add to trace. For * an indirect branch, the target_tag is zero. */ target_tag = EXIT_TARGET_TAG(dcontext, f, l); ASSERT(target_tag); if (instr_is_cti_short_rewrite(instr, EXIT_CTI_PC(f, l))) { p = remangle_short_rewrite(dcontext, instr, EXIT_CTI_PC(f, l), target_tag); } else { /* no short ctis that aren't mangled should be exit ctis */ ASSERT(!instr_is_cti_short(instr)); instr_set_target(instr, opnd_create_pc(target_tag)); } instrlist_append(trace, instr); start_pc += (p - (byte *)EXIT_CTI_PC(f, l)); } /* emit as invisible fragment */ /* We don't support shared fragments, where vm_area_add_to_list can fail */ ASSERT_NOT_IMPLEMENTED(!TEST(FRAG_SHARED, f->flags)); DEBUG_DECLARE(ok =) vm_area_add_to_list(dcontext, f->tag, &vmlist, f->flags, f, false /*no locks*/); ASSERT(ok); /* should never fail for private fragments */ new_f = emit_invisible_fragment(dcontext, f->tag, trace, f->flags, vmlist); if (replace) { /* link and replace old fragment */ shift_links_to_new_fragment(dcontext, f, new_f); fragment_replace(dcontext, f, new_f); } else { /* caller is responsible for linking new fragment */ } ASSERT(new_f->flags == f->flags); fragment_copy_data_fields(dcontext, f, new_f); #ifdef DEBUG if (d_r_stats->loglevel > 1) { LOG(THREAD, LOG_ALL, 2, "Copying F%d to F%d\n", f->id, new_f->id); disassemble_fragment(dcontext, f, d_r_stats->loglevel < 3); disassemble_fragment(dcontext, new_f, d_r_stats->loglevel < 3); } #endif /* DEBUG */ heap_free(dcontext, trace_buf, f->size * 2 HEAPACCT(ACCT_FRAGMENT)); /* free the instrlist_t elements */ instrlist_clear_and_destroy(dcontext, trace); if (replace) { fragment_delete(dcontext, f, FRAGDEL_NO_OUTPUT | FRAGDEL_NO_UNLINK | FRAGDEL_NO_HTABLE); STATS_INC(num_fragments_deleted_copy_and_replace); } return new_f; } /* Used when the code cache is enlarged by copying to a larger space, * and all of the relative ctis that target outside the cache need * to be shifted. Additionally, sysenter-related patching for ignore-syscalls * on XP/2003 is performed here, as the absolute code cache address pushed * onto the stack must be updated. * Assumption: old code cache has been copied to TOP of new cache, so to * detect for ctis targeting outside of old cache can look at new cache * start plus old cache size. 
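 * (Since the raw bytes were copied verbatim, decoding at the new address
 * yields pc-relative targets displaced by 'shift': that is correct for
 * targets within the old cache, which moved with us, but out-of-cache
 * targets must be re-encoded with 'shift' subtracted back out, as done
 * below.)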
*/ void shift_ctis_in_fragment(dcontext_t *dcontext, fragment_t *f, ssize_t shift, cache_pc fcache_start, cache_pc fcache_end, size_t old_size) { cache_pc pc, prev_pc = NULL; cache_pc start_pc = FCACHE_ENTRY_PC(f); cache_pc stop_pc = fragment_stubs_end_pc(f); /* get what would have been end of cache if just shifted not resized */ cache_pc fcache_old_end = fcache_start + old_size; #ifdef WINDOWS /* The fragment could contain an ignorable sysenter instruction if * the following conditions are satisfied. */ bool possible_ignorable_sysenter = DYNAMO_OPTION(ignore_syscalls) && (get_syscall_method() == SYSCALL_METHOD_SYSENTER) && /* FIXME Traces don't have FRAG_HAS_SYSCALL set so we can't filter on * that flag for all fragments. */ (TEST(FRAG_HAS_SYSCALL, f->flags) || TEST(FRAG_IS_TRACE, f->flags)); #endif instr_t instr; instr_init(dcontext, &instr); pc = start_pc; while (pc < stop_pc) { #ifdef WINDOWS cache_pc prev_decode_pc = prev_pc; /* store the address of the * previous decode, the instr * before the one 'pc' * currently points to *before* * the call to decode_cti() just * below */ #endif prev_pc = pc; instr_reset(dcontext, &instr); pc = (cache_pc)decode_cti(dcontext, (byte *)pc, &instr); #ifdef WINDOWS /* Perform fixups for sysenter instrs when ignorable syscalls is used on * XP & 2003. These are not cache-external fixups, but it's convenient & * efficient to perform them here since decode_cti() is called on every * instruction, allowing identification of sysenters without additional * decoding. */ if (possible_ignorable_sysenter && instr_opcode_valid(&instr) && instr_is_syscall(&instr)) { cache_pc next_pc; app_pc target; DEBUG_DECLARE(app_pc old_target;) DEBUG_DECLARE(cache_pc encode_nxt;) /* Peek up to find the "mov $post-sysenter -> (%xsp)" */ instr_reset(dcontext, &instr); next_pc = decode(dcontext, prev_decode_pc, &instr); ASSERT(next_pc == prev_pc); LOG(THREAD, LOG_MONITOR, 4, "shift_ctis_in_fragment: pre-sysenter mov found @" PFX "\n", instr_get_raw_bits(&instr)); ASSERT(instr_is_mov_imm_to_tos(&instr)); target = instr_get_raw_bits(&instr) + instr_length(dcontext, &instr) + (pc - prev_pc); DODEBUG(old_target = (app_pc)opnd_get_immed_int(instr_get_src(&instr, 0));); /* PR 253943: we don't support sysenter in x64 */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* can't have 8-byte imm-to-mem */ instr_set_src(&instr, 0, opnd_create_immed_int((ptr_int_t)target, OPSZ_4)); ASSERT(old_target + shift == target); LOG(THREAD, LOG_MONITOR, 4, "shift_ctis_in_fragment: pre-sysenter mov now pts to @" PFX "\n", target); DEBUG_DECLARE(encode_nxt =) instr_encode_to_copy(dcontext, &instr, vmcode_get_writable_addr(prev_decode_pc), prev_decode_pc); /* must not change size! */ ASSERT(encode_nxt != NULL && vmcode_get_executable_addr(encode_nxt) == next_pc); } /* The following 'if' won't get executed since a sysenter isn't * a CTI instr, so we don't need an else. We do need to take care * that any 'else' clauses are added after the 'if' won't trigger * on a sysenter either. 
*/ #endif /* look for a pc-relative cti (including exit ctis) w/ out-of-cache * target (anything in-cache is fine, the whole cache was moved) */ if (instr_is_cti(&instr) && /* only ret, ret_far, and iret don't have targets, and * we really shouldn't see them, except possibly if they * are inserted through instrumentation, so go ahead and * check num srcs */ instr_num_srcs(&instr) > 0 && opnd_is_near_pc(instr_get_target(&instr))) { app_pc target = opnd_get_pc(instr_get_target(&instr)); if (target < fcache_start || target > fcache_old_end) { DEBUG_DECLARE(byte * nxt_pc;) /* re-encode instr w/ new pc-relative target */ instr_set_raw_bits_valid(&instr, false); instr_set_target(&instr, opnd_create_pc(target - shift)); DEBUG_DECLARE(nxt_pc =) instr_encode_to_copy(dcontext, &instr, vmcode_get_writable_addr(prev_pc), prev_pc); /* must not change size! */ ASSERT(nxt_pc != NULL && vmcode_get_executable_addr(nxt_pc) == pc); #ifdef DEBUG if ((d_r_stats->logmask & LOG_CACHE) != 0) { d_r_loginst( dcontext, 5, &instr, "shift_ctis_in_fragment: found cti w/ out-of-cache target"); } #endif } } } instr_free(dcontext, &instr); } #ifdef PROFILE_RDTSC /* Add profile call to front of the trace in dc * Must call finalize_profile_call and pass it the fragment_t* * once the trace is turned into a fragment to fix up a few profile * call instructions. */ void add_profile_call(dcontext_t *dcontext) { monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field; instrlist_t *trace = &(md->trace); byte *p = ((byte *)md->trace_buf) + md->trace_buf_top; instr_t *instr; uint num_bytes = profile_call_size(); ASSERT(num_bytes + md->trace_buf_top < md->trace_buf_size); insert_profile_call((cache_pc)p); /* use one giant BINARY instruction to hold everything, * to keep dynamo from interpreting the cti instructions as real ones */ instr = instr_create(dcontext); instr_set_raw_bits(instr, p, num_bytes); instrlist_prepend(trace, instr); md->trace_buf_top += num_bytes; } #endif /* emulates the effects of the instruction at pc with the state in mcontext * limited right now to only mov instructions * returns NULL if failed or not yet implemented, else returns the pc of the next instr. 
*/ app_pc d_r_emulate(dcontext_t *dcontext, app_pc pc, priv_mcontext_t *mc) { instr_t instr; app_pc next_pc = NULL; uint opc; instr_init(dcontext, &instr); next_pc = decode(dcontext, pc, &instr); if (!instr_valid(&instr)) { next_pc = NULL; goto emulate_failure; } DOLOG(2, LOG_INTERP, { d_r_loginst(dcontext, 2, &instr, "emulating"); }); opc = instr_get_opcode(&instr); if (opc == OP_store) { opnd_t src = instr_get_src(&instr, 0); opnd_t dst = instr_get_dst(&instr, 0); reg_t *target; reg_t val; uint sz = opnd_size_in_bytes(opnd_get_size(dst)); ASSERT(opnd_is_memory_reference(dst)); if (sz != 4 IF_X64(&&sz != 8)) { next_pc = NULL; goto emulate_failure; } target = (reg_t *)opnd_compute_address_priv(dst, mc); if (opnd_is_reg(src)) { val = reg_get_value_priv(opnd_get_reg(src), mc); } else if (opnd_is_immed_int(src)) { val = (reg_t)opnd_get_immed_int(src); } else { next_pc = NULL; goto emulate_failure; } DOCHECK(1, { uint prot = 0; ASSERT(get_memory_info((app_pc)target, NULL, NULL, &prot)); ASSERT(TEST(MEMPROT_WRITE, prot)); }); LOG(THREAD, LOG_INTERP, 2, "\temulating store by writing " PFX " to " PFX "\n", val, target); if (sz == 4) *((int *)target) = (int)val; #ifdef X64 else if (sz == 8) *target = val; #endif } else if (opc == IF_X86_ELSE(OP_inc, OP_add) || opc == IF_X86_ELSE(OP_dec, OP_sub)) { opnd_t src = instr_get_src(&instr, 0); reg_t *target; uint sz = opnd_size_in_bytes(opnd_get_size(src)); if (sz != 4 IF_X64(&&sz != 8)) { next_pc = NULL; goto emulate_failure; } /* FIXME: handle changing register value */ ASSERT(opnd_is_memory_reference(src)); /* FIXME: change these to take in priv_mcontext_t* ? */ target = (reg_t *)opnd_compute_address_priv(src, mc); DOCHECK(1, { uint prot = 0; ASSERT(get_memory_info((app_pc)target, NULL, NULL, &prot)); ASSERT(TEST(MEMPROT_WRITE, prot)); }); LOG(THREAD, LOG_INTERP, 2, "\temulating %s to " PFX "\n", opc == IF_X86_ELSE(OP_inc, OP_add) ? "inc" : "dec", target); if (sz == 4) { if (opc == IF_X86_ELSE(OP_inc, OP_add)) (*((int *)target))++; else (*((int *)target))--; } #ifdef X64 else if (sz == 8) { if (opc == IF_X86_ELSE(OP_inc, OP_add)) (*target)++; else (*target)--; } #endif } emulate_failure: instr_free(dcontext, &instr); return next_pc; }
1
17,805
This should be inside the set routine IMHO: matches the others; all callers need it; simplifies code here.
DynamoRIO-dynamorio
c
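The review note two fields above says the fixup belongs inside the set routine. The diff it reviews is not included in this excerpt, so what follows is only a generic C sketch of that kind of refactor, with hypothetical names (instr_set_target_old/new and the pared-down instr_t are illustrations, not DynamoRIO's real API): fold the step that every caller repeats into the setter itself.

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for the real structure; the actual patch under
 * review is not shown in this row. */
typedef struct _instr_t {
    void *target;
    bool raw_bits_valid;
} instr_t;

/* Before: every caller had to remember to do the follow-up step itself. */
static void instr_set_target_old(instr_t *instr, void *target)
{
    instr->target = target;
    /* ... each call site then invalidated raw bits by hand ... */
}

/* After: the step lives inside the set routine, matching the other
 * setters, so no caller can forget it and each call site shrinks. */
static void instr_set_target_new(instr_t *instr, void *target)
{
    instr->target = target;
    instr->raw_bits_valid = false; /* the step all callers need */
}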
@@ -106,6 +106,8 @@ public class TwoPhaseCommitter { /** unit is second */ private static final long DEFAULT_BATCH_WRITE_LOCK_TTL = 3000; + private static final long MAX_RETRY_LEVEL = 3; + private static final Logger LOG = LoggerFactory.getLogger(TwoPhaseCommitter.class); private TxnKVClient kvClient;
1
/* * Copyright 2017 PingCAP, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * See the License for the specific language governing permissions and * limitations under the License. */ package com.pingcap.tikv; import com.google.protobuf.ByteString; import com.pingcap.tikv.codec.KeyUtils; import com.pingcap.tikv.exception.GrpcException; import com.pingcap.tikv.exception.TiBatchWriteException; import com.pingcap.tikv.region.RegionManager; import com.pingcap.tikv.region.TiRegion; import com.pingcap.tikv.txn.TxnKVClient; import com.pingcap.tikv.txn.type.BatchKeys; import com.pingcap.tikv.txn.type.ClientRPCResult; import com.pingcap.tikv.txn.type.GroupKeyResult; import com.pingcap.tikv.util.BackOffFunction; import com.pingcap.tikv.util.BackOffer; import com.pingcap.tikv.util.ConcreteBackOffer; import com.pingcap.tikv.util.Pair; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.tikv.kvproto.Kvrpcpb; import org.tikv.kvproto.Metapb; public class TwoPhaseCommitter { public static class ByteWrapper { private byte[] bytes; public ByteWrapper(byte[] bytes) { this.bytes = bytes; } public byte[] getBytes() { return this.bytes; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } ByteWrapper that = (ByteWrapper) o; return Arrays.equals(bytes, that.bytes); } @Override public int hashCode() { return Arrays.hashCode(bytes); } } public static class BytePairWrapper { private byte[] key; private byte[] value; public BytePairWrapper(byte[] key, byte[] value) { this.key = key; this.value = value; } public byte[] getKey() { return key; } public byte[] getValue() { return value; } } /** buffer spark rdd iterator data into memory */ private static final int WRITE_BUFFER_SIZE = 32 * 1024; /** * TiKV recommends each RPC packet should be less than ~1MB. We keep each packet's Key+Value size * below 768KB. 
*/ private static final int TXN_COMMIT_BATCH_SIZE = 768 * 1024; /** unit is second */ private static final long DEFAULT_BATCH_WRITE_LOCK_TTL = 3000; private static final Logger LOG = LoggerFactory.getLogger(TwoPhaseCommitter.class); private TxnKVClient kvClient; private RegionManager regionManager; /** start timestamp of transaction which get from PD */ private final long startTs; public TwoPhaseCommitter(TxnKVClient kvClient, long startTime) { this.kvClient = kvClient; this.regionManager = kvClient.getRegionManager(); this.startTs = startTime; } public void close() throws Exception {} /** * 2pc - prewrite primary key * * @param backOffer * @param primaryKey * @param value * @return */ public void prewritePrimaryKey(BackOffer backOffer, byte[] primaryKey, byte[] value) throws TiBatchWriteException { this.doPrewritePrimaryKeyWithRetry( backOffer, ByteString.copyFrom(primaryKey), ByteString.copyFrom(value)); } private void doPrewritePrimaryKeyWithRetry(BackOffer backOffer, ByteString key, ByteString value) throws TiBatchWriteException { Pair<TiRegion, Metapb.Store> pair = this.regionManager.getRegionStorePairByKey(key); TiRegion tiRegion = pair.first; Metapb.Store store = pair.second; Kvrpcpb.Mutation mutation; if (!value.isEmpty()) { mutation = Kvrpcpb.Mutation.newBuilder().setKey(key).setValue(value).setOp(Kvrpcpb.Op.Put).build(); } else { mutation = Kvrpcpb.Mutation.newBuilder().setKey(key).setOp(Kvrpcpb.Op.Put).build(); } List<Kvrpcpb.Mutation> mutationList = Collections.singletonList(mutation); // send rpc request to tikv server long lockTTL = getTxnLockTTL(this.startTs); ClientRPCResult prewriteResult = this.kvClient.prewrite( backOffer, mutationList, key, lockTTL, this.startTs, tiRegion, store); if (!prewriteResult.isSuccess() && !prewriteResult.isRetry()) { throw new TiBatchWriteException("prewrite primary key error", prewriteResult.getException()); } if (prewriteResult.isRetry()) { try { backOffer.doBackOff( BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException( String.format("Txn prewrite primary key failed, regionId=%s", tiRegion.getId()), prewriteResult.getException())); // re-split keys and commit again. 
this.doPrewritePrimaryKeyWithRetry(backOffer, key, value); } catch (GrpcException e) { String errorMsg = String.format( "Txn prewrite primary key error, re-split commit failed, regionId=%s, detail=%s", tiRegion.getId(), e.getMessage()); throw new TiBatchWriteException(errorMsg, e); } } LOG.debug("prewrite primary key {} successfully", KeyUtils.formatBytes(key)); } /** * 2pc - commit primary key * * @param backOffer * @param key * @return */ public void commitPrimaryKey(BackOffer backOffer, byte[] key, long commitTs) throws TiBatchWriteException { doCommitPrimaryKeyWithRetry(backOffer, ByteString.copyFrom(key), commitTs); } private void doCommitPrimaryKeyWithRetry(BackOffer backOffer, ByteString key, long commitTs) throws TiBatchWriteException { Pair<TiRegion, Metapb.Store> pair = this.regionManager.getRegionStorePairByKey(key); TiRegion tiRegion = pair.first; Metapb.Store store = pair.second; ByteString[] keys = new ByteString[] {key}; // send rpc request to tikv server ClientRPCResult commitResult = this.kvClient.commit(backOffer, keys, this.startTs, commitTs, tiRegion, store); if (!commitResult.isSuccess()) { if (!commitResult.isRetry()) { throw new TiBatchWriteException("commit primary key error", commitResult.getException()); } else { backOffer.doBackOff( BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException( String.format("Txn commit primary key failed, regionId=%s", tiRegion.getId()), commitResult.getException())); // re-split keys and commit again. this.doCommitPrimaryKeyWithRetry(backOffer, key, commitTs); } } LOG.debug("commit primary key {} successfully", KeyUtils.formatBytes(key)); } /** * 2pc - prewrite secondary keys * * @param primaryKey * @param pairs * @return */ public void prewriteSecondaryKeys(byte[] primaryKey, Iterator<BytePairWrapper> pairs) throws TiBatchWriteException { Iterator<Pair<ByteString, ByteString>> byteStringKeys = new Iterator<Pair<ByteString, ByteString>>() { @Override public boolean hasNext() { return pairs.hasNext(); } @Override public Pair<ByteString, ByteString> next() { BytePairWrapper pair = pairs.next(); return new Pair<>( ByteString.copyFrom(pair.getKey()), ByteString.copyFrom(pair.getValue())); } }; doPrewriteSecondaryKeys(ByteString.copyFrom(primaryKey), byteStringKeys); } private void doPrewriteSecondaryKeys( ByteString primaryKey, Iterator<Pair<ByteString, ByteString>> pairs) throws TiBatchWriteException { int totalSize = 0; while (pairs.hasNext()) { ByteString[] keyBytes = new ByteString[WRITE_BUFFER_SIZE]; ByteString[] valueBytes = new ByteString[WRITE_BUFFER_SIZE]; int size = 0; while (size < WRITE_BUFFER_SIZE && pairs.hasNext()) { Pair<ByteString, ByteString> pair = pairs.next(); keyBytes[size] = pair.first; valueBytes[size] = pair.second; size++; } BackOffer backOffer = ConcreteBackOffer.newCustomBackOff(BackOffer.BATCH_PREWRITE_BACKOFF); doPrewriteSecondaryKeysInBatchesWithRetry(backOffer, primaryKey, keyBytes, valueBytes, size); totalSize = totalSize + size; } } private void doPrewriteSecondaryKeysInBatchesWithRetry( BackOffer backOffer, ByteString primaryKey, ByteString[] keys, ByteString[] values, int size) throws TiBatchWriteException { LOG.debug( "start prewrite secondary key in batches, primary key={}, size={}", KeyUtils.formatBytes(primaryKey), size); if (keys == null || keys.length == 0 || values == null || values.length == 0 || size <= 0) { // return success return; } Map<ByteString, Kvrpcpb.Mutation> mutations = new LinkedHashMap<>(); for (int i = 0; i < size; i++) { ByteString key = keys[i]; ByteString value = 
values[i]; Kvrpcpb.Mutation mutation; if (!value.isEmpty()) { mutation = Kvrpcpb.Mutation.newBuilder().setKey(key).setValue(value).setOp(Kvrpcpb.Op.Put).build(); } else { // value can be null (table with one primary key integer column, data is encoded in key) mutation = Kvrpcpb.Mutation.newBuilder().setKey(key).setOp(Kvrpcpb.Op.Put).build(); } mutations.put(key, mutation); } // groups keys by region GroupKeyResult groupResult = this.groupKeysByRegion(keys, size); List<BatchKeys> batchKeyList = new LinkedList<>(); Map<Pair<TiRegion, Metapb.Store>, List<ByteString>> groupKeyMap = groupResult.getGroupsResult(); for (Pair<TiRegion, Metapb.Store> pair : groupKeyMap.keySet()) { TiRegion tiRegion = pair.first; Metapb.Store store = pair.second; this.appendBatchBySize(batchKeyList, tiRegion, store, groupKeyMap.get(pair), true, mutations); } // For prewrite, stop sending other requests after receiving first error. for (BatchKeys batchKeys : batchKeyList) { doPrewriteSecondaryKeySingleBatchWithRetry(backOffer, primaryKey, batchKeys, mutations); } LOG.debug( "prewrite secondary key in batches successfully, primary key={}, size={}", KeyUtils.formatBytes(primaryKey), size); } private void doPrewriteSecondaryKeySingleBatchWithRetry( BackOffer backOffer, ByteString primaryKey, BatchKeys batchKeys, Map<ByteString, Kvrpcpb.Mutation> mutations) throws TiBatchWriteException { LOG.debug("start prewrite secondary key, size={}", batchKeys.getKeys().size()); List<ByteString> keyList = batchKeys.getKeys(); int batchSize = keyList.size(); List<Kvrpcpb.Mutation> mutationList = new ArrayList<>(batchSize); for (ByteString key : keyList) { mutationList.add(mutations.get(key)); } // send rpc request to tikv server int txnSize = batchKeys.getKeys().size(); long lockTTL = getTxnLockTTL(this.startTs, txnSize); ClientRPCResult prewriteResult = this.kvClient.prewrite( backOffer, mutationList, primaryKey, lockTTL, this.startTs, batchKeys.getRegion(), batchKeys.getStore()); if (!prewriteResult.isSuccess() && !prewriteResult.isRetry()) { throw new TiBatchWriteException( "prewrite secondary key error", prewriteResult.getException()); } if (prewriteResult.isRetry()) { try { backOffer.doBackOff( BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException( String.format( "Txn prewrite secondary key SingleBatch failed, regionId=%s", batchKeys.getRegion().getId()), prewriteResult.getException())); // re-split keys and commit again. 
int size = batchKeys.getKeys().size(); ByteString[] keyBytes = new ByteString[size]; ByteString[] valueBytes = new ByteString[size]; int i = 0; for (ByteString k : batchKeys.getKeys()) { keyBytes[i] = k; valueBytes[i] = mutations.get(k).getValue(); i++; } doPrewriteSecondaryKeysInBatchesWithRetry( backOffer, primaryKey, keyBytes, valueBytes, size); } catch (GrpcException e) { String errorMsg = String.format( "Txn prewrite secondary key SingleBatch error, re-split commit failed, regionId=%s, detail=%s", batchKeys.getRegion().getId(), e.getMessage()); throw new TiBatchWriteException(errorMsg, e); } } LOG.debug("prewrite secondary key successfully, size={}", batchKeys.getKeys().size()); } private void appendBatchBySize( List<BatchKeys> batchKeyList, TiRegion tiRegion, Metapb.Store store, List<ByteString> keys, boolean sizeIncludeValue, Map<ByteString, Kvrpcpb.Mutation> mutations) { int start; int end; int len = keys.size(); for (start = 0; start < len; start = end) { int size = 0; for (end = start; end < len && size < TXN_COMMIT_BATCH_SIZE; end++) { if (sizeIncludeValue) { size += this.keyValueSize(keys.get(end), mutations); } else { size += this.keySize(keys.get(end)); } } BatchKeys batchKeys = new BatchKeys(tiRegion, store, keys.subList(start, end)); batchKeyList.add(batchKeys); } } private long keyValueSize(ByteString key, Map<ByteString, Kvrpcpb.Mutation> mutations) { long size = key.size(); Kvrpcpb.Mutation mutation = mutations.get(key); if (mutation != null) { size += mutation.getValue().toByteArray().length; } return size; } private long keySize(ByteString key) { return key.size(); } /** * 2pc - commit secondary keys * * @param keys * @param commitTs * @return */ public void commitSecondaryKeys(Iterator<ByteWrapper> keys, long commitTs) throws TiBatchWriteException { Iterator<ByteString> byteStringKeys = new Iterator<ByteString>() { @Override public boolean hasNext() { return keys.hasNext(); } @Override public ByteString next() { return ByteString.copyFrom(keys.next().bytes); } }; doCommitSecondaryKeys(byteStringKeys, commitTs); } private void doCommitSecondaryKeys(Iterator<ByteString> keys, long commitTs) throws TiBatchWriteException { LOG.debug("start commit secondary key"); int totalSize = 0; while (keys.hasNext()) { ByteString[] keyBytes = new ByteString[WRITE_BUFFER_SIZE]; int size = 0; for (int i = 0; i < WRITE_BUFFER_SIZE; i++) { if (keys.hasNext()) { keyBytes[size] = keys.next(); size++; } else { break; } } totalSize = totalSize + size; BackOffer backOffer = ConcreteBackOffer.newCustomBackOff(BackOffer.BATCH_COMMIT_BACKOFF); doCommitSecondaryKeys(backOffer, keyBytes, size, commitTs); } LOG.debug("commit secondary key successfully, total size={}", totalSize); } private void doCommitSecondaryKeys( BackOffer backOffer, ByteString[] keys, int size, long commitTs) throws TiBatchWriteException { if (keys == null || keys.length == 0 || size <= 0) { return; } // groups keys by region GroupKeyResult groupResult = this.groupKeysByRegion(keys, size); List<BatchKeys> batchKeyList = new LinkedList<>(); Map<Pair<TiRegion, Metapb.Store>, List<ByteString>> groupKeyMap = groupResult.getGroupsResult(); for (Pair<TiRegion, Metapb.Store> pair : groupKeyMap.keySet()) { TiRegion tiRegion = pair.first; Metapb.Store store = pair.second; this.appendBatchBySize(batchKeyList, tiRegion, store, groupKeyMap.get(pair), false, null); } // For prewrite, stop sending other requests after receiving first error. 
for (BatchKeys batchKeys : batchKeyList) { doCommitSecondaryKeySingleBatch(backOffer, batchKeys, commitTs); } } private void doCommitSecondaryKeySingleBatch( BackOffer backOffer, BatchKeys batchKeys, long commitTs) throws TiBatchWriteException { List<ByteString> keysCommit = batchKeys.getKeys(); ByteString[] keys = new ByteString[keysCommit.size()]; keysCommit.toArray(keys); // send rpc request to tikv server ClientRPCResult commitResult = this.kvClient.commit( backOffer, keys, this.startTs, commitTs, batchKeys.getRegion(), batchKeys.getStore()); if (!commitResult.isSuccess()) { String error = String.format("Txn commit secondary key error, regionId=%s", batchKeys.getRegion()); LOG.warn(error); throw new TiBatchWriteException("commit secondary key error", commitResult.getException()); } LOG.debug("commit {} rows successfully", batchKeys.getKeys().size()); } private GroupKeyResult groupKeysByRegion(ByteString[] keys, int size) throws TiBatchWriteException { Map<Pair<TiRegion, Metapb.Store>, List<ByteString>> groups = new HashMap<>(); int index = 0; try { for (; index < size; index++) { ByteString key = keys[index]; Pair<TiRegion, Metapb.Store> pair = this.regionManager.getRegionStorePairByKey(key); if (pair != null) { groups.computeIfAbsent(pair, e -> new LinkedList<>()).add(key); } } } catch (Exception e) { throw new TiBatchWriteException("Txn groupKeysByRegion error", e); } GroupKeyResult result = new GroupKeyResult(); result.setGroupsResult(groups); return result; } private long getTxnLockTTL(long startTime) { // TODO: calculate txn lock ttl return DEFAULT_BATCH_WRITE_LOCK_TTL; } private long getTxnLockTTL(long startTime, int txnSize) { // TODO: calculate txn lock ttl return DEFAULT_BATCH_WRITE_LOCK_TTL; } }
1
10,164
MAX_RETRY_TIMES makes much more sense.
pingcap-tispark
java
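The note above prefers MAX_RETRY_TIMES over the MAX_RETRY_LEVEL constant introduced by this row's patch. Below is a minimal Java sketch of how the renamed constant could bound retries, which the recursive doPrewritePrimaryKeyWithRetry in oldf otherwise lacks; RetrySketch and Attempt are illustrative names, not TiSpark's API.

public class RetrySketch {
  // Renamed as the reviewer suggests: it counts attempts, not "levels".
  private static final long MAX_RETRY_TIMES = 3;

  interface Attempt {
    boolean run() throws Exception; // true on success, false to retry
  }

  static void withRetry(Attempt attempt) throws Exception {
    for (long i = 0; i < MAX_RETRY_TIMES; i++) {
      if (attempt.run()) {
        return; // success
      }
      // the real client would back off here before the next attempt
    }
    throw new Exception("exceeded MAX_RETRY_TIMES=" + MAX_RETRY_TIMES);
  }
}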
@@ -56,6 +56,7 @@ type testContext struct { fakeStatsKeeper *fakeSessionStatsKeeper fakeDialog *fakeDialog fakePromiseIssuer *fakePromiseIssuer + fakeStorage *fakeStorage sync.RWMutex }
1
/* * Copyright (C) 2017 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package connection import ( "errors" "sync" "testing" "time" "github.com/mysteriumnetwork/node/client/stats" "github.com/mysteriumnetwork/node/communication" "github.com/mysteriumnetwork/node/identity" "github.com/mysteriumnetwork/node/server" "github.com/mysteriumnetwork/node/service_discovery/dto" "github.com/mysteriumnetwork/node/session" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" ) type fakeState string const ( ProcessStarted fakeState = "ProcessStarted" ConnectingState fakeState = "ConnectingState" ReconnectingState fakeState = "ReconnectingState" WaitState fakeState = "WaitState" AuthenticatingState fakeState = "AuthenticatingState" GetConfigState fakeState = "GetConfigState" AssignIPState fakeState = "AssignIPState" ConnectedState fakeState = "ConnectedState" ExitingState fakeState = "ExitingState" ProcessExited fakeState = "ProcessExited" ) type testContext struct { suite.Suite fakeConnectionFactory *connectionFactoryFake connManager *connectionManager fakeDiscoveryClient *server.ClientFake fakeStatsKeeper *fakeSessionStatsKeeper fakeDialog *fakeDialog fakePromiseIssuer *fakePromiseIssuer sync.RWMutex } type connectionFactoryFake struct { vpnClientCreationError error fakeVpnClient *vpnClientFake } func (cff *connectionFactoryFake) CreateConnection(connectionParams ConnectOptions, stateChannel StateChannel) (Connection, error) { //each test can set this value to simulate openvpn creation error, this flag is reset BEFORE each test if cff.vpnClientCreationError != nil { return nil, cff.vpnClientCreationError } stateCallback := func(state fakeState) { if state == ConnectedState { stateChannel <- Connected } if state == ExitingState { stateChannel <- Disconnecting } if state == ReconnectingState { stateChannel <- Reconnecting } //this is the last state - close channel (according to best practices of go - channel writer controls channel) if state == ProcessExited { close(stateChannel) } } cff.fakeVpnClient.StateCallback(stateCallback) return cff.fakeVpnClient, nil } var ( myID = identity.FromAddress("identity-1") activeProviderID = identity.FromAddress("vpn-node-1") activeProviderContact = dto.Contact{} activeProposal = dto.ServiceProposal{ ProviderID: activeProviderID.Address, ProviderContacts: []dto.Contact{activeProviderContact}, } ) func (tc *testContext) SetupTest() { tc.Lock() defer tc.Unlock() tc.fakeDiscoveryClient = server.NewClientFake() tc.fakeDiscoveryClient.RegisterProposal(activeProposal, nil) tc.fakeDialog = &fakeDialog{} dialogCreator := func(consumer, provider identity.Identity, contact dto.Contact) (communication.Dialog, error) { tc.RLock() defer tc.RUnlock() return tc.fakeDialog, nil } tc.fakePromiseIssuer = &fakePromiseIssuer{} promiseIssuerFactory := func(_ identity.Identity, _ communication.Dialog) PromiseIssuer { return tc.fakePromiseIssuer } 
tc.fakeConnectionFactory = &connectionFactoryFake{ vpnClientCreationError: nil, fakeVpnClient: &vpnClientFake{ nil, []fakeState{ ProcessStarted, ConnectingState, WaitState, AuthenticatingState, GetConfigState, AssignIPState, ConnectedState, }, []fakeState{ ExitingState, ProcessExited, }, nil, sync.WaitGroup{}, sync.RWMutex{}, }, } tc.fakeStatsKeeper = &fakeSessionStatsKeeper{} tc.connManager = NewManager(tc.fakeDiscoveryClient, dialogCreator, promiseIssuerFactory, tc.fakeConnectionFactory, tc.fakeStatsKeeper) } func (tc *testContext) TestWhenNoConnectionIsMadeStatusIsNotConnected() { assert.Exactly(tc.T(), statusNotConnected(), tc.connManager.Status()) } func (tc *testContext) TestWithUnknownProviderConnectionIsNotMade() { noProposalsError := errors.New("provider has no service proposals") assert.Equal(tc.T(), noProposalsError, tc.connManager.Connect(myID, identity.FromAddress("unknown-node"), ConnectParams{})) assert.Equal(tc.T(), statusNotConnected(), tc.connManager.Status()) } func (tc *testContext) TestOnConnectErrorStatusIsNotConnected() { tc.fakeConnectionFactory.vpnClientCreationError = errors.New("fatal connection error") assert.Error(tc.T(), tc.connManager.Connect(myID, activeProviderID, ConnectParams{})) assert.Equal(tc.T(), statusNotConnected(), tc.connManager.Status()) assert.True(tc.T(), tc.fakeDialog.closed) } func (tc *testContext) TestWhenManagerMadeConnectionStatusReturnsConnectedStateAndSessionId() { err := tc.connManager.Connect(myID, activeProviderID, ConnectParams{}) assert.NoError(tc.T(), err) assert.Equal(tc.T(), statusConnected("vpn-connection-id"), tc.connManager.Status()) assert.True(tc.T(), tc.fakeStatsKeeper.sessionStartMarked) } func (tc *testContext) TestStatusReportsConnectingWhenConnectionIsInProgress() { tc.fakeConnectionFactory.fakeVpnClient.onStartReportStates = []fakeState{} go func() { tc.connManager.Connect(myID, activeProviderID, ConnectParams{}) }() waitABit() assert.Equal(tc.T(), statusConnecting(), tc.connManager.Status()) tc.connManager.Disconnect() } func (tc *testContext) TestStatusReportsDisconnectingThenNotConnected() { tc.fakeConnectionFactory.fakeVpnClient.onStopReportStates = []fakeState{} err := tc.connManager.Connect(myID, activeProviderID, ConnectParams{}) assert.NoError(tc.T(), err) assert.Equal(tc.T(), statusConnected("vpn-connection-id"), tc.connManager.Status()) assert.NoError(tc.T(), tc.connManager.Disconnect()) assert.Equal(tc.T(), statusDisconnecting(), tc.connManager.Status()) tc.fakeConnectionFactory.fakeVpnClient.reportState(ExitingState) tc.fakeConnectionFactory.fakeVpnClient.reportState(ProcessExited) waitABit() assert.Equal(tc.T(), statusNotConnected(), tc.connManager.Status()) assert.True(tc.T(), tc.fakeStatsKeeper.sessionEndMarked) } func (tc *testContext) TestConnectResultsInAlreadyConnectedErrorWhenConnectionExists() { assert.NoError(tc.T(), tc.connManager.Connect(myID, activeProviderID, ConnectParams{})) assert.Equal(tc.T(), ErrAlreadyExists, tc.connManager.Connect(myID, activeProviderID, ConnectParams{})) } func (tc *testContext) TestDisconnectReturnsErrorWhenNoConnectionExists() { assert.Equal(tc.T(), ErrNoConnection, tc.connManager.Disconnect()) } func (tc *testContext) TestReconnectingStatusIsReportedWhenOpenVpnGoesIntoReconnectingState() { assert.NoError(tc.T(), tc.connManager.Connect(myID, activeProviderID, ConnectParams{})) tc.fakeConnectionFactory.fakeVpnClient.reportState(ReconnectingState) waitABit() assert.Equal(tc.T(), statusReconnecting(), tc.connManager.Status()) } func (tc *testContext) 
TestDoubleDisconnectResultsInError() { assert.NoError(tc.T(), tc.connManager.Connect(myID, activeProviderID, ConnectParams{})) assert.Equal(tc.T(), statusConnected("vpn-connection-id"), tc.connManager.Status()) assert.NoError(tc.T(), tc.connManager.Disconnect()) waitABit() assert.Equal(tc.T(), statusNotConnected(), tc.connManager.Status()) assert.Equal(tc.T(), ErrNoConnection, tc.connManager.Disconnect()) } func (tc *testContext) TestTwoConnectDisconnectCyclesReturnNoError() { assert.NoError(tc.T(), tc.connManager.Connect(myID, activeProviderID, ConnectParams{})) assert.Equal(tc.T(), statusConnected("vpn-connection-id"), tc.connManager.Status()) assert.NoError(tc.T(), tc.connManager.Disconnect()) waitABit() assert.Equal(tc.T(), statusNotConnected(), tc.connManager.Status()) assert.NoError(tc.T(), tc.connManager.Connect(myID, activeProviderID, ConnectParams{})) assert.Equal(tc.T(), statusConnected("vpn-connection-id"), tc.connManager.Status()) assert.NoError(tc.T(), tc.connManager.Disconnect()) waitABit() assert.Equal(tc.T(), statusNotConnected(), tc.connManager.Status()) } func (tc *testContext) TestConnectFailsIfOpenvpnFactoryReturnsError() { tc.fakeConnectionFactory.vpnClientCreationError = errors.New("failed to create vpn instance") assert.Error(tc.T(), tc.connManager.Connect(myID, activeProviderID, ConnectParams{})) } func (tc *testContext) TestStatusIsConnectedWhenConnectCommandReturnsWithoutError() { tc.connManager.Connect(myID, activeProviderID, ConnectParams{}) assert.Equal(tc.T(), statusConnected("vpn-connection-id"), tc.connManager.Status()) } func (tc *testContext) TestConnectingInProgressCanBeCanceled() { tc.fakeConnectionFactory.fakeVpnClient.onStartReportStates = []fakeState{} connectWaiter := &sync.WaitGroup{} connectWaiter.Add(1) var err error go func() { defer connectWaiter.Done() err = tc.connManager.Connect(myID, activeProviderID, ConnectParams{}) }() waitABit() assert.Equal(tc.T(), statusConnecting(), tc.connManager.Status()) assert.NoError(tc.T(), tc.connManager.Disconnect()) connectWaiter.Wait() assert.Equal(tc.T(), ErrConnectionCancelled, err) } func (tc *testContext) TestConnectMethodReturnsErrorIfOpenvpnClientExitsDuringConnect() { tc.fakeConnectionFactory.fakeVpnClient.onStartReportStates = []fakeState{} tc.fakeConnectionFactory.fakeVpnClient.onStopReportStates = []fakeState{} connectWaiter := sync.WaitGroup{} connectWaiter.Add(1) var err error go func() { defer connectWaiter.Done() err = tc.connManager.Connect(myID, activeProviderID, ConnectParams{}) }() waitABit() tc.fakeConnectionFactory.fakeVpnClient.reportState(ProcessExited) connectWaiter.Wait() assert.Equal(tc.T(), ErrConnectionFailed, err) } func (tc *testContext) Test_PromiseIssuer_WhenManagerMadeConnectionIsStarted() { err := tc.connManager.Connect(myID, activeProviderID, ConnectParams{}) assert.NoError(tc.T(), err) assert.True(tc.T(), tc.fakePromiseIssuer.startCalled) } func (tc *testContext) Test_PromiseIssuer_OnConnectErrorIsStopped() { tc.fakeConnectionFactory.vpnClientCreationError = errors.New("fatal connection error") err := tc.connManager.Connect(myID, activeProviderID, ConnectParams{}) assert.Error(tc.T(), err) assert.True(tc.T(), tc.fakePromiseIssuer.stopCalled) } func TestConnectionManagerSuite(t *testing.T) { suite.Run(t, new(testContext)) } type vpnClientFake struct { onStartReturnError error onStartReportStates []fakeState onStopReportStates []fakeState stateCallback func(state fakeState) fakeProcess sync.WaitGroup sync.RWMutex } func (foc *vpnClientFake) Start() error { foc.RLock() defer 
foc.RUnlock() if foc.onStartReturnError != nil { return foc.onStartReturnError } foc.fakeProcess.Add(1) for _, fakeState := range foc.onStartReportStates { foc.reportState(fakeState) } return nil } func (foc *vpnClientFake) Wait() error { foc.fakeProcess.Wait() return nil } func (foc *vpnClientFake) Stop() { for _, fakeState := range foc.onStopReportStates { foc.reportState(fakeState) } foc.fakeProcess.Done() } func (foc *vpnClientFake) reportState(state fakeState) { foc.RLock() defer foc.RUnlock() foc.stateCallback(state) } func (foc *vpnClientFake) StateCallback(callback func(state fakeState)) { foc.Lock() defer foc.Unlock() foc.stateCallback = callback } type fakeDialog struct { peerID identity.Identity closed bool sync.RWMutex } func (fd *fakeDialog) PeerID() identity.Identity { fd.RLock() defer fd.RUnlock() return fd.peerID } func (fd *fakeDialog) Close() error { fd.Lock() defer fd.Unlock() fd.closed = true return nil } func (fd *fakeDialog) Receive(consumer communication.MessageConsumer) error { return nil } func (fd *fakeDialog) Respond(consumer communication.RequestConsumer) error { return nil } func (fd *fakeDialog) Send(producer communication.MessageProducer) error { return nil } func (fd *fakeDialog) Request(producer communication.RequestProducer) (responsePtr interface{}, err error) { return &session.CreateResponse{ Success: true, Session: session.SessionDto{ ID: "vpn-connection-id", Config: []byte("{}"), }, }, nil } type fakePromiseIssuer struct { startCalled bool stopCalled bool } func (issuer *fakePromiseIssuer) Start(proposal dto.ServiceProposal) error { issuer.startCalled = true return nil } func (issuer *fakePromiseIssuer) Stop() error { issuer.stopCalled = true return nil } func waitABit() { //usually time.Sleep call gives a chance for other goroutines to kick in //important when testing async code time.Sleep(10 * time.Millisecond) } type fakeSessionStatsKeeper struct { sessionStartMarked, sessionEndMarked bool } func (fsk *fakeSessionStatsKeeper) Save(stats stats.SessionStats) { } func (fsk *fakeSessionStatsKeeper) Retrieve() stats.SessionStats { return stats.SessionStats{} } func (fsk *fakeSessionStatsKeeper) MarkSessionStart() { fsk.sessionStartMarked = true } func (fsk *fakeSessionStatsKeeper) GetSessionDuration() time.Duration { return time.Duration(0) } func (fsk *fakeSessionStatsKeeper) MarkSessionEnd() { fsk.sessionEndMarked = true }
1
12,433
`saveSession` allows easier mocking - just save the function you need, instead of the full interface (see the sketch after this entry)
mysteriumnetwork-node
go
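The note above argues for injecting a single save function rather than the fakeStorage field this row's patch adds to testContext. A minimal Go sketch under that assumption; SaveSessionFunc and managerSketch are hypothetical stand-ins, not the real node types.

package connection

// Inject only the function the code under test actually calls.
type SaveSessionFunc func(sessionID string) error

type managerSketch struct {
	saveSession SaveSessionFunc
}

// In a test, fake just the one behaviour under inspection:
func newTestManager(record *[]string) *managerSketch {
	return &managerSketch{
		saveSession: func(id string) error {
			*record = append(*record, id) // capture calls for assertions
			return nil
		},
	}
}

A test then asserts on the recorded IDs directly, with no struct that has to satisfy every method of a storage interface.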
@@ -211,7 +211,8 @@ class Bucket(object): raise self.connection.provider.storage_response_error( response.status, response.reason, '') - def list(self, prefix='', delimiter='', marker='', headers=None): + def list(self, prefix='', delimiter='', marker='', headers=None, + encoding_type=None): """ List key objects within a bucket. This returns an instance of an BucketListResultSet that automatically handles all of the result
1
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2010, Eucalyptus Systems, Inc. # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. import boto from boto import handler from boto.resultset import ResultSet from boto.exception import BotoClientError from boto.s3.acl import Policy, CannedACLStrings, Grant from boto.s3.key import Key from boto.s3.prefix import Prefix from boto.s3.deletemarker import DeleteMarker from boto.s3.multipart import MultiPartUpload from boto.s3.multipart import CompleteMultiPartUpload from boto.s3.multidelete import MultiDeleteResult from boto.s3.multidelete import Error from boto.s3.bucketlistresultset import BucketListResultSet from boto.s3.bucketlistresultset import VersionedBucketListResultSet from boto.s3.bucketlistresultset import MultiPartUploadListResultSet from boto.s3.lifecycle import Lifecycle from boto.s3.tagging import Tags from boto.s3.cors import CORSConfiguration from boto.s3.bucketlogging import BucketLogging from boto.s3 import website import boto.jsonresponse import boto.utils import xml.sax import xml.sax.saxutils import StringIO import urllib import re import base64 from collections import defaultdict # as per http://goo.gl/BDuud (02/19/2011) class S3WebsiteEndpointTranslate(object): trans_region = defaultdict(lambda: 's3-website-us-east-1') trans_region['eu-west-1'] = 's3-website-eu-west-1' trans_region['us-west-1'] = 's3-website-us-west-1' trans_region['us-west-2'] = 's3-website-us-west-2' trans_region['sa-east-1'] = 's3-website-sa-east-1' trans_region['ap-northeast-1'] = 's3-website-ap-northeast-1' trans_region['ap-southeast-1'] = 's3-website-ap-southeast-1' trans_region['ap-southeast-2'] = 's3-website-ap-southeast-2' @classmethod def translate_region(self, reg): return self.trans_region[reg] S3Permissions = ['READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL'] class Bucket(object): LoggingGroup = 'http://acs.amazonaws.com/groups/s3/LogDelivery' BucketPaymentBody = """<?xml version="1.0" encoding="UTF-8"?> <RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Payer>%s</Payer> </RequestPaymentConfiguration>""" VersioningBody = """<?xml version="1.0" encoding="UTF-8"?> <VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Status>%s</Status> <MfaDelete>%s</MfaDelete> </VersioningConfiguration>""" VersionRE = '<Status>([A-Za-z]+)</Status>' MFADeleteRE = '<MfaDelete>([A-Za-z]+)</MfaDelete>' def __init__(self, connection=None, name=None, key_class=Key): self.name = name 
self.connection = connection self.key_class = key_class def __repr__(self): return '<Bucket: %s>' % self.name def __iter__(self): return iter(BucketListResultSet(self)) def __contains__(self, key_name): return not (self.get_key(key_name) is None) def startElement(self, name, attrs, connection): return None def endElement(self, name, value, connection): if name == 'Name': self.name = value elif name == 'CreationDate': self.creation_date = value else: setattr(self, name, value) def set_key_class(self, key_class): """ Set the Key class associated with this bucket. By default, this would be the boto.s3.key.Key class but if you want to subclass that for some reason this allows you to associate your new class with a bucket so that when you call bucket.new_key() or when you get a listing of keys in the bucket you will get an instances of your key class rather than the default. :type key_class: class :param key_class: A subclass of Key that can be more specific """ self.key_class = key_class def lookup(self, key_name, headers=None): """ Deprecated: Please use get_key method. :type key_name: string :param key_name: The name of the key to retrieve :rtype: :class:`boto.s3.key.Key` :returns: A Key object from this bucket. """ return self.get_key(key_name, headers=headers) def get_key(self, key_name, headers=None, version_id=None, response_headers=None): """ Check to see if a particular key exists within the bucket. This method uses a HEAD request to check for the existance of the key. Returns: An instance of a Key object or None :type key_name: string :param key_name: The name of the key to retrieve :type response_headers: dict :param response_headers: A dictionary containing HTTP headers/values that will override any headers associated with the stored object in the response. See http://goo.gl/EWOPb for details. :rtype: :class:`boto.s3.key.Key` :returns: A Key object from this bucket. """ query_args_l = [] if version_id: query_args_l.append('versionId=%s' % version_id) if response_headers: for rk, rv in response_headers.iteritems(): query_args_l.append('%s=%s' % (rk, urllib.quote(rv))) key, resp = self._get_key_internal(key_name, headers, query_args_l) return key def _get_key_internal(self, key_name, headers, query_args_l): query_args = '&'.join(query_args_l) or None response = self.connection.make_request('HEAD', self.name, key_name, headers=headers, query_args=query_args) response.read() # Allow any success status (2xx) - for example this lets us # support Range gets, which return status 206: if response.status / 100 == 2: k = self.key_class(self) provider = self.connection.provider k.metadata = boto.utils.get_aws_metadata(response.msg, provider) k.etag = response.getheader('etag') k.content_type = response.getheader('content-type') k.content_encoding = response.getheader('content-encoding') k.content_disposition = response.getheader('content-disposition') k.content_language = response.getheader('content-language') k.last_modified = response.getheader('last-modified') # the following machinations are a workaround to the fact that # apache/fastcgi omits the content-length header on HEAD # requests when the content-length is zero. # See http://goo.gl/0Tdax for more details. 
clen = response.getheader('content-length') if clen: k.size = int(response.getheader('content-length')) else: k.size = 0 k.cache_control = response.getheader('cache-control') k.name = key_name k.handle_version_headers(response) k.handle_encryption_headers(response) k.handle_restore_headers(response) k.handle_addl_headers(response.getheaders()) return k, response else: if response.status == 404: return None, response else: raise self.connection.provider.storage_response_error( response.status, response.reason, '') def list(self, prefix='', delimiter='', marker='', headers=None): """ List key objects within a bucket. This returns an instance of an BucketListResultSet that automatically handles all of the result paging, etc. from S3. You just need to keep iterating until there are no more results. Called with no arguments, this will return an iterator object across all keys within the bucket. The Key objects returned by the iterator are obtained by parsing the results of a GET on the bucket, also known as the List Objects request. The XML returned by this request contains only a subset of the information about each key. Certain metadata fields such as Content-Type and user metadata are not available in the XML. Therefore, if you want these additional metadata fields you will have to do a HEAD request on the Key in the bucket. :type prefix: string :param prefix: allows you to limit the listing to a particular prefix. For example, if you call the method with prefix='/foo/' then the iterator will only cycle through the keys that begin with the string '/foo/'. :type delimiter: string :param delimiter: can be used in conjunction with the prefix to allow you to organize and browse your keys hierarchically. See http://goo.gl/Xx63h for more details. :type marker: string :param marker: The "marker" of where you are in the result set :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` :return: an instance of a BucketListResultSet that handles paging, etc """ return BucketListResultSet(self, prefix, delimiter, marker, headers) def list_versions(self, prefix='', delimiter='', key_marker='', version_id_marker='', headers=None): """ List version objects within a bucket. This returns an instance of an VersionedBucketListResultSet that automatically handles all of the result paging, etc. from S3. You just need to keep iterating until there are no more results. Called with no arguments, this will return an iterator object across all keys within the bucket. :type prefix: string :param prefix: allows you to limit the listing to a particular prefix. For example, if you call the method with prefix='/foo/' then the iterator will only cycle through the keys that begin with the string '/foo/'. :type delimiter: string :param delimiter: can be used in conjunction with the prefix to allow you to organize and browse your keys hierarchically. See: http://aws.amazon.com/releasenotes/Amazon-S3/213 for more details. :type marker: string :param marker: The "marker" of where you are in the result set :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` :return: an instance of a BucketListResultSet that handles paging, etc """ return VersionedBucketListResultSet(self, prefix, delimiter, key_marker, version_id_marker, headers) def list_multipart_uploads(self, key_marker='', upload_id_marker='', headers=None): """ List multipart upload objects within a bucket. This returns an instance of an MultiPartUploadListResultSet that automatically handles all of the result paging, etc. from S3. 
You just need to keep iterating until there are no more results. :type marker: string :param marker: The "marker" of where you are in the result set :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` :return: an instance of a BucketListResultSet that handles paging, etc """ return MultiPartUploadListResultSet(self, key_marker, upload_id_marker, headers) def _get_all_query_args(self, params, initial_query_string=''): pairs = [] if initial_query_string: pairs.append(initial_query_string) for key, value in params.items(): key = key.replace('_', '-') if key == 'maxkeys': key = 'max-keys' if isinstance(value, unicode): value = value.encode('utf-8') if value is not None and value != '': pairs.append('%s=%s' % ( urllib.quote(key), urllib.quote(str(value) ))) return '&'.join(pairs) def _get_all(self, element_map, initial_query_string='', headers=None, **params): query_args = self._get_all_query_args( params, initial_query_string=initial_query_string ) response = self.connection.make_request('GET', self.name, headers=headers, query_args=query_args) body = response.read() boto.log.debug(body) if response.status == 200: rs = ResultSet(element_map) h = handler.XmlHandler(rs, self) xml.sax.parseString(body, h) return rs else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def validate_kwarg_names(self, kwargs, names): """ Checks that all named arguments are in the specified list of names. :type kwargs: dict :param kwargs: Dictionary of kwargs to validate. :type names: list :param names: List of possible named arguments. """ for kwarg in kwargs: if kwarg not in names: raise TypeError('Invalid argument "%s"!' % kwarg) def get_all_keys(self, headers=None, **params): """ A lower-level method for listing contents of a bucket. This closely models the actual S3 API and requires you to manually handle the paging of results. For a higher-level method that handles the details of paging for you, you can use the list method. :type max_keys: int :param max_keys: The maximum number of keys to retrieve :type prefix: string :param prefix: The prefix of the keys you want to retrieve :type marker: string :param marker: The "marker" of where you are in the result set :type delimiter: string :param delimiter: If this optional, Unicode string parameter is included with your request, then keys that contain the same string between the prefix and the first occurrence of the delimiter will be rolled up into a single result element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response. :rtype: ResultSet :return: The result from S3 listing the keys requested """ self.validate_kwarg_names(params, ['maxkeys', 'max_keys', 'prefix', 'marker', 'delimiter']) return self._get_all([('Contents', self.key_class), ('CommonPrefixes', Prefix)], '', headers, **params) def get_all_versions(self, headers=None, **params): """ A lower-level, version-aware method for listing contents of a bucket. This closely models the actual S3 API and requires you to manually handle the paging of results. For a higher-level method that handles the details of paging for you, you can use the list method. :type max_keys: int :param max_keys: The maximum number of keys to retrieve :type prefix: string :param prefix: The prefix of the keys you want to retrieve :type key_marker: string :param key_marker: The "marker" of where you are in the result set with respect to keys. 
:type version_id_marker: string :param version_id_marker: The "marker" of where you are in the result set with respect to version-id's. :type delimiter: string :param delimiter: If this optional, Unicode string parameter is included with your request, then keys that contain the same string between the prefix and the first occurrence of the delimiter will be rolled up into a single result element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response. :rtype: ResultSet :return: The result from S3 listing the keys requested """ self.validate_get_all_versions_params(params) return self._get_all([('Version', self.key_class), ('CommonPrefixes', Prefix), ('DeleteMarker', DeleteMarker)], 'versions', headers, **params) def validate_get_all_versions_params(self, params): """ Validate that the parameters passed to get_all_versions are valid. Overridden by subclasses that allow a different set of parameters. :type params: dict :param params: Parameters to validate. """ self.validate_kwarg_names( params, ['maxkeys', 'max_keys', 'prefix', 'key_marker', 'version_id_marker', 'delimiter']) def get_all_multipart_uploads(self, headers=None, **params): """ A lower-level, version-aware method for listing active MultiPart uploads for a bucket. This closely models the actual S3 API and requires you to manually handle the paging of results. For a higher-level method that handles the details of paging for you, you can use the list method. :type max_uploads: int :param max_uploads: The maximum number of uploads to retrieve. Default value is 1000. :type key_marker: string :param key_marker: Together with upload_id_marker, this parameter specifies the multipart upload after which listing should begin. If upload_id_marker is not specified, only the keys lexicographically greater than the specified key_marker will be included in the list. If upload_id_marker is specified, any multipart uploads for a key equal to the key_marker might also be included, provided those multipart uploads have upload IDs lexicographically greater than the specified upload_id_marker. :type upload_id_marker: string :param upload_id_marker: Together with key-marker, specifies the multipart upload after which listing should begin. If key_marker is not specified, the upload_id_marker parameter is ignored. Otherwise, any multipart uploads for a key equal to the key_marker might be included in the list only if they have an upload ID lexicographically greater than the specified upload_id_marker. 
:rtype: ResultSet :return: The result from S3 listing the uploads requested """ self.validate_kwarg_names(params, ['max_uploads', 'key_marker', 'upload_id_marker']) return self._get_all([('Upload', MultiPartUpload), ('CommonPrefixes', Prefix)], 'uploads', headers, **params) def new_key(self, key_name=None): """ Creates a new key :type key_name: string :param key_name: The name of the key to create :rtype: :class:`boto.s3.key.Key` or subclass :returns: An instance of the newly created key object """ if not key_name: raise ValueError('Empty key names are not allowed') return self.key_class(self, key_name) def generate_url(self, expires_in, method='GET', headers=None, force_http=False, response_headers=None, expires_in_absolute=False): return self.connection.generate_url(expires_in, method, self.name, headers=headers, force_http=force_http, response_headers=response_headers, expires_in_absolute=expires_in_absolute) def delete_keys(self, keys, quiet=False, mfa_token=None, headers=None): """ Deletes a set of keys using S3's Multi-object delete API. If a VersionID is specified for that key then that version is removed. Returns a MultiDeleteResult Object, which contains Deleted and Error elements for each key you ask to delete. :type keys: list :param keys: A list of either key_names or (key_name, versionid) pairs or a list of Key instances. :type quiet: boolean :param quiet: In quiet mode the response includes only keys where the delete operation encountered an error. For a successful deletion, the operation does not return any information about the delete in the response body. :type mfa_token: tuple or list of strings :param mfa_token: A tuple or list consisting of the serial number from the MFA device and the current value of the six-digit token associated with the device. This value is required anytime you are deleting versioned objects from a bucket that has the MFADelete option on the bucket. :returns: An instance of MultiDeleteResult """ ikeys = iter(keys) result = MultiDeleteResult(self) provider = self.connection.provider query_args = 'delete' def delete_keys2(hdrs): hdrs = hdrs or {} data = u"""<?xml version="1.0" encoding="UTF-8"?>""" data += u"<Delete>" if quiet: data += u"<Quiet>true</Quiet>" count = 0 while count < 1000: try: key = ikeys.next() except StopIteration: break if isinstance(key, basestring): key_name = key version_id = None elif isinstance(key, tuple) and len(key) == 2: key_name, version_id = key elif (isinstance(key, Key) or isinstance(key, DeleteMarker)) and key.name: key_name = key.name version_id = key.version_id else: if isinstance(key, Prefix): key_name = key.name code = 'PrefixSkipped' # Don't delete Prefix else: key_name = repr(key) # try get a string code = 'InvalidArgument' # other unknown type message = 'Invalid. No delete action taken for this object.' 
error = Error(key_name, code=code, message=message) result.errors.append(error) continue count += 1 data += u"<Object><Key>%s</Key>" % xml.sax.saxutils.escape(key_name) if version_id: data += u"<VersionId>%s</VersionId>" % version_id data += u"</Object>" data += u"</Delete>" if count <= 0: return False # no more data = data.encode('utf-8') fp = StringIO.StringIO(data) md5 = boto.utils.compute_md5(fp) hdrs['Content-MD5'] = md5[1] hdrs['Content-Type'] = 'text/xml' if mfa_token: hdrs[provider.mfa_header] = ' '.join(mfa_token) response = self.connection.make_request('POST', self.name, headers=hdrs, query_args=query_args, data=data) body = response.read() if response.status == 200: h = handler.XmlHandler(result, self) xml.sax.parseString(body, h) return count >= 1000 # more? else: raise provider.storage_response_error(response.status, response.reason, body) while delete_keys2(headers): pass return result def delete_key(self, key_name, headers=None, version_id=None, mfa_token=None): """ Deletes a key from the bucket. If a version_id is provided, only that version of the key will be deleted. :type key_name: string :param key_name: The key name to delete :type version_id: string :param version_id: The version ID (optional) :type mfa_token: tuple or list of strings :param mfa_token: A tuple or list consisting of the serial number from the MFA device and the current value of the six-digit token associated with the device. This value is required anytime you are deleting versioned objects from a bucket that has the MFADelete option on the bucket. :rtype: :class:`boto.s3.key.Key` or subclass :returns: A key object holding information on what was deleted. The Caller can see if a delete_marker was created or removed and what version_id the delete created or removed. """ if not key_name: raise ValueError('Empty key names are not allowed') return self._delete_key_internal(key_name, headers=headers, version_id=version_id, mfa_token=mfa_token, query_args_l=None) def _delete_key_internal(self, key_name, headers=None, version_id=None, mfa_token=None, query_args_l=None): query_args_l = query_args_l or [] provider = self.connection.provider if version_id: query_args_l.append('versionId=%s' % version_id) query_args = '&'.join(query_args_l) or None if mfa_token: if not headers: headers = {} headers[provider.mfa_header] = ' '.join(mfa_token) response = self.connection.make_request('DELETE', self.name, key_name, headers=headers, query_args=query_args) body = response.read() if response.status != 204: raise provider.storage_response_error(response.status, response.reason, body) else: # return a key object with information on what was deleted. k = self.key_class(self) k.name = key_name k.handle_version_headers(response) k.handle_addl_headers(response.getheaders()) return k def copy_key(self, new_key_name, src_bucket_name, src_key_name, metadata=None, src_version_id=None, storage_class='STANDARD', preserve_acl=False, encrypt_key=False, headers=None, query_args=None): """ Create a new key in the bucket by copying another existing key. :type new_key_name: string :param new_key_name: The name of the new key :type src_bucket_name: string :param src_bucket_name: The name of the source bucket :type src_key_name: string :param src_key_name: The name of the source key :type src_version_id: string :param src_version_id: The version id for the key. This param is optional. If not specified, the newest version of the key will be copied. :type metadata: dict :param metadata: Metadata to be associated with new key. 
If metadata is supplied, it will replace the metadata of the source key being copied. If no metadata is supplied, the source key's metadata will be copied to the new key. :type storage_class: string :param storage_class: The storage class of the new key. By default, the new key will use the standard storage class. Possible values are: STANDARD | REDUCED_REDUNDANCY :type preserve_acl: bool :param preserve_acl: If True, the ACL from the source key will be copied to the destination key. If False, the destination key will have the default ACL. Note that preserving the ACL in the new key object will require two additional API calls to S3, one to retrieve the current ACL and one to set that ACL on the new object. If you don't care about the ACL, a value of False will be significantly more efficient. :type encrypt_key: bool :param encrypt_key: If True, the new copy of the object will be encrypted on the server-side by S3 and will be stored in an encrypted form while at rest in S3. :type headers: dict :param headers: A dictionary of header name/value pairs. :type query_args: string :param query_args: A string of additional querystring arguments to append to the request :rtype: :class:`boto.s3.key.Key` or subclass :returns: An instance of the newly created key object """ headers = headers or {} provider = self.connection.provider src_key_name = boto.utils.get_utf8_value(src_key_name) if preserve_acl: if self.name == src_bucket_name: src_bucket = self else: src_bucket = self.connection.get_bucket( src_bucket_name, validate=False) acl = src_bucket.get_xml_acl(src_key_name) if encrypt_key: headers[provider.server_side_encryption_header] = 'AES256' src = '%s/%s' % (src_bucket_name, urllib.quote(src_key_name)) if src_version_id: src += '?versionId=%s' % src_version_id headers[provider.copy_source_header] = str(src) # make sure storage_class_header key exists before accessing it if provider.storage_class_header and storage_class: headers[provider.storage_class_header] = storage_class if metadata is not None: headers[provider.metadata_directive_header] = 'REPLACE' headers = boto.utils.merge_meta(headers, metadata, provider) elif not query_args: # Can't use this header with multi-part copy. 
headers[provider.metadata_directive_header] = 'COPY' response = self.connection.make_request('PUT', self.name, new_key_name, headers=headers, query_args=query_args) body = response.read() if response.status == 200: key = self.new_key(new_key_name) h = handler.XmlHandler(key, self) xml.sax.parseString(body, h) if hasattr(key, 'Error'): raise provider.storage_copy_error(key.Code, key.Message, body) key.handle_version_headers(response) key.handle_addl_headers(response.getheaders()) if preserve_acl: self.set_xml_acl(acl, new_key_name) return key else: raise provider.storage_response_error(response.status, response.reason, body) def set_canned_acl(self, acl_str, key_name='', headers=None, version_id=None): assert acl_str in CannedACLStrings if headers: headers[self.connection.provider.acl_header] = acl_str else: headers = {self.connection.provider.acl_header: acl_str} query_args = 'acl' if version_id: query_args += '&versionId=%s' % version_id response = self.connection.make_request('PUT', self.name, key_name, headers=headers, query_args=query_args) body = response.read() if response.status != 200: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def get_xml_acl(self, key_name='', headers=None, version_id=None): query_args = 'acl' if version_id: query_args += '&versionId=%s' % version_id response = self.connection.make_request('GET', self.name, key_name, query_args=query_args, headers=headers) body = response.read() if response.status != 200: raise self.connection.provider.storage_response_error( response.status, response.reason, body) return body def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None, query_args='acl'): if version_id: query_args += '&versionId=%s' % version_id response = self.connection.make_request('PUT', self.name, key_name, data=acl_str.encode('UTF-8'), query_args=query_args, headers=headers) body = response.read() if response.status != 200: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None): if isinstance(acl_or_str, Policy): self.set_xml_acl(acl_or_str.to_xml(), key_name, headers, version_id) else: self.set_canned_acl(acl_or_str, key_name, headers, version_id) def get_acl(self, key_name='', headers=None, version_id=None): query_args = 'acl' if version_id: query_args += '&versionId=%s' % version_id response = self.connection.make_request('GET', self.name, key_name, query_args=query_args, headers=headers) body = response.read() if response.status == 200: policy = Policy(self) h = handler.XmlHandler(policy, self) xml.sax.parseString(body, h) return policy else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def set_subresource(self, subresource, value, key_name='', headers=None, version_id=None): """ Set a subresource for a bucket or key. :type subresource: string :param subresource: The subresource to set. :type value: string :param value: The value of the subresource. :type key_name: string :param key_name: The key to operate on, or None to operate on the bucket. :type headers: dict :param headers: Additional HTTP headers to include in the request. :type src_version_id: string :param src_version_id: Optional. The version id of the key to operate on. If not specified, operate on the newest version. 
""" if not subresource: raise TypeError('set_subresource called with subresource=None') query_args = subresource if version_id: query_args += '&versionId=%s' % version_id response = self.connection.make_request('PUT', self.name, key_name, data=value.encode('UTF-8'), query_args=query_args, headers=headers) body = response.read() if response.status != 200: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def get_subresource(self, subresource, key_name='', headers=None, version_id=None): """ Get a subresource for a bucket or key. :type subresource: string :param subresource: The subresource to get. :type key_name: string :param key_name: The key to operate on, or None to operate on the bucket. :type headers: dict :param headers: Additional HTTP headers to include in the request. :type src_version_id: string :param src_version_id: Optional. The version id of the key to operate on. If not specified, operate on the newest version. :rtype: string :returns: The value of the subresource. """ if not subresource: raise TypeError('get_subresource called with subresource=None') query_args = subresource if version_id: query_args += '&versionId=%s' % version_id response = self.connection.make_request('GET', self.name, key_name, query_args=query_args, headers=headers) body = response.read() if response.status != 200: raise self.connection.provider.storage_response_error( response.status, response.reason, body) return body def make_public(self, recursive=False, headers=None): self.set_canned_acl('public-read', headers=headers) if recursive: for key in self: self.set_canned_acl('public-read', key.name, headers=headers) def add_email_grant(self, permission, email_address, recursive=False, headers=None): """ Convenience method that provides a quick way to add an email grant to a bucket. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to S3. :type permission: string :param permission: The permission being granted. Should be one of: (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL). :type email_address: string :param email_address: The email address associated with the AWS account your are granting the permission to. :type recursive: boolean :param recursive: A boolean value to controls whether the command will apply the grant to all keys within the bucket or not. The default value is False. By passing a True value, the call will iterate through all keys in the bucket and apply the same grant to each key. CAUTION: If you have a lot of keys, this could take a long time! """ if permission not in S3Permissions: raise self.connection.provider.storage_permissions_error( 'Unknown Permission: %s' % permission) policy = self.get_acl(headers=headers) policy.acl.add_email_grant(permission, email_address) self.set_acl(policy, headers=headers) if recursive: for key in self: key.add_email_grant(permission, email_address, headers=headers) def add_user_grant(self, permission, user_id, recursive=False, headers=None, display_name=None): """ Convenience method that provides a quick way to add a canonical user grant to a bucket. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to S3. :type permission: string :param permission: The permission being granted. Should be one of: (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL). 
:type user_id: string :param user_id: The canonical user id associated with the AWS account your are granting the permission to. :type recursive: boolean :param recursive: A boolean value to controls whether the command will apply the grant to all keys within the bucket or not. The default value is False. By passing a True value, the call will iterate through all keys in the bucket and apply the same grant to each key. CAUTION: If you have a lot of keys, this could take a long time! :type display_name: string :param display_name: An option string containing the user's Display Name. Only required on Walrus. """ if permission not in S3Permissions: raise self.connection.provider.storage_permissions_error( 'Unknown Permission: %s' % permission) policy = self.get_acl(headers=headers) policy.acl.add_user_grant(permission, user_id, display_name=display_name) self.set_acl(policy, headers=headers) if recursive: for key in self: key.add_user_grant(permission, user_id, headers=headers, display_name=display_name) def list_grants(self, headers=None): policy = self.get_acl(headers=headers) return policy.acl.grants def get_location(self): """ Returns the LocationConstraint for the bucket. :rtype: str :return: The LocationConstraint for the bucket or the empty string if no constraint was specified when bucket was created. """ response = self.connection.make_request('GET', self.name, query_args='location') body = response.read() if response.status == 200: rs = ResultSet(self) h = handler.XmlHandler(rs, self) xml.sax.parseString(body, h) return rs.LocationConstraint else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def set_xml_logging(self, logging_str, headers=None): """ Set logging on a bucket directly to the given xml string. :type logging_str: unicode string :param logging_str: The XML for the bucketloggingstatus which will be set. The string will be converted to utf-8 before it is sent. Usually, you will obtain this XML from the BucketLogging object. :rtype: bool :return: True if ok or raises an exception. """ body = logging_str.encode('utf-8') response = self.connection.make_request('PUT', self.name, data=body, query_args='logging', headers=headers) body = response.read() if response.status == 200: return True else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def enable_logging(self, target_bucket, target_prefix='', grants=None, headers=None): """ Enable logging on a bucket. :type target_bucket: bucket or string :param target_bucket: The bucket to log to. :type target_prefix: string :param target_prefix: The prefix which should be prepended to the generated log files written to the target_bucket. :type grants: list of Grant objects :param grants: A list of extra permissions which will be granted on the log files which are created. :rtype: bool :return: True if ok or raises an exception. """ if isinstance(target_bucket, Bucket): target_bucket = target_bucket.name blogging = BucketLogging(target=target_bucket, prefix=target_prefix, grants=grants) return self.set_xml_logging(blogging.to_xml(), headers=headers) def disable_logging(self, headers=None): """ Disable logging on a bucket. :rtype: bool :return: True if ok or raises an exception. """ blogging = BucketLogging() return self.set_xml_logging(blogging.to_xml(), headers=headers) def get_logging_status(self, headers=None): """ Get the logging status for this bucket. 
:rtype: :class:`boto.s3.bucketlogging.BucketLogging` :return: A BucketLogging object for this bucket. """ response = self.connection.make_request('GET', self.name, query_args='logging', headers=headers) body = response.read() if response.status == 200: blogging = BucketLogging() h = handler.XmlHandler(blogging, self) xml.sax.parseString(body, h) return blogging else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def set_as_logging_target(self, headers=None): """ Setup the current bucket as a logging target by granting the necessary permissions to the LogDelivery group to write log files to this bucket. """ policy = self.get_acl(headers=headers) g1 = Grant(permission='WRITE', type='Group', uri=self.LoggingGroup) g2 = Grant(permission='READ_ACP', type='Group', uri=self.LoggingGroup) policy.acl.add_grant(g1) policy.acl.add_grant(g2) self.set_acl(policy, headers=headers) def get_request_payment(self, headers=None): response = self.connection.make_request('GET', self.name, query_args='requestPayment', headers=headers) body = response.read() if response.status == 200: return body else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def set_request_payment(self, payer='BucketOwner', headers=None): body = self.BucketPaymentBody % payer response = self.connection.make_request('PUT', self.name, data=body, query_args='requestPayment', headers=headers) body = response.read() if response.status == 200: return True else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def configure_versioning(self, versioning, mfa_delete=False, mfa_token=None, headers=None): """ Configure versioning for this bucket. ..note:: This feature is currently in beta. :type versioning: bool :param versioning: A boolean indicating whether version is enabled (True) or disabled (False). :type mfa_delete: bool :param mfa_delete: A boolean indicating whether the Multi-Factor Authentication Delete feature is enabled (True) or disabled (False). If mfa_delete is enabled then all Delete operations will require the token from your MFA device to be passed in the request. :type mfa_token: tuple or list of strings :param mfa_token: A tuple or list consisting of the serial number from the MFA device and the current value of the six-digit token associated with the device. This value is required when you are changing the status of the MfaDelete property of the bucket. """ if versioning: ver = 'Enabled' else: ver = 'Suspended' if mfa_delete: mfa = 'Enabled' else: mfa = 'Disabled' body = self.VersioningBody % (ver, mfa) if mfa_token: if not headers: headers = {} provider = self.connection.provider headers[provider.mfa_header] = ' '.join(mfa_token) response = self.connection.make_request('PUT', self.name, data=body, query_args='versioning', headers=headers) body = response.read() if response.status == 200: return True else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def get_versioning_status(self, headers=None): """ Returns the current status of versioning on the bucket. :rtype: dict :returns: A dictionary containing a key named 'Versioning' that can have a value of either Enabled, Disabled, or Suspended. Also, if MFADelete has ever been enabled on the bucket, the dictionary will contain a key named 'MFADelete' which will have a value of either Enabled or Suspended. 
""" response = self.connection.make_request('GET', self.name, query_args='versioning', headers=headers) body = response.read() boto.log.debug(body) if response.status == 200: d = {} ver = re.search(self.VersionRE, body) if ver: d['Versioning'] = ver.group(1) mfa = re.search(self.MFADeleteRE, body) if mfa: d['MfaDelete'] = mfa.group(1) return d else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def configure_lifecycle(self, lifecycle_config, headers=None): """ Configure lifecycle for this bucket. :type lifecycle_config: :class:`boto.s3.lifecycle.Lifecycle` :param lifecycle_config: The lifecycle configuration you want to configure for this bucket. """ xml = lifecycle_config.to_xml() xml = xml.encode('utf-8') fp = StringIO.StringIO(xml) md5 = boto.utils.compute_md5(fp) if headers is None: headers = {} headers['Content-MD5'] = md5[1] headers['Content-Type'] = 'text/xml' response = self.connection.make_request('PUT', self.name, data=fp.getvalue(), query_args='lifecycle', headers=headers) body = response.read() if response.status == 200: return True else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def get_lifecycle_config(self, headers=None): """ Returns the current lifecycle configuration on the bucket. :rtype: :class:`boto.s3.lifecycle.Lifecycle` :returns: A LifecycleConfig object that describes all current lifecycle rules in effect for the bucket. """ response = self.connection.make_request('GET', self.name, query_args='lifecycle', headers=headers) body = response.read() boto.log.debug(body) if response.status == 200: lifecycle = Lifecycle() h = handler.XmlHandler(lifecycle, self) xml.sax.parseString(body, h) return lifecycle else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def delete_lifecycle_configuration(self, headers=None): """ Removes all lifecycle configuration from the bucket. """ response = self.connection.make_request('DELETE', self.name, query_args='lifecycle', headers=headers) body = response.read() boto.log.debug(body) if response.status == 204: return True else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def configure_website(self, suffix=None, error_key=None, redirect_all_requests_to=None, routing_rules=None, headers=None): """ Configure this bucket to act as a website :type suffix: str :param suffix: Suffix that is appended to a request that is for a "directory" on the website endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not be empty and must not include a slash character. :type error_key: str :param error_key: The object key name to use when a 4XX class error occurs. This is optional. :type redirect_all_requests_to: :class:`boto.s3.website.RedirectLocation` :param redirect_all_requests_to: Describes the redirect behavior for every request to this bucket's website endpoint. If this value is non None, no other values are considered when configuring the website configuration for the bucket. This is an instance of ``RedirectLocation``. :type routing_rules: :class:`boto.s3.website.RoutingRules` :param routing_rules: Object which specifies conditions and redirects that apply when the conditions are met. 
""" config = website.WebsiteConfiguration( suffix, error_key, redirect_all_requests_to, routing_rules) return self.set_website_configuration(config, headers=headers) def set_website_configuration(self, config, headers=None): """ :type config: boto.s3.website.WebsiteConfiguration :param config: Configuration data """ return self.set_website_configuration_xml(config.to_xml(), headers=headers) def set_website_configuration_xml(self, xml, headers=None): """Upload xml website configuration""" response = self.connection.make_request('PUT', self.name, data=xml, query_args='website', headers=headers) body = response.read() if response.status == 200: return True else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def get_website_configuration(self, headers=None): """ Returns the current status of website configuration on the bucket. :rtype: dict :returns: A dictionary containing a Python representation of the XML response from S3. The overall structure is: * WebsiteConfiguration * IndexDocument * Suffix : suffix that is appended to request that is for a "directory" on the website endpoint * ErrorDocument * Key : name of object to serve when an error occurs """ return self.get_website_configuration_with_xml(headers)[0] def get_website_configuration_obj(self, headers=None): """Get the website configuration as a :class:`boto.s3.website.WebsiteConfiguration` object. """ config_xml = self.get_website_configuration_xml(headers=headers) config = website.WebsiteConfiguration() h = handler.XmlHandler(config, self) xml.sax.parseString(config_xml, h) return config def get_website_configuration_with_xml(self, headers=None): """ Returns the current status of website configuration on the bucket as unparsed XML. :rtype: 2-Tuple :returns: 2-tuple containing: 1) A dictionary containing a Python representation \ of the XML response. The overall structure is: * WebsiteConfiguration * IndexDocument * Suffix : suffix that is appended to request that \ is for a "directory" on the website endpoint * ErrorDocument * Key : name of object to serve when an error occurs 2) unparsed XML describing the bucket's website configuration """ body = self.get_website_configuration_xml(headers=headers) e = boto.jsonresponse.Element() h = boto.jsonresponse.XmlHandler(e, None) h.parse(body) return e, body def get_website_configuration_xml(self, headers=None): """Get raw website configuration xml""" response = self.connection.make_request('GET', self.name, query_args='website', headers=headers) body = response.read() boto.log.debug(body) if response.status != 200: raise self.connection.provider.storage_response_error( response.status, response.reason, body) return body def delete_website_configuration(self, headers=None): """ Removes all website configuration from the bucket. """ response = self.connection.make_request('DELETE', self.name, query_args='website', headers=headers) body = response.read() boto.log.debug(body) if response.status == 204: return True else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def get_website_endpoint(self): """ Returns the fully qualified hostname to use is you want to access this bucket as a website. This doesn't validate whether the bucket has been correctly configured as a website or not. 
""" l = [self.name] l.append(S3WebsiteEndpointTranslate.translate_region(self.get_location())) l.append('.'.join(self.connection.host.split('.')[-2:])) return '.'.join(l) def get_policy(self, headers=None): """ Returns the JSON policy associated with the bucket. The policy is returned as an uninterpreted JSON string. """ response = self.connection.make_request('GET', self.name, query_args='policy', headers=headers) body = response.read() if response.status == 200: return body else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def set_policy(self, policy, headers=None): """ Add or replace the JSON policy associated with the bucket. :type policy: str :param policy: The JSON policy as a string. """ response = self.connection.make_request('PUT', self.name, data=policy, query_args='policy', headers=headers) body = response.read() if response.status >= 200 and response.status <= 204: return True else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def delete_policy(self, headers=None): response = self.connection.make_request('DELETE', self.name, data='/?policy', query_args='policy', headers=headers) body = response.read() if response.status >= 200 and response.status <= 204: return True else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def set_cors_xml(self, cors_xml, headers=None): """ Set the CORS (Cross-Origin Resource Sharing) for a bucket. :type cors_xml: str :param cors_xml: The XML document describing your desired CORS configuration. See the S3 documentation for details of the exact syntax required. """ fp = StringIO.StringIO(cors_xml) md5 = boto.utils.compute_md5(fp) if headers is None: headers = {} headers['Content-MD5'] = md5[1] headers['Content-Type'] = 'text/xml' response = self.connection.make_request('PUT', self.name, data=fp.getvalue(), query_args='cors', headers=headers) body = response.read() if response.status == 200: return True else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def set_cors(self, cors_config, headers=None): """ Set the CORS for this bucket given a boto CORSConfiguration object. :type cors_config: :class:`boto.s3.cors.CORSConfiguration` :param cors_config: The CORS configuration you want to configure for this bucket. """ return self.set_cors_xml(cors_config.to_xml()) def get_cors_xml(self, headers=None): """ Returns the current CORS configuration on the bucket as an XML document. """ response = self.connection.make_request('GET', self.name, query_args='cors', headers=headers) body = response.read() boto.log.debug(body) if response.status == 200: return body else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def get_cors(self, headers=None): """ Returns the current CORS configuration on the bucket. :rtype: :class:`boto.s3.cors.CORSConfiguration` :returns: A CORSConfiguration object that describes all current CORS rules in effect for the bucket. """ body = self.get_cors_xml(headers) cors = CORSConfiguration() h = handler.XmlHandler(cors, self) xml.sax.parseString(body, h) return cors def delete_cors(self, headers=None): """ Removes all CORS configuration from the bucket. 
""" response = self.connection.make_request('DELETE', self.name, query_args='cors', headers=headers) body = response.read() boto.log.debug(body) if response.status == 204: return True else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def initiate_multipart_upload(self, key_name, headers=None, reduced_redundancy=False, metadata=None, encrypt_key=False, policy=None): """ Start a multipart upload operation. :type key_name: string :param key_name: The name of the key that will ultimately result from this multipart upload operation. This will be exactly as the key appears in the bucket after the upload process has been completed. :type headers: dict :param headers: Additional HTTP headers to send and store with the resulting key in S3. :type reduced_redundancy: boolean :param reduced_redundancy: In multipart uploads, the storage class is specified when initiating the upload, not when uploading individual parts. So if you want the resulting key to use the reduced redundancy storage class set this flag when you initiate the upload. :type metadata: dict :param metadata: Any metadata that you would like to set on the key that results from the multipart upload. :type encrypt_key: bool :param encrypt_key: If True, the new copy of the object will be encrypted on the server-side by S3 and will be stored in an encrypted form while at rest in S3. :type policy: :class:`boto.s3.acl.CannedACLStrings` :param policy: A canned ACL policy that will be applied to the new key (once completed) in S3. """ query_args = 'uploads' provider = self.connection.provider headers = headers or {} if policy: headers[provider.acl_header] = policy if reduced_redundancy: storage_class_header = provider.storage_class_header if storage_class_header: headers[storage_class_header] = 'REDUCED_REDUNDANCY' # TODO: what if the provider doesn't support reduced redundancy? # (see boto.s3.key.Key.set_contents_from_file) if encrypt_key: headers[provider.server_side_encryption_header] = 'AES256' if metadata is None: metadata = {} headers = boto.utils.merge_meta(headers, metadata, self.connection.provider) response = self.connection.make_request('POST', self.name, key_name, query_args=query_args, headers=headers) body = response.read() boto.log.debug(body) if response.status == 200: resp = MultiPartUpload(self) h = handler.XmlHandler(resp, self) xml.sax.parseString(body, h) return resp else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def complete_multipart_upload(self, key_name, upload_id, xml_body, headers=None): """ Complete a multipart upload operation. """ query_args = 'uploadId=%s' % upload_id if headers is None: headers = {} headers['Content-Type'] = 'text/xml' response = self.connection.make_request('POST', self.name, key_name, query_args=query_args, headers=headers, data=xml_body) contains_error = False body = response.read() # Some errors will be reported in the body of the response # even though the HTTP response code is 200. This check # does a quick and dirty peek in the body for an error element. if body.find('<Error>') > 0: contains_error = True boto.log.debug(body) if response.status == 200 and not contains_error: resp = CompleteMultiPartUpload(self) h = handler.XmlHandler(resp, self) xml.sax.parseString(body, h) # Use a dummy key to parse various response headers # for versioning, encryption info and then explicitly # set the completed MPU object values from key. 
k = self.key_class(self) k.handle_version_headers(response) k.handle_encryption_headers(response) resp.version_id = k.version_id resp.encrypted = k.encrypted return resp else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def cancel_multipart_upload(self, key_name, upload_id, headers=None): query_args = 'uploadId=%s' % upload_id response = self.connection.make_request('DELETE', self.name, key_name, query_args=query_args, headers=headers) body = response.read() boto.log.debug(body) if response.status != 204: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def delete(self, headers=None): return self.connection.delete_bucket(self.name, headers=headers) def get_tags(self): response = self.get_xml_tags() tags = Tags() h = handler.XmlHandler(tags, self) xml.sax.parseString(response, h) return tags def get_xml_tags(self): response = self.connection.make_request('GET', self.name, query_args='tagging', headers=None) body = response.read() if response.status == 200: return body else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def set_xml_tags(self, tag_str, headers=None, query_args='tagging'): if headers is None: headers = {} md5 = boto.utils.compute_md5(StringIO.StringIO(tag_str)) headers['Content-MD5'] = md5[1] headers['Content-Type'] = 'text/xml' response = self.connection.make_request('PUT', self.name, data=tag_str.encode('utf-8'), query_args=query_args, headers=headers) body = response.read() if response.status != 204: raise self.connection.provider.storage_response_error( response.status, response.reason, body) return True def set_tags(self, tags, headers=None): return self.set_xml_tags(tags.to_xml(), headers=headers) def delete_tags(self, headers=None): response = self.connection.make_request('DELETE', self.name, query_args='tagging', headers=headers) body = response.read() boto.log.debug(body) if response.status == 204: return True else: raise self.connection.provider.storage_response_error( response.status, response.reason, body)
1
9,523
No docs for the new param here?
boto-boto
py
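The review comment in the boto row above asks for documentation of the new `headers` parameter on `Bucket.delete_keys`. A minimal sketch of the kind of docstring addition being requested is below; the wording, and the note about which headers the method sets itself, are assumptions inferred from the `oldf` body shown above, not text boto actually merged.

```python
class Bucket(object):
    # ...other methods from boto/s3/bucket.py elided...

    def delete_keys(self, keys, quiet=False, mfa_token=None, headers=None):
        """Deletes a set of keys using S3's Multi-object delete API.

        :type headers: dict
        :param headers: Additional HTTP headers to send with each
            multi-object DELETE request. (Assumed wording -- per the
            implementation in the row above, the method adds its own
            Content-MD5 and Content-Type entries on top of these.)
        """
```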
@@ -1,5 +1,5 @@ /* - * Copyright ConsenSys AG. + * Copyright Contributors to Hyperledger Besu. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods; import static org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcError.BLOCK_NOT_FOUND; import static org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcError.INTERNAL_ERROR; import org.hyperledger.besu.datatypes.Hash; import org.hyperledger.besu.datatypes.Wei; import org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcErrorConverter; import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod; import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequestContext; import org.hyperledger.besu.ethereum.api.jsonrpc.internal.exception.InvalidJsonRpcParameters; import org.hyperledger.besu.ethereum.api.jsonrpc.internal.parameters.BlockParameterOrBlockHash; import org.hyperledger.besu.ethereum.api.jsonrpc.internal.parameters.JsonCallParameter; import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcError; import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcErrorResponse; import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcResponse; import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcSuccessResponse; import org.hyperledger.besu.ethereum.api.query.BlockchainQueries; import org.hyperledger.besu.ethereum.core.BlockHeader; import org.hyperledger.besu.ethereum.mainnet.ImmutableTransactionValidationParams; import org.hyperledger.besu.ethereum.mainnet.TransactionValidationParams; import org.hyperledger.besu.ethereum.mainnet.ValidationResult; import org.hyperledger.besu.ethereum.processing.TransactionProcessingResult; import org.hyperledger.besu.ethereum.transaction.TransactionInvalidReason; import org.hyperledger.besu.ethereum.transaction.TransactionSimulator; import org.hyperledger.besu.ethereum.transaction.TransactionSimulatorResult; import org.hyperledger.besu.evm.tracing.OperationTracer; public class EthCall extends AbstractBlockParameterOrBlockHashMethod { private final TransactionSimulator transactionSimulator; public EthCall( final BlockchainQueries blockchainQueries, final TransactionSimulator transactionSimulator) { super(blockchainQueries); this.transactionSimulator = transactionSimulator; } @Override public String getName() { return RpcMethod.ETH_CALL.getMethodName(); } @Override protected BlockParameterOrBlockHash blockParameterOrBlockHash( final JsonRpcRequestContext request) { return request.getRequiredParameter(1, BlockParameterOrBlockHash.class); } @Override protected Object resultByBlockHash(final JsonRpcRequestContext request, final Hash blockHash) { JsonCallParameter callParams = validateAndGetCallParams(request); final BlockHeader header = blockchainQueries.get().getBlockHeaderByHash(blockHash).orElse(null); if (header == null) { return errorResponse(request, BLOCK_NOT_FOUND); } return transactionSimulator .process( callParams, buildTransactionValidationParams(header, callParams), 
OperationTracer.NO_TRACING, header) .map( result -> result .getValidationResult() .either( (() -> result.isSuccessful() ? new JsonRpcSuccessResponse( request.getRequest().getId(), result.getOutput().toString()) : errorResponse(request, result)), reason -> new JsonRpcErrorResponse( request.getRequest().getId(), JsonRpcErrorConverter.convertTransactionInvalidReason(reason)))) .orElse(errorResponse(request, INTERNAL_ERROR)); } @Override public JsonRpcResponse response(final JsonRpcRequestContext requestContext) { return (JsonRpcResponse) handleParamTypes(requestContext); } private JsonRpcErrorResponse errorResponse( final JsonRpcRequestContext request, final TransactionSimulatorResult result) { final JsonRpcError jsonRpcError; final ValidationResult<TransactionInvalidReason> validationResult = result.getValidationResult(); if (validationResult != null && !validationResult.isValid()) { jsonRpcError = JsonRpcErrorConverter.convertTransactionInvalidReason( validationResult.getInvalidReason()); } else { final TransactionProcessingResult resultTrx = result.getResult(); if (resultTrx != null && resultTrx.getRevertReason().isPresent()) { jsonRpcError = JsonRpcError.REVERT_ERROR; jsonRpcError.setData(resultTrx.getRevertReason().get().toHexString()); } else { jsonRpcError = JsonRpcError.INTERNAL_ERROR; } } return errorResponse(request, jsonRpcError); } private JsonRpcErrorResponse errorResponse( final JsonRpcRequestContext request, final JsonRpcError jsonRpcError) { return new JsonRpcErrorResponse(request.getRequest().getId(), jsonRpcError); } private JsonCallParameter validateAndGetCallParams(final JsonRpcRequestContext request) { final JsonCallParameter callParams = request.getRequiredParameter(0, JsonCallParameter.class); if (callParams.getTo() == null) { throw new InvalidJsonRpcParameters("Missing \"to\" field in call arguments"); } if (callParams.getGasPrice() != null && (callParams.getMaxFeePerGas().isPresent() || callParams.getMaxPriorityFeePerGas().isPresent())) { throw new InvalidJsonRpcParameters( "gasPrice cannot be used with maxFeePerGas or maxPriorityFeePerGas"); } return callParams; } private TransactionValidationParams buildTransactionValidationParams( final BlockHeader header, final JsonCallParameter callParams) { ImmutableTransactionValidationParams.Builder transactionValidationParams = ImmutableTransactionValidationParams.builder() .from(TransactionValidationParams.transactionSimulator()); // if it is not set explicitly whether we want a strict check of the balance or not. 
this will // be decided according to the provided parameters if (callParams.isMaybeStrict().isEmpty()) { boolean isZeroGasPrice = callParams.getGasPrice() == null || Wei.ZERO.equals(callParams.getGasPrice()); header .getBaseFee() .ifPresentOrElse( __ -> { boolean isZeroMaxFeePerGas = callParams.getMaxFeePerGas().orElse(Wei.ZERO).equals(Wei.ZERO); boolean isZeroMaxPriorityFeePerGas = callParams.getMaxPriorityFeePerGas().orElse(Wei.ZERO).equals(Wei.ZERO); if (isZeroGasPrice && isZeroMaxFeePerGas && isZeroMaxPriorityFeePerGas) { // After 1559, when gas pricing is not provided, 0 is used and the balance is not // checked transactionValidationParams.isAllowExceedingBalance(true); } else { // After 1559, when gas price is provided, it is interpreted as both the max and // priority fee and the balance is checked transactionValidationParams.isAllowExceedingBalance(false); } }, () -> { // Prior 1559, when gas price == 0 or is not provided the balance is not checked transactionValidationParams.isAllowExceedingBalance(isZeroGasPrice); }); } else { transactionValidationParams.isAllowExceedingBalance( !callParams.isMaybeStrict().orElse(Boolean.FALSE)); } return transactionValidationParams.build(); } }
1
26,179
This file is unrelated to the aims of this PR. Please remove. If it was needed to demonstrate Sonar deltas, it has served that purpose and can be removed.
hyperledger-besu
java
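The `buildTransactionValidationParams` logic in the Besu row above decides, per block, whether `eth_call` may skip the sender balance check. A re-expression of that decision in Python may make the pre/post-1559 cases easier to follow; the function name and parameters are illustrative only, and the explicit `strict` override branch from the Java code is omitted.

```python
# A re-expression of the balance-check decision encoded in the EthCall row
# above. Names (allow_exceeding_balance, base_fee, ...) are illustrative,
# not part of any real client API.

from typing import Optional

def allow_exceeding_balance(gas_price: Optional[int],
                            max_fee_per_gas: Optional[int],
                            max_priority_fee_per_gas: Optional[int],
                            base_fee: Optional[int]) -> bool:
    """Return True when eth_call should skip the sender balance check."""
    zero_gas_price = not gas_price  # None or 0
    if base_fee is not None:
        # Post-1559 block: skip the check only when no fee field is set.
        zero_max_fee = not max_fee_per_gas
        zero_max_priority = not max_priority_fee_per_gas
        return zero_gas_price and zero_max_fee and zero_max_priority
    # Pre-1559 block: gasPrice == 0 (or absent) means no balance check.
    return zero_gas_price

# Example: post-1559 block with no fee fields -> balance not checked.
assert allow_exceeding_balance(None, None, None, base_fee=7) is True
# Example: explicit gasPrice on a post-1559 block -> balance checked.
assert allow_exceeding_balance(10, None, None, base_fee=7) is False
```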
@@ -42,5 +42,12 @@ namespace OpenTelemetry.Trace.Export /// <param name="cancellationToken">Cancellation token.</param> /// <returns>Returns <see cref="Task"/>.</returns> public abstract Task ShutdownAsync(CancellationToken cancellationToken); + + /// <summary> + /// Flushes all activity objects that have been queued for processing. + /// </summary> + /// <param name="cancellationToken">Cancellation token.</param> + /// <returns>Returns <see cref="Task"/>.</returns> + public abstract Task ForceFlushAsync(CancellationToken cancellationToken); } }
1
// <copyright file="ActivityProcessor.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System.Diagnostics; using System.Threading; using System.Threading.Tasks; namespace OpenTelemetry.Trace.Export { /// <summary> /// Activity processor base class. /// </summary> public abstract class ActivityProcessor { /// <summary> /// Activity start hook. /// </summary> /// <param name="activity">Instance of activity to process.</param> public abstract void OnStart(Activity activity); /// <summary> /// Activity end hook. /// </summary> /// <param name="activity">Instance of activity to process.</param> public abstract void OnEnd(Activity activity); /// <summary> /// Shuts down Activity processor asynchronously. /// </summary> /// <param name="cancellationToken">Cancellation token.</param> /// <returns>Returns <see cref="Task"/>.</returns> public abstract Task ShutdownAsync(CancellationToken cancellationToken); } }
1
14,571
minor: `Export all ended spans to the configured Exporter that have not yet been exported.` - This is the spec description. Let's use something along those lines. "queue" is not necessarily present for all processors.
open-telemetry-opentelemetry-dotnet
.cs
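The reviewer in the OpenTelemetry row above points to the spec wording for force-flush: export all ended spans to the configured exporter that have not yet been exported. Below is a Python sketch of that contract; it is an analogy, not the actual .NET `ActivityProcessor` API, and the exporter's `export(list)` interface is assumed.

```python
# An analogous sketch of what ForceFlushAsync is specified to do: export
# every ended-but-unexported span, regardless of how the processor
# buffers them internally.

import threading

class SimpleBatchProcessor:
    def __init__(self, exporter):
        self._exporter = exporter  # assumed to expose export(list_of_spans)
        self._pending = []
        self._lock = threading.Lock()

    def on_end(self, span):
        # Ended spans accumulate until the next flush.
        with self._lock:
            self._pending.append(span)

    def force_flush(self):
        """Export every ended-but-unexported span immediately."""
        with self._lock:
            batch, self._pending = self._pending, []
        if batch:
            self._exporter.export(batch)
```

Whatever internal structure the processor uses (a queue, a list, or none at all), the observable effect of `force_flush` is the same: nothing that has ended remains unexported when it returns, which is why the spec phrasing avoids the word "queue".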
@@ -683,6 +683,8 @@ func (c *Client) reallyExecute(tid int, target *core.BuildTarget, command *pb.Co ActionDigest: digest, SkipCacheLookup: skipCacheLookup, }, updateProgress) + log.Debug("completed ExecuteAndWaitProgress() for %v", target.Label) + if err != nil { // Handle timing issues if we try to resume an execution as it fails. If we get a // "not found" we might find that it's already been completed and we can't resume.
1
// Package remote provides our interface to the Google remote execution APIs // (https://github.com/bazelbuild/remote-apis) which Please can use to distribute // work to remote servers. package remote import ( "context" "encoding/hex" "fmt" "io/ioutil" "os" "path" "path/filepath" "strings" "sync" "time" "github.com/bazelbuild/remote-apis-sdks/go/pkg/client" "github.com/bazelbuild/remote-apis-sdks/go/pkg/digest" "github.com/bazelbuild/remote-apis-sdks/go/pkg/filemetadata" "github.com/bazelbuild/remote-apis-sdks/go/pkg/retry" "github.com/bazelbuild/remote-apis-sdks/go/pkg/uploadinfo" fpb "github.com/bazelbuild/remote-apis/build/bazel/remote/asset/v1" pb "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" "github.com/bazelbuild/remote-apis/build/bazel/semver" "github.com/grpc-ecosystem/go-grpc-middleware/retry" "golang.org/x/sync/errgroup" "google.golang.org/genproto/googleapis/longrunning" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/durationpb" "gopkg.in/op/go-logging.v1" "github.com/thought-machine/please/src/core" "github.com/thought-machine/please/src/fs" ) var log = logging.MustGetLogger("remote") // The API version we support. var apiVersion = semver.SemVer{Major: 2} // A Client is the interface to the remote API. // // It provides a higher-level interface over the specific RPCs available. type Client struct { client *client.Client fetchClient fpb.FetchClient initOnce sync.Once state *core.BuildState err error // for initialisation instance string // Stored output directories from previously executed targets. // This isn't just a cache - it is needed for cases where we don't actually // have the files physically on disk. outputs map[core.BuildLabel]*pb.Directory outputMutex sync.RWMutex // The unstamped build action digests. Stamped and test digests are not stored. // This isn't just a cache - it is needed because building a target can modify the target and things like plz hash // --detailed and --shell will fail to get the right action digest. unstampedBuildActionDigests actionDigestMap // Used to control downloading targets (we must make sure we don't re-fetch them // while another target is trying to use them). // // This map is of effective type `map[*core.BuildTarget]*pendingDownload` downloads sync.Map // Server-sent cache properties maxBlobBatchSize int64 // Platform properties that we will request from the remote. // TODO(peterebden): this will need some modification for cross-compiling support. platform *pb.Platform // Path to the shell to use to execute actions in. shellPath string // User's home directory. userHome string // Stats used to report RPC data rates byteRateIn, byteRateOut, totalBytesIn, totalBytesOut int stats *statsHandler // Used to store and retrieve action results to reduce RPC calls when re-building targets mdStore buildMetadataStore // Passed to various SDK functions. fileMetadataCache filemetadata.Cache // existingBlobs is used to track the set of existing blobs remotely. 
existingBlobs map[string]struct{} existingBlobMutex sync.Mutex } type actionDigestMap struct { m sync.Map } func (m *actionDigestMap) Get(label core.BuildLabel) *pb.Digest { d, ok := m.m.Load(label) if !ok { panic(fmt.Sprintf("could not find action digest for label: %s", label.String())) } return d.(*pb.Digest) } func (m *actionDigestMap) Put(label core.BuildLabel, actionDigest *pb.Digest) { m.m.Store(label, actionDigest) } // A pendingDownload represents a pending download of a build target. It is used to // ensure we only download each target exactly once. type pendingDownload struct { once sync.Once err error // Any error if the download failed. } // New returns a new Client instance. // It begins the process of contacting the remote server but does not wait for it. func New(state *core.BuildState) *Client { c := &Client{ state: state, instance: state.Config.Remote.Instance, outputs: map[core.BuildLabel]*pb.Directory{}, mdStore: newDirMDStore(time.Duration(state.Config.Remote.CacheDuration)), existingBlobs: map[string]struct{}{ digest.Empty.Hash: {}, }, fileMetadataCache: filemetadata.NewNoopCache(), shellPath: state.Config.Remote.Shell, } c.stats = newStatsHandler(c) go c.CheckInitialised() // Kick off init now, but we don't have to wait for it. return c } // CheckInitialised checks that the client has connected to the server correctly. func (c *Client) CheckInitialised() error { c.initOnce.Do(c.init) return c.err } // Disconnect disconnects this client from the remote server. func (c *Client) Disconnect() error { if c.client != nil { log.Debug("Disconnecting from remote execution server...") return c.client.Close() } return nil } // init is passed to the sync.Once to do the actual initialisation. func (c *Client) init() { // Change grpc to log using our implementation grpclog.SetLoggerV2(&grpcLogMabob{}) var g errgroup.Group g.Go(c.initExec) if c.state.Config.Remote.AssetURL != "" { g.Go(c.initFetch) } c.err = g.Wait() if c.err != nil { log.Error("Error setting up remote execution client: %s", c.err) } } // initExec initialiases the remote execution client. func (c *Client) initExec() error { // Create a copy of the state where we can modify the config dialOpts, err := c.dialOpts() if err != nil { return err } client, err := client.NewClient(context.Background(), c.instance, client.DialParams{ Service: c.state.Config.Remote.URL, CASService: c.state.Config.Remote.CASURL, NoSecurity: !c.state.Config.Remote.Secure, TransportCredsOnly: c.state.Config.Remote.Secure, DialOpts: dialOpts, }, client.UseBatchOps(true), &client.TreeSymlinkOpts{Preserved: true}, client.RetryTransient(), client.RPCTimeouts(map[string]time.Duration{ "default": time.Duration(c.state.Config.Remote.Timeout), "GetCapabilities": 5 * time.Second, "BatchUpdateBlobs": time.Minute, "BatchReadBlobs": time.Minute, "GetTree": time.Minute, "Execute": 0, "WaitExecution": 0, })) if err != nil { return err } c.client = client // Extend timeouts a bit, RetryTransient only gives about 1.5 seconds total which isn't // necessarily very much if the other end needs to sort its life out. c.client.Retrier.Backoff = retry.ExponentialBackoff(500*time.Millisecond, 5*time.Second, retry.Attempts(8)) // Query the server for its capabilities. This tells us whether it is capable of // execution, caching or both. 
resp, err := c.client.GetCapabilities(context.Background()) if err != nil { return err } if lessThan(&apiVersion, resp.LowApiVersion) || lessThan(resp.HighApiVersion, &apiVersion) { return fmt.Errorf("Unsupported API version; we require %s but server only supports %s - %s", printVer(&apiVersion), printVer(resp.LowApiVersion), printVer(resp.HighApiVersion)) } caps := resp.CacheCapabilities if caps == nil { return fmt.Errorf("Cache capabilities not supported by server (we do not support execution-only servers)") } if err := c.chooseDigest(caps.DigestFunctions); err != nil { return err } c.maxBlobBatchSize = caps.MaxBatchTotalSizeBytes if c.maxBlobBatchSize == 0 { // No limit was set by the server, assume we are implicitly limited to 4MB (that's // gRPC's limit which most implementations do not seem to override). Round it down a // bit to allow a bit of serialisation overhead etc. c.maxBlobBatchSize = 4000000 } if c.shellPath == "" { // We have to run everything through a shell since our commands are arbitrary. // Unfortunately we can't just say "bash", we need an absolute path which is // a bit weird since it assumes that our absolute path is the same as the // remote one (which is probably OK on the same OS, but not between say Linux and // FreeBSD where bash is not idiomatically in the same place). bash, err := core.LookBuildPath("bash", c.state.Config) if err != nil { return fmt.Errorf("Failed to set path for bash: %w", err) } c.shellPath = bash } home, err := os.UserHomeDir() if err != nil { return fmt.Errorf("Failed to determine user home dir: %s", err) } c.userHome = home // Now check if it can do remote execution if resp.ExecutionCapabilities == nil { return fmt.Errorf("Remote execution is configured but the build server doesn't support it") } if err := c.chooseDigest([]pb.DigestFunction_Value{resp.ExecutionCapabilities.DigestFunction}); err != nil { return err } else if !resp.ExecutionCapabilities.ExecEnabled { return fmt.Errorf("Remote execution not enabled for this server") } c.platform = convertPlatform(c.state.Config.Remote.Platform) log.Debug("Remote execution client initialised") if c.state.Config.Remote.AssetURL == "" { c.fetchClient = fpb.NewFetchClient(client.Connection) } return nil } // initFetch initialises the remote fetch server. func (c *Client) initFetch() error { dialOpts, err := c.dialOpts() if err != nil { return err } if c.state.Config.Remote.Secure { dialOpts = append(dialOpts, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, ""))) } else { dialOpts = append(dialOpts, grpc.WithInsecure()) } conn, err := grpc.Dial(c.state.Config.Remote.AssetURL, append(dialOpts, grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor()))...) if err != nil { return fmt.Errorf("Failed to connect to the remote fetch server: %s", err) } c.fetchClient = fpb.NewFetchClient(conn) return nil } // chooseDigest selects a digest function that we will use.w func (c *Client) chooseDigest(fns []pb.DigestFunction_Value) error { systemFn := c.digestEnum() for _, fn := range fns { if fn == systemFn { return nil } } return fmt.Errorf("No acceptable hash function available; server supports %s but we require %s. 
Hint: you may need to set the hash function appropriately in the [build] section of your config", fns, systemFn) } // digestEnum returns a proto enum for the digest function of given name (as we name them in config) func (c *Client) digestEnum() pb.DigestFunction_Value { switch c.state.Config.Build.HashFunction { case "sha256": return pb.DigestFunction_SHA256 case "sha1": return pb.DigestFunction_SHA1 default: return pb.DigestFunction_UNKNOWN // Shouldn't get here } } // Build executes a remote build of the given target. func (c *Client) Build(tid int, target *core.BuildTarget) (*core.BuildMetadata, error) { if err := c.CheckInitialised(); err != nil { return nil, err } metadata, ar, digest, err := c.build(tid, target) if err != nil { return metadata, err } if c.state.TargetHasher != nil { hash, _ := hex.DecodeString(c.outputHash(ar)) c.state.TargetHasher.SetHash(target, hash) } if err := c.setOutputs(target, ar); err != nil { return metadata, c.wrapActionErr(err, digest) } if c.state.ShouldDownload(target) { if !c.outputsExist(target, digest) { c.state.LogBuildResult(tid, target, core.TargetBuilding, "Downloading") if err := c.download(target, func() error { return c.reallyDownload(target, digest, ar) }); err != nil { return metadata, err } } else { log.Debug("Not downloading outputs for %s, they are already up-to-date", target) // Ensure this is marked as already downloaded. v, _ := c.downloads.LoadOrStore(target, &pendingDownload{}) v.(*pendingDownload).once.Do(func() {}) } if err := c.downloadData(target); err != nil { return metadata, err } } return metadata, nil } // downloadData downloads all the runtime data for a target, recursively. func (c *Client) downloadData(target *core.BuildTarget) error { var g errgroup.Group for _, datum := range target.AllData() { if l, ok := datum.Label(); ok { t := c.state.Graph.TargetOrDie(l) g.Go(func() error { if err := c.Download(t); err != nil { return err } return c.downloadData(t) }) } } return g.Wait() } // Run runs a target on the remote executors. func (c *Client) Run(target *core.BuildTarget) error { if err := c.CheckInitialised(); err != nil { return err } cmd, digest, err := c.uploadAction(target, false, true) if err != nil { return err } // 24 hours is kind of an arbitrarily long timeout. Basically we just don't want to limit it here. _, _, err = c.execute(0, target, cmd, digest, false, false) return err } // build implements the actual build of a target. func (c *Client) build(tid int, target *core.BuildTarget) (*core.BuildMetadata, *pb.ActionResult, *pb.Digest, error) { needStdout := target.PostBuildFunction != nil // If we're gonna stamp the target, first check the unstamped equivalent that we store results under. // This implements the rules of stamp whereby we don't force rebuilds every time e.g. the SCM revision changes. var unstampedDigest *pb.Digest if target.Stamp { command, digest, err := c.buildAction(target, false, false) if err != nil { return nil, nil, nil, err } else if metadata, ar := c.maybeRetrieveResults(tid, target, command, digest, false, needStdout); metadata != nil { c.unstampedBuildActionDigests.Put(target.Label, digest) return metadata, ar, digest, nil } unstampedDigest = digest } command, stampedDigest, err := c.buildAction(target, false, true) if err != nil { return nil, nil, nil, err } metadata, ar, err := c.execute(tid, target, command, stampedDigest, false, needStdout) if target.Stamp && err == nil { // Store results under unstamped digest too. 
c.locallyCacheResults(target, unstampedDigest, metadata, ar) c.client.UpdateActionResult(context.Background(), &pb.UpdateActionResultRequest{ InstanceName: c.instance, ActionDigest: unstampedDigest, ActionResult: ar, }) c.unstampedBuildActionDigests.Put(target.Label, unstampedDigest) } else { c.unstampedBuildActionDigests.Put(target.Label, stampedDigest) } return metadata, ar, stampedDigest, err } // Download downloads outputs for the given target. func (c *Client) Download(target *core.BuildTarget) error { if target.Local { return nil // No download needed since this target was built locally } return c.download(target, func() error { buildAction := c.unstampedBuildActionDigests.Get(target.Label) if c.outputsExist(target, buildAction) { log.Debug("Not downloading outputs for %s, they're already up-to-date", target) return nil } _, ar := c.retrieveResults(target, nil, buildAction, false, false) if ar == nil { return fmt.Errorf("Failed to retrieve action result for %s", target) } return c.reallyDownload(target, buildAction, ar) }) } func (c *Client) download(target *core.BuildTarget, f func() error) error { v, _ := c.downloads.LoadOrStore(target, &pendingDownload{}) d := v.(*pendingDownload) d.once.Do(func() { d.err = f() }) return d.err } func (c *Client) reallyDownload(target *core.BuildTarget, digest *pb.Digest, ar *pb.ActionResult) error { log.Debug("Downloading outputs for %s", target) if err := removeOutputs(target); err != nil { return err } if err := c.downloadActionOutputs(context.Background(), ar, target); err != nil { return c.wrapActionErr(err, digest) } c.recordAttrs(target, digest) log.Debug("Downloaded outputs for %s", target) return nil } func (c *Client) downloadActionOutputs(ctx context.Context, ar *pb.ActionResult, target *core.BuildTarget) error { // We can download straight into the out dir if there are no outdirs to worry about if len(target.OutputDirectories) == 0 { _, err := c.client.DownloadActionOutputs(ctx, ar, target.OutDir(), c.fileMetadataCache) return err } defer os.RemoveAll(target.TmpDir()) if _, err := c.client.DownloadActionOutputs(ctx, ar, target.TmpDir(), c.fileMetadataCache); err != nil { return err } if err := moveOutDirsToTmpRoot(target); err != nil { return fmt.Errorf("failed to move out directories to correct place in tmp folder: %w", err) } if err := moveTmpFilesToOutDir(target); err != nil { return fmt.Errorf("failed to move downloaded action output from target tmp dir to out dir: %w", err) } return nil } // moveTmpFilesToOutDir moves files from the target tmp dir to the out dir func moveTmpFilesToOutDir(target *core.BuildTarget) error { files, err := ioutil.ReadDir(target.TmpDir()) if err != nil { return err } for _, f := range files { oldPath := filepath.Join(target.TmpDir(), f.Name()) newPath := filepath.Join(target.OutDir(), f.Name()) if err := fs.RecursiveCopy(oldPath, newPath, target.OutMode()); err != nil { return err } } return nil } // moveOutDirsToTmpRoot moves all the files from the output dirs into the root of the build temp dir and deletes the // now empty directory func moveOutDirsToTmpRoot(target *core.BuildTarget) error { for _, dir := range target.OutputDirectories { if err := moveOutDirFilesToTmpRoot(target, dir.Dir()); err != nil { return fmt.Errorf("failed to move output dir (%s) contents to rule root: %w", dir, err) } if err := os.Remove(filepath.Join(target.TmpDir(), dir.Dir())); err != nil { return err } } return nil } func moveOutDirFilesToTmpRoot(target *core.BuildTarget, dir string) error { fullDir := 
filepath.Join(target.TmpDir(), dir) files, err := ioutil.ReadDir(fullDir) if err != nil { return err } for _, f := range files { from := filepath.Join(fullDir, f.Name()) to := filepath.Join(target.TmpDir(), f.Name()) if err := os.Rename(from, to); err != nil { return err } } return nil } // Test executes a remote test of the given target. // It returns the results (and coverage if appropriate) as bytes to be parsed elsewhere. func (c *Client) Test(tid int, target *core.BuildTarget, run int) (metadata *core.BuildMetadata, err error) { if err := c.CheckInitialised(); err != nil { return nil, err } command, digest, err := c.buildAction(target, true, false) if err != nil { return nil, err } metadata, ar, err := c.execute(tid, target, command, digest, true, false) if ar != nil { _, dlErr := c.client.DownloadActionOutputs(context.Background(), ar, target.TestDir(run), c.fileMetadataCache) if dlErr != nil { log.Warningf("%v: failed to download test outputs: %v", target.Label, dlErr) } } return metadata, err } // retrieveResults retrieves target results from where it can (either from the local cache or from remote). // It returns nil if it cannot be retrieved. func (c *Client) retrieveResults(target *core.BuildTarget, command *pb.Command, digest *pb.Digest, needStdout, isTest bool) (*core.BuildMetadata, *pb.ActionResult) { // First see if this execution is cached locally if metadata, ar := c.retrieveLocalResults(target, digest); metadata != nil { log.Debug("Got locally cached results for %s %s (age %s)", target.Label, c.actionURL(digest, true), time.Since(metadata.Timestamp).Truncate(time.Second)) metadata.Cached = true return metadata, ar } // Now see if it is cached on the remote server if ar, err := c.client.GetActionResult(context.Background(), &pb.GetActionResultRequest{ InstanceName: c.instance, ActionDigest: digest, InlineStdout: needStdout, }); err == nil { // This action already exists and has been cached. if metadata, err := c.buildMetadata(ar, needStdout, false); err == nil { log.Debug("Got remotely cached results for %s %s", target.Label, c.actionURL(digest, true)) if command != nil { err = c.verifyActionResult(target, command, digest, ar, c.state.Config.Remote.VerifyOutputs, isTest) } if err == nil { c.locallyCacheResults(target, digest, metadata, ar) metadata.Cached = true return metadata, ar } log.Debug("Remotely cached results for %s were missing some outputs, forcing a rebuild: %s", target.Label, err) } } return nil, nil } // maybeRetrieveResults is like retrieveResults but only retrieves if we aren't forcing a rebuild of the target // (i.e. not if we're doing plz build --rebuild or plz test --rerun). func (c *Client) maybeRetrieveResults(tid int, target *core.BuildTarget, command *pb.Command, digest *pb.Digest, isTest, needStdout bool) (*core.BuildMetadata, *pb.ActionResult) { if !c.state.ShouldRebuild(target) && !(c.state.NeedTests && isTest && c.state.ForceRerun) { c.state.LogBuildResult(tid, target, core.TargetBuilding, "Checking remote...") if metadata, ar := c.retrieveResults(target, command, digest, needStdout, isTest); metadata != nil { return metadata, ar } } return nil, nil } // execute submits an action to the remote executor and monitors its progress. // The returned ActionResult may be nil on failure. 
func (c *Client) execute(tid int, target *core.BuildTarget, command *pb.Command, digest *pb.Digest, isTest, needStdout bool) (*core.BuildMetadata, *pb.ActionResult, error) { if !isTest || !c.state.ForceRerun || c.state.NumTestRuns == 1 { if metadata, ar := c.maybeRetrieveResults(tid, target, command, digest, isTest, needStdout); metadata != nil { return metadata, ar, nil } } // We didn't actually upload the inputs before, so we must do so now. command, digest, err := c.uploadAction(target, isTest, false) if err != nil { return nil, nil, fmt.Errorf("Failed to upload build action: %s", err) } // Remote actions & filegroups get special treatment at this point. if target.IsFilegroup { // Filegroups get special-cased since they are just a movement of files. return c.buildFilegroup(target, command, digest) } else if target.IsRemoteFile { return c.fetchRemoteFile(tid, target, digest) } else if target.IsTextFile { return c.buildTextFile(c.state, target, command, digest) } // We should skip the cache lookup (and override any existing action result) if we --rebuild, or --rerun and this is // one fo the targets we're testing or building. skipCacheLookup := (isTest && c.state.ForceRerun) || (!isTest && c.state.ForceRebuild) skipCacheLookup = skipCacheLookup && c.state.IsOriginalTarget(target) return c.reallyExecute(tid, target, command, digest, needStdout, isTest, skipCacheLookup) } // reallyExecute is like execute but after the initial cache check etc. // The action & sources must have already been uploaded. func (c *Client) reallyExecute(tid int, target *core.BuildTarget, command *pb.Command, digest *pb.Digest, needStdout, isTest, skipCacheLookup bool) (*core.BuildMetadata, *pb.ActionResult, error) { executing := false building := target.State() <= core.Built if building { c.state.LogBuildResult(tid, target, core.TargetBuilding, "Submitting job...") } else { c.state.LogBuildResult(tid, target, core.TargetTesting, "Submitting job...") } updateProgress := func(metadata *pb.ExecuteOperationMetadata) { if c.state.Config.Remote.DisplayURL != "" { log.Debug("Remote progress for %s: %s%s", target.Label, metadata.Stage, c.actionURL(metadata.ActionDigest, true)) } if building { switch metadata.Stage { case pb.ExecutionStage_CACHE_CHECK: c.state.LogBuildResult(tid, target, core.TargetBuilding, "Checking cache...") case pb.ExecutionStage_QUEUED: c.state.LogBuildResult(tid, target, core.TargetBuilding, "Queued") case pb.ExecutionStage_EXECUTING: executing = true c.state.LogBuildResult(tid, target, core.TargetBuilding, "Building...") case pb.ExecutionStage_COMPLETED: c.state.LogBuildResult(tid, target, core.TargetBuilding, "Completed") } } else { switch metadata.Stage { case pb.ExecutionStage_CACHE_CHECK: c.state.LogBuildResult(tid, target, core.TargetTesting, "Checking cache...") case pb.ExecutionStage_QUEUED: c.state.LogBuildResult(tid, target, core.TargetTesting, "Queued") case pb.ExecutionStage_EXECUTING: executing = true c.state.LogBuildResult(tid, target, core.TargetTesting, "Testing...") case pb.ExecutionStage_COMPLETED: c.state.LogBuildResult(tid, target, core.TargetTesting, "Completed") } } } ctx, cancel := context.WithCancel(context.Background()) defer cancel() go func() { for i := 1; i < 1000000; i++ { select { case <-ctx.Done(): return case <-time.After(1 * time.Minute): description := "queued" if executing { description = "executing" } if i == 1 { log.Notice("%s still %s after 1 minute", target, description) } else { log.Notice("%s still %s after %d minutes", target, description, i) } } } }() resp, 
err := c.client.ExecuteAndWaitProgress(c.contextWithMetadata(target), &pb.ExecuteRequest{ InstanceName: c.instance, ActionDigest: digest, SkipCacheLookup: skipCacheLookup, }, updateProgress) if err != nil { // Handle timing issues if we try to resume an execution as it fails. If we get a // "not found" we might find that it's already been completed and we can't resume. if status.Code(err) == codes.NotFound { if metadata, ar := c.retrieveResults(target, command, digest, needStdout, isTest); metadata != nil { return metadata, ar, nil } } return nil, nil, c.wrapActionErr(fmt.Errorf("Failed to execute %s: %s", target, err), digest) } switch result := resp.Result.(type) { case *longrunning.Operation_Error: // We shouldn't really get here - the rex API requires servers to always // use the response field instead of error. return nil, nil, convertError(result.Error) case *longrunning.Operation_Response: response := &pb.ExecuteResponse{} if err := result.Response.UnmarshalTo(response); err != nil { log.Error("Failed to deserialise execution response: %s", err) return nil, nil, err } if response.CachedResult { c.state.LogBuildResult(tid, target, core.TargetCached, "Cached") } for k, v := range response.ServerLogs { log.Debug("Server log available: %s: hash key %s", k, v.Digest.Hash) } var respErr error if response.Status != nil { respErr = convertError(response.Status) if respErr != nil { if !strings.Contains(respErr.Error(), c.state.Config.Remote.DisplayURL) { if url := c.actionURL(digest, false); url != "" { respErr = fmt.Errorf("%s\nAction URL: %s", respErr, url) } } } } if resp.Result == nil { // This is optional on failure. return nil, nil, respErr } if response.Result == nil { // This seems to happen when things go wrong on the build server end. if response.Status != nil { return nil, nil, fmt.Errorf("Build server returned invalid result: %s", convertError(response.Status)) } log.Debug("Bad result from build server: %+v", response) return nil, nil, fmt.Errorf("Build server did not return valid result") } if response.Message != "" { // Informational messages can be emitted on successful actions. log.Debug("Message from build server:\n %s", response.Message) } failed := respErr != nil || response.Result.ExitCode != 0 metadata, err := c.buildMetadata(response.Result, needStdout || failed, failed) logResponseTimings(target, response.Result) // The original error is higher priority than us trying to retrieve the // output of the thing that failed. if respErr != nil { return metadata, response.Result, respErr } else if response.Result.ExitCode != 0 { err := fmt.Errorf("Remotely executed command exited with %d", response.Result.ExitCode) if response.Message != "" { err = fmt.Errorf("%s\n %s", err, response.Message) } if len(metadata.Stdout) != 0 { err = fmt.Errorf("%s\nStdout:\n%s", err, metadata.Stdout) } if len(metadata.Stderr) != 0 { err = fmt.Errorf("%s\nStderr:\n%s", err, metadata.Stderr) } // Add a link to the action URL, but only if the server didn't do it (they // might add one to the failed action if they're using the Buildbarn extension // for it, which we can't replicate here). 
if !strings.Contains(response.Message, c.state.Config.Remote.DisplayURL) { if url := c.actionURL(digest, true); url != "" { err = fmt.Errorf("%s\n%s", err, url) } } return metadata, response.Result, err } else if err != nil { return nil, nil, err } log.Debug("Completed remote build action for %s", target) if err := c.verifyActionResult(target, command, digest, response.Result, c.state.Config.Remote.VerifyOutputs && !isTest, isTest); err != nil { return metadata, response.Result, err } c.locallyCacheResults(target, digest, metadata, response.Result) return metadata, response.Result, nil default: if !resp.Done { log.Error("Received an incomplete response for %s: %#v", target, resp) return nil, nil, fmt.Errorf("Received an incomplete response for %s", target) } return nil, nil, fmt.Errorf("Unknown response type (was a %T): %#v", resp.Result, resp) // Shouldn't get here } } func logResponseTimings(target *core.BuildTarget, ar *pb.ActionResult) { if ar != nil && ar.ExecutionMetadata != nil { startTime := ar.ExecutionMetadata.ExecutionStartTimestamp.AsTime() endTime := ar.ExecutionMetadata.ExecutionCompletedTimestamp.AsTime() inputFetchStartTime := ar.ExecutionMetadata.InputFetchStartTimestamp.AsTime() inputFetchEndTime := ar.ExecutionMetadata.InputFetchCompletedTimestamp.AsTime() log.Debug("Completed remote build action for %s; input fetch %s, build time %s", target, inputFetchEndTime.Sub(inputFetchStartTime), endTime.Sub(startTime)) } } // PrintHashes prints the action hashes for a target. func (c *Client) PrintHashes(target *core.BuildTarget, isTest bool) { actionDigest := c.unstampedBuildActionDigests.Get(target.Label) fmt.Printf(" Action: %7d bytes: %s\n", actionDigest.SizeBytes, actionDigest.Hash) if c.state.Config.Remote.DisplayURL != "" { fmt.Printf(" URL: %s\n", c.actionURL(actionDigest, false)) } } // DataRate returns an estimate of the current in/out RPC data rates in bytes per second. func (c *Client) DataRate() (int, int, int, int) { return c.byteRateIn, c.byteRateOut, c.totalBytesIn, c.totalBytesOut } // fetchRemoteFile sends a request to fetch a file using the remote asset API. func (c *Client) fetchRemoteFile(tid int, target *core.BuildTarget, actionDigest *pb.Digest) (*core.BuildMetadata, *pb.ActionResult, error) { c.state.LogBuildResult(tid, target, core.TargetBuilding, "Downloading...") urls := target.AllURLs(c.state) req := &fpb.FetchBlobRequest{ InstanceName: c.instance, Timeout: durationpb.New(target.BuildTimeout), Uris: urls, } if c.state.VerifyHashes && (!c.state.NeedHashesOnly || !c.state.IsOriginalTargetOrParent(target)) { if sri := subresourceIntegrity(target); sri != "" { req.Qualifiers = []*fpb.Qualifier{{ Name: "checksum.sri", Value: sri, }} } } ctx, cancel := context.WithTimeout(context.Background(), target.BuildTimeout) defer cancel() resp, err := c.fetchClient.FetchBlob(ctx, req) if err != nil { return nil, nil, fmt.Errorf("Failed to download file: %s", err) } c.state.LogBuildResult(tid, target, core.TargetBuilt, "Downloaded.") // If we get here, the blob exists in the CAS. Create an ActionResult corresponding to it. 
outs := target.Outputs() ar := &pb.ActionResult{ OutputFiles: []*pb.OutputFile{{ Path: outs[0], Digest: resp.BlobDigest, IsExecutable: target.IsBinary, }}, } if _, err := c.client.UpdateActionResult(context.Background(), &pb.UpdateActionResultRequest{ InstanceName: c.instance, ActionDigest: actionDigest, ActionResult: ar, }); err != nil { return nil, nil, fmt.Errorf("Error updating action result: %s", err) } return &core.BuildMetadata{}, ar, nil } // buildFilegroup "builds" a single filegroup target. func (c *Client) buildFilegroup(target *core.BuildTarget, command *pb.Command, actionDigest *pb.Digest) (*core.BuildMetadata, *pb.ActionResult, error) { inputDir, err := c.uploadInputDir(nil, target, false) // We don't need to actually upload the inputs here, that is already done. if err != nil { return nil, nil, err } ar := &pb.ActionResult{} if err := c.uploadBlobs(func(ch chan<- *uploadinfo.Entry) error { defer close(ch) inputDir.Build(ch) for _, out := range command.OutputPaths { if d, f := inputDir.Node(path.Join(target.Label.PackageName, out)); d != nil { entry, digest := c.protoEntry(inputDir.Tree(path.Join(target.Label.PackageName, out))) ch <- entry ar.OutputDirectories = append(ar.OutputDirectories, &pb.OutputDirectory{ Path: out, TreeDigest: digest, }) } else if f != nil { ar.OutputFiles = append(ar.OutputFiles, &pb.OutputFile{ Path: out, Digest: f.Digest, IsExecutable: f.IsExecutable, }) } else { // Of course, we should not get here (classic developer things...) return fmt.Errorf("Missing output from filegroup: %s", out) } } return nil }); err != nil { return nil, nil, err } if _, err := c.client.UpdateActionResult(context.Background(), &pb.UpdateActionResultRequest{ InstanceName: c.instance, ActionDigest: actionDigest, ActionResult: ar, }); err != nil { return nil, nil, fmt.Errorf("Error updating action result: %s", err) } return &core.BuildMetadata{}, ar, nil } // buildTextFile "builds" uploads a text file to the CAS func (c *Client) buildTextFile(state *core.BuildState, target *core.BuildTarget, command *pb.Command, actionDigest *pb.Digest) (*core.BuildMetadata, *pb.ActionResult, error) { ar := &pb.ActionResult{} if err := c.uploadBlobs(func(ch chan<- *uploadinfo.Entry) error { defer close(ch) if len(command.OutputPaths) != 1 { return fmt.Errorf("text_file %s should have a single output, has %d", target.Label, len(command.OutputPaths)) } content, err := target.GetFileContent(state) if err != nil { return err } entry := uploadinfo.EntryFromBlob([]byte(content)) ch <- entry ar.OutputFiles = append(ar.OutputFiles, &pb.OutputFile{ Path: command.OutputPaths[0], Digest: entry.Digest.ToProto(), }) return nil }); err != nil { return nil, nil, err } if _, err := c.client.UpdateActionResult(context.Background(), &pb.UpdateActionResultRequest{ InstanceName: c.instance, ActionDigest: actionDigest, ActionResult: ar, }); err != nil { return nil, nil, fmt.Errorf("Error updating action result: %s", err) } return &core.BuildMetadata{}, ar, nil } // A grpcLogMabob is an implementation of grpc's logging interface using our backend. type grpcLogMabob struct{} func (g *grpcLogMabob) Info(args ...interface{}) { log.Info("%s", args) } func (g *grpcLogMabob) Infof(format string, args ...interface{}) { log.Info(format, args...) } func (g *grpcLogMabob) Infoln(args ...interface{}) { log.Info("%s", args) } func (g *grpcLogMabob) Warning(args ...interface{}) { log.Warning("%s", args) } func (g *grpcLogMabob) Warningf(format string, args ...interface{}) { log.Warning(format, args...) 
} func (g *grpcLogMabob) Warningln(args ...interface{}) { log.Warning("%s", args) } func (g *grpcLogMabob) Error(args ...interface{}) { log.Error("", args...) } func (g *grpcLogMabob) Errorf(format string, args ...interface{}) { log.Errorf(format, args...) } func (g *grpcLogMabob) Errorln(args ...interface{}) { log.Error("", args...) } func (g *grpcLogMabob) Fatal(args ...interface{}) { log.Fatal(args...) } func (g *grpcLogMabob) Fatalf(format string, args ...interface{}) { log.Fatalf(format, args...) } func (g *grpcLogMabob) Fatalln(args ...interface{}) { log.Fatal(args...) } func (g *grpcLogMabob) V(l int) bool { return log.IsEnabledFor(logging.Level(l)) }
1
10,236
Is this deliberate? Or is it just for testing?
thought-machine-please
go
@@ -24,7 +24,7 @@ namespace OpenTelemetry { public class CompositeProcessor<T> : BaseProcessor<T> { - private DoublyLinkedListNode head; + private readonly DoublyLinkedListNode head; private DoublyLinkedListNode tail; private bool disposed;
1
// <copyright file="CompositeProcessor.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Collections.Generic; using System.Diagnostics; using System.Threading; using OpenTelemetry.Internal; namespace OpenTelemetry { public class CompositeProcessor<T> : BaseProcessor<T> { private DoublyLinkedListNode head; private DoublyLinkedListNode tail; private bool disposed; public CompositeProcessor(IEnumerable<BaseProcessor<T>> processors) { if (processors == null) { throw new ArgumentNullException(nameof(processors)); } using var iter = processors.GetEnumerator(); if (!iter.MoveNext()) { throw new ArgumentException($"{nameof(processors)} collection is empty"); } this.head = new DoublyLinkedListNode(iter.Current); this.tail = this.head; while (iter.MoveNext()) { this.AddProcessor(iter.Current); } } public CompositeProcessor<T> AddProcessor(BaseProcessor<T> processor) { if (processor == null) { throw new ArgumentNullException(nameof(processor)); } var node = new DoublyLinkedListNode(processor) { Previous = this.tail, }; this.tail.Next = node; this.tail = node; return this; } /// <inheritdoc/> public override void OnEnd(T data) { var cur = this.head; while (cur != null) { cur.Value.OnEnd(data); cur = cur.Next; } } /// <inheritdoc/> public override void OnStart(T data) { var cur = this.head; while (cur != null) { cur.Value.OnStart(data); cur = cur.Next; } } /// <inheritdoc/> protected override bool OnForceFlush(int timeoutMilliseconds) { var cur = this.head; var sw = Stopwatch.StartNew(); while (cur != null) { if (timeoutMilliseconds == Timeout.Infinite) { _ = cur.Value.ForceFlush(Timeout.Infinite); } else { var timeout = timeoutMilliseconds - sw.ElapsedMilliseconds; if (timeout <= 0) { return false; } var succeeded = cur.Value.ForceFlush((int)timeout); if (!succeeded) { return false; } } cur = cur.Next; } return true; } /// <inheritdoc/> protected override bool OnShutdown(int timeoutMilliseconds) { var cur = this.head; var result = true; var sw = Stopwatch.StartNew(); while (cur != null) { if (timeoutMilliseconds == Timeout.Infinite) { result = cur.Value.Shutdown(Timeout.Infinite) && result; } else { var timeout = timeoutMilliseconds - sw.ElapsedMilliseconds; // notify all the processors, even if we run overtime result = cur.Value.Shutdown((int)Math.Max(timeout, 0)) && result; } cur = cur.Next; } return result; } protected override void Dispose(bool disposing) { if (this.disposed) { return; } if (disposing) { var cur = this.head; while (cur != null) { try { cur.Value?.Dispose(); } catch (Exception ex) { OpenTelemetrySdkEventSource.Log.SpanProcessorException(nameof(this.Dispose), ex); } cur = cur.Next; } } this.disposed = true; } private class DoublyLinkedListNode { public readonly BaseProcessor<T> Value; public DoublyLinkedListNode(BaseProcessor<T> value) { this.Value = value; } public DoublyLinkedListNode Previous { get; set; } public DoublyLinkedListNode Next { get; set; } } } }
1
20,461
I want to get more perspectives on this one. Making it `readonly` assumes the head should never change, and my worry is that other code might come to rely on that (e.g. it might cache the value and assume it will never change). While this holds for now, in the future we might want to support something like `PrependProcessor(processor)` or `InsertProcessor(index, processor)`.
open-telemetry-opentelemetry-dotnet
.cs
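A minimal sketch of the concern in the review message above, written in Go with hypothetical names rather than the record's C#: any future prepend operation must reassign the head pointer, so freezing that field at construction rules the evolution out, and callers who cache the pointer would be holding a stale node.

```go
package main

import "fmt"

type node struct {
	value string
	next  *node
}

type pipeline struct {
	head *node // must stay assignable if prepending is ever supported
}

// Prepend reassigns head — impossible if head were frozen at construction.
func (p *pipeline) Prepend(v string) {
	p.head = &node{value: v, next: p.head}
}

func main() {
	p := &pipeline{head: &node{value: "exporter"}}
	cached := p.head // a caller caching head, as the review worries about
	p.Prepend("filter")
	fmt.Println(cached == p.head) // false: the cached pointer is now stale
}
```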
@@ -65,7 +65,7 @@ public class TestIndexSortSortedNumericDocValuesRangeQuery extends LuceneTestCas iw.deleteDocuments(LongPoint.newRangeQuery("idx", 0L, 10L)); } final IndexReader reader = iw.getReader(); - final IndexSearcher searcher = newSearcher(reader, false); + final IndexSearcher searcher = newSearcher(reader); iw.close(); for (int i = 0; i < 100; ++i) {
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.lucene.search; import java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; import static org.hamcrest.CoreMatchers.instanceOf; public class TestIndexSortSortedNumericDocValuesRangeQuery extends LuceneTestCase { public void testSameHitsAsPointRangeQuery() throws IOException { final int iters = atLeast(10); for (int iter = 0; iter < iters; ++iter) { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); boolean reverse = random().nextBoolean(); SortField sortField = new SortedNumericSortField("dv", SortField.Type.LONG, reverse); sortField.setMissingValue(random().nextLong()); iwc.setIndexSort(new Sort(sortField)); RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc); final int numDocs = atLeast(100); for (int i = 0; i < numDocs; ++i) { Document doc = new Document(); final int numValues = TestUtil.nextInt(random(), 0, 1); for (int j = 0; j < numValues; ++j) { final long value = TestUtil.nextLong(random(), -100, 10000); doc.add(new SortedNumericDocValuesField("dv", value)); doc.add(new LongPoint("idx", value)); } iw.addDocument(doc); } if (random().nextBoolean()) { iw.deleteDocuments(LongPoint.newRangeQuery("idx", 0L, 10L)); } final IndexReader reader = iw.getReader(); final IndexSearcher searcher = newSearcher(reader, false); iw.close(); for (int i = 0; i < 100; ++i) { final long min = random().nextBoolean() ? Long.MIN_VALUE : TestUtil.nextLong(random(), -100, 10000); final long max = random().nextBoolean() ? Long.MAX_VALUE : TestUtil.nextLong(random(), -100, 10000); final Query q1 = LongPoint.newRangeQuery("idx", min, max); final Query q2 = createQuery("dv", min, max); assertSameHits(searcher, q1, q2, false); } reader.close(); dir.close(); } } private void assertSameHits(IndexSearcher searcher, Query q1, Query q2, boolean scores) throws IOException { final int maxDoc = searcher.getIndexReader().maxDoc(); final TopDocs td1 = searcher.search(q1, maxDoc, scores ? Sort.RELEVANCE : Sort.INDEXORDER); final TopDocs td2 = searcher.search(q2, maxDoc, scores ? 
Sort.RELEVANCE : Sort.INDEXORDER); assertEquals(td1.totalHits.value, td2.totalHits.value); for (int i = 0; i < td1.scoreDocs.length; ++i) { assertEquals(td1.scoreDocs[i].doc, td2.scoreDocs[i].doc); if (scores) { assertEquals(td1.scoreDocs[i].score, td2.scoreDocs[i].score, 10e-7); } } } public void testEquals() { Query q1 = createQuery("foo", 3, 5); QueryUtils.checkEqual(q1, createQuery("foo", 3, 5)); QueryUtils.checkUnequal(q1, createQuery("foo", 3, 6)); QueryUtils.checkUnequal(q1, createQuery("foo", 4, 5)); QueryUtils.checkUnequal(q1, createQuery("bar", 3, 5)); } public void testToString() { Query q1 = createQuery("foo", 3, 5); assertEquals("foo:[3 TO 5]", q1.toString()); assertEquals("[3 TO 5]", q1.toString("foo")); assertEquals("foo:[3 TO 5]", q1.toString("bar")); Query q2 = SortedSetDocValuesField.newSlowRangeQuery("foo", new BytesRef("bar"), new BytesRef("baz"), true, true); assertEquals("foo:[[62 61 72] TO [62 61 7a]]", q2.toString()); q2 = SortedSetDocValuesField.newSlowRangeQuery("foo", new BytesRef("bar"), new BytesRef("baz"), false, true); assertEquals("foo:{[62 61 72] TO [62 61 7a]]", q2.toString()); q2 = SortedSetDocValuesField.newSlowRangeQuery("foo", new BytesRef("bar"), new BytesRef("baz"), false, false); assertEquals("foo:{[62 61 72] TO [62 61 7a]}", q2.toString()); q2 = SortedSetDocValuesField.newSlowRangeQuery("foo", new BytesRef("bar"), null, true, true); assertEquals("foo:[[62 61 72] TO *}", q2.toString()); q2 = SortedSetDocValuesField.newSlowRangeQuery("foo", null, new BytesRef("baz"), true, true); assertEquals("foo:{* TO [62 61 7a]]", q2.toString()); assertEquals("{* TO [62 61 7a]]", q2.toString("foo")); assertEquals("foo:{* TO [62 61 7a]]", q2.toString("bar")); } public void testIndexSortDocValuesWithEvenLength() throws Exception { testIndexSortDocValuesWithEvenLength(false); testIndexSortDocValuesWithEvenLength(true); } public void testIndexSortDocValuesWithEvenLength(boolean reverse) throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); Sort indexSort = new Sort(new SortedNumericSortField("field", SortField.Type.LONG, reverse)); iwc.setIndexSort(indexSort); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); writer.addDocument(createDocument("field", -80)); writer.addDocument(createDocument("field", -5)); writer.addDocument(createDocument("field", 0)); writer.addDocument(createDocument("field", 0)); writer.addDocument(createDocument("field", 30)); writer.addDocument(createDocument("field", 35)); DirectoryReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); // Test ranges consisting of one value. assertEquals(1, searcher.count(createQuery("field", -80, -80))); assertEquals(1, searcher.count(createQuery("field", -5, -5))); assertEquals(2, searcher.count(createQuery("field", 0, 0))); assertEquals(1, searcher.count(createQuery("field", 30, 30))); assertEquals(1, searcher.count(createQuery("field", 35, 35))); assertEquals(0, searcher.count(createQuery("field", -90, -90))); assertEquals(0, searcher.count(createQuery("field", 5, 5))); assertEquals(0, searcher.count(createQuery("field", 40, 40))); // Test the lower end of the document value range. assertEquals(2, searcher.count(createQuery("field", -90, -4))); assertEquals(2, searcher.count(createQuery("field", -80, -4))); assertEquals(1, searcher.count(createQuery("field", -70, -4))); assertEquals(2, searcher.count(createQuery("field", -80, -5))); // Test the upper end of the document value range. 
assertEquals(1, searcher.count(createQuery("field", 25, 34))); assertEquals(2, searcher.count(createQuery("field", 25, 35))); assertEquals(2, searcher.count(createQuery("field", 25, 36))); assertEquals(2, searcher.count(createQuery("field", 30, 35))); // Test multiple occurrences of the same value. assertEquals(2, searcher.count(createQuery("field", -4, 4))); assertEquals(2, searcher.count(createQuery("field", -4, 0))); assertEquals(2, searcher.count(createQuery("field", 0, 4))); assertEquals(3, searcher.count(createQuery("field", 0, 30))); // Test ranges that span all documents. assertEquals(6, searcher.count(createQuery("field", -80, 35))); assertEquals(6, searcher.count(createQuery("field", -90, 40))); writer.close(); reader.close(); dir.close(); } public void testIndexSortDocValuesWithOddLength() throws Exception { testIndexSortDocValuesWithOddLength(false); testIndexSortDocValuesWithOddLength(true); } public void testIndexSortDocValuesWithOddLength(boolean reverse) throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); Sort indexSort = new Sort(new SortedNumericSortField("field", SortField.Type.LONG, reverse)); iwc.setIndexSort(indexSort); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); writer.addDocument(createDocument("field", -80)); writer.addDocument(createDocument("field", -5)); writer.addDocument(createDocument("field", 0)); writer.addDocument(createDocument("field", 0)); writer.addDocument(createDocument("field", 5)); writer.addDocument(createDocument("field", 30)); writer.addDocument(createDocument("field", 35)); DirectoryReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); // Test ranges consisting of one value. assertEquals(1, searcher.count(createQuery("field", -80, -80))); assertEquals(1, searcher.count(createQuery("field", -5, -5))); assertEquals(2, searcher.count(createQuery("field", 0, 0))); assertEquals(1, searcher.count(createQuery("field", 5, 5))); assertEquals(1, searcher.count(createQuery("field", 30, 30))); assertEquals(1, searcher.count(createQuery("field", 35, 35))); assertEquals(0, searcher.count(createQuery("field", -90, -90))); assertEquals(0, searcher.count(createQuery("field", 6, 6))); assertEquals(0, searcher.count(createQuery("field", 40, 40))); // Test the lower end of the document value range. assertEquals(2, searcher.count(createQuery("field", -90, -4))); assertEquals(2, searcher.count(createQuery("field", -80, -4))); assertEquals(1, searcher.count(createQuery("field", -70, -4))); assertEquals(2, searcher.count(createQuery("field", -80, -5))); // Test the upper end of the document value range. assertEquals(1, searcher.count(createQuery("field", 25, 34))); assertEquals(2, searcher.count(createQuery("field", 25, 35))); assertEquals(2, searcher.count(createQuery("field", 25, 36))); assertEquals(2, searcher.count(createQuery("field", 30, 35))); // Test multiple occurrences of the same value. assertEquals(2, searcher.count(createQuery("field", -4, 4))); assertEquals(2, searcher.count(createQuery("field", -4, 0))); assertEquals(2, searcher.count(createQuery("field", 0, 4))); assertEquals(4, searcher.count(createQuery("field", 0, 30))); // Test ranges that span all documents. 
assertEquals(7, searcher.count(createQuery("field", -80, 35))); assertEquals(7, searcher.count(createQuery("field", -90, 40))); writer.close(); reader.close(); dir.close(); } public void testIndexSortDocValuesWithSingleValue() throws Exception { testIndexSortDocValuesWithSingleValue(false); testIndexSortDocValuesWithSingleValue(true); } private void testIndexSortDocValuesWithSingleValue(boolean reverse) throws IOException{ Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); Sort indexSort = new Sort(new SortedNumericSortField("field", SortField.Type.LONG, reverse)); iwc.setIndexSort(indexSort); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); writer.addDocument(createDocument("field", 42)); DirectoryReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); assertEquals(1, searcher.count(createQuery("field", 42, 43))); assertEquals(1, searcher.count(createQuery("field", 42, 42))); assertEquals(0, searcher.count(createQuery("field", 41, 41))); assertEquals(0, searcher.count(createQuery("field", 43, 43))); writer.close(); reader.close(); dir.close(); } public void testIndexSortMissingValues() throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); SortField sortField = new SortedNumericSortField("field", SortField.Type.LONG); sortField.setMissingValue(random().nextLong()); iwc.setIndexSort(new Sort(sortField)); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); writer.addDocument(createDocument("field", -80)); writer.addDocument(createDocument("field", -5)); writer.addDocument(createDocument("field", 0)); writer.addDocument(createDocument("field", 35)); writer.addDocument(createDocument("other-field", 0)); writer.addDocument(createDocument("other-field", 10)); writer.addDocument(createDocument("other-field", 20)); DirectoryReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); assertEquals(2, searcher.count(createQuery("field", -70, 0))); assertEquals(2, searcher.count(createQuery("field", -2, 35))); assertEquals(4, searcher.count(createQuery("field", -80, 35))); assertEquals(4, searcher.count(createQuery("field", Long.MIN_VALUE, Long.MAX_VALUE))); writer.close(); reader.close(); dir.close(); } public void testNoDocuments() throws IOException { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); writer.addDocument(new Document()); IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); Query query = createQuery("foo", 2, 4); Weight w = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE, 1); assertNull(w.scorer(searcher.getIndexReader().leaves().get(0))); writer.close(); reader.close(); dir.close(); } public void testRewriteExhaustiveRange() throws IOException { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); writer.addDocument(new Document()); IndexReader reader = writer.getReader(); Query query = createQuery("field", Long.MIN_VALUE, Long.MAX_VALUE); Query rewrittenQuery = query.rewrite(reader); assertEquals(new DocValuesFieldExistsQuery("field"), rewrittenQuery); writer.close(); reader.close(); dir.close(); } public void testRewriteFallbackQuery() throws IOException { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); writer.addDocument(new Document()); IndexReader reader = writer.getReader(); // 
Create an (unrealistic) fallback query that is sure to be rewritten. Query fallbackQuery = new BooleanQuery.Builder().build(); Query query = new IndexSortSortedNumericDocValuesRangeQuery("field", 1, 42, fallbackQuery); Query rewrittenQuery = query.rewrite(reader); assertNotEquals(query, rewrittenQuery); assertThat(rewrittenQuery, instanceOf(IndexSortSortedNumericDocValuesRangeQuery.class)); IndexSortSortedNumericDocValuesRangeQuery rangeQuery = (IndexSortSortedNumericDocValuesRangeQuery) rewrittenQuery; assertEquals(new MatchNoDocsQuery(), rangeQuery.getFallbackQuery()); writer.close(); reader.close(); dir.close(); } /** * Test that the index sort optimization not activated if there is no index sort. */ public void testNoIndexSort() throws Exception { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); writer.addDocument(createDocument("field", 0)); testIndexSortOptimizationDeactivated(writer); writer.close(); dir.close(); } /** * Test that the index sort optimization is not activated when the sort is * on the wrong field. */ public void testIndexSortOnWrongField() throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); Sort indexSort = new Sort(new SortedNumericSortField("other-field", SortField.Type.LONG)); iwc.setIndexSort(indexSort); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); writer.addDocument(createDocument("field", 0)); testIndexSortOptimizationDeactivated(writer); writer.close(); dir.close(); } /** * Test that the index sort optimization is not activated when some documents * have multiple values. */ public void testMultiDocValues() throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); Sort indexSort = new Sort(new SortedNumericSortField("field", SortField.Type.LONG)); iwc.setIndexSort(indexSort); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); Document doc = new Document(); doc.add(new SortedNumericDocValuesField("field", 0)); doc.add(new SortedNumericDocValuesField("field", 10)); writer.addDocument(doc); testIndexSortOptimizationDeactivated(writer); writer.close(); dir.close(); } public void testIndexSortOptimizationDeactivated(RandomIndexWriter writer) throws IOException { DirectoryReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); Query query = createQuery("field", 0, 0); Weight weight = query.createWeight(searcher, ScoreMode.TOP_SCORES, 1.0F); // Check that the two-phase iterator is not null, indicating that we've fallen // back to SortedNumericDocValuesField.newSlowRangeQuery. for (LeafReaderContext context : searcher.getIndexReader().leaves()) { Scorer scorer = weight.scorer(context); assertNotNull(scorer.twoPhaseIterator()); } reader.close(); } private Document createDocument(String field, long value) { Document doc = new Document(); doc.add(new SortedNumericDocValuesField(field, value)); return doc; } private Query createQuery(String field, long lowerValue, long upperValue) { Query fallbackQuery = SortedNumericDocValuesField.newSlowRangeQuery(field, lowerValue, upperValue); return new IndexSortSortedNumericDocValuesRangeQuery(field, lowerValue, upperValue, fallbackQuery); } }
1
36,831
This isn't critical for test coverage, but it seemed off that we had disabled wrapping the reader.
apache-lucene-solr
java
@@ -21,9 +21,9 @@ _descList = [] def _setupDescriptors(namespace): global _descList, descList - from rdkit.Chem import GraphDescriptors, MolSurf, Lipinski, Fragments, Crippen + from rdkit.Chem import GraphDescriptors, MolSurf, Lipinski, Fragments, Crippen, Descriptors3D from rdkit.Chem.EState import EState_VSA - mods = [GraphDescriptors, MolSurf, EState_VSA, Lipinski, Crippen, Fragments] + mods = [GraphDescriptors, MolSurf, EState_VSA, Lipinski, Crippen,Descriptors3D, Fragments] otherMods = [Chem]
1
# # Copyright (C) 2001-2017 greg Landrum and Rational Discovery LLC # # @@ All Rights Reserved @@ # This file is part of the RDKit. # The contents are covered by the terms of the BSD license # which is included in the file license.txt, found at the root # of the RDKit source tree. # from rdkit import Chem from rdkit.Chem import rdPartialCharges, rdMolDescriptors import collections def _isCallable(thing): return (hasattr(collections,'Callable') and isinstance(thing,collections.Callable)) or \ hasattr(thing,'__call__') _descList = [] def _setupDescriptors(namespace): global _descList, descList from rdkit.Chem import GraphDescriptors, MolSurf, Lipinski, Fragments, Crippen from rdkit.Chem.EState import EState_VSA mods = [GraphDescriptors, MolSurf, EState_VSA, Lipinski, Crippen, Fragments] otherMods = [Chem] for nm, thing in namespace.items(): if nm[0] != '_' and _isCallable(thing): _descList.append((nm, thing)) others = [] for mod in otherMods: tmp = dir(mod) for name in tmp: if name[0] != '_': thing = getattr(mod, name) if _isCallable(thing): others.append(name) for mod in mods: tmp = dir(mod) for name in tmp: if name[0] != '_' and name[-1] != '_' and name not in others: # filter out python reference implementations: if name[:2] == 'py' and name[2:] in tmp: continue thing = getattr(mod, name) if _isCallable(thing): namespace[name] = thing _descList.append((name, thing)) descList = _descList from rdkit.Chem import rdMolDescriptors as _rdMolDescriptors MolWt = lambda *x, **y: _rdMolDescriptors._CalcMolWt(*x, **y) MolWt.version = _rdMolDescriptors._CalcMolWt_version MolWt.__doc__ = """The average molecular weight of the molecule >>> MolWt(Chem.MolFromSmiles('CC')) 30.07 >>> MolWt(Chem.MolFromSmiles('[NH4+].[Cl-]')) 53.49... """ HeavyAtomMolWt = lambda x: MolWt(x, True) HeavyAtomMolWt.__doc__ = """The average molecular weight of the molecule ignoring hydrogens >>> HeavyAtomMolWt(Chem.MolFromSmiles('CC')) 24.02... >>> HeavyAtomMolWt(Chem.MolFromSmiles('[NH4+].[Cl-]')) 49.46 """ HeavyAtomMolWt.version = "1.0.0" ExactMolWt = lambda *x, **y: _rdMolDescriptors.CalcExactMolWt(*x, **y) ExactMolWt.version = _rdMolDescriptors._CalcExactMolWt_version ExactMolWt.__doc__ = """The exact molecular weight of the molecule >>> ExactMolWt(Chem.MolFromSmiles('CC')) 30.04... >>> ExactMolWt(Chem.MolFromSmiles('[13CH3]C')) 31.05... """ def NumValenceElectrons(mol): """ The number of valence electrons the molecule has >>> NumValenceElectrons(Chem.MolFromSmiles('CC')) 14 >>> NumValenceElectrons(Chem.MolFromSmiles('C(=O)O')) 18 >>> NumValenceElectrons(Chem.MolFromSmiles('C(=O)[O-]')) 18 >>> NumValenceElectrons(Chem.MolFromSmiles('C(=O)')) 12 """ tbl = Chem.GetPeriodicTable() return sum( tbl.GetNOuterElecs(atom.GetAtomicNum()) - atom.GetFormalCharge() + atom.GetTotalNumHs() for atom in mol.GetAtoms()) NumValenceElectrons.version = "1.1.0" def NumRadicalElectrons(mol): """ The number of radical electrons the molecule has (says nothing about spin state) >>> NumRadicalElectrons(Chem.MolFromSmiles('CC')) 0 >>> NumRadicalElectrons(Chem.MolFromSmiles('C[CH3]')) 0 >>> NumRadicalElectrons(Chem.MolFromSmiles('C[CH2]')) 1 >>> NumRadicalElectrons(Chem.MolFromSmiles('C[CH]')) 2 >>> NumRadicalElectrons(Chem.MolFromSmiles('C[C]')) 3 """ return sum(atom.GetNumRadicalElectrons() for atom in mol.GetAtoms()) NumRadicalElectrons.version = "1.1.0" def _ChargeDescriptors(mol, force=False): if not force and hasattr(mol, '_chargeDescriptors'): return mol._chargeDescriptors chgs = rdPartialCharges.ComputeGasteigerCharges(mol) minChg = 500. 
maxChg = -500. for at in mol.GetAtoms(): chg = float(at.GetProp('_GasteigerCharge')) minChg = min(chg, minChg) maxChg = max(chg, maxChg) res = (minChg, maxChg) mol._chargeDescriptors = res return res def MaxPartialCharge(mol, force=False): _, res = _ChargeDescriptors(mol, force) return res MaxPartialCharge.version = "1.0.0" def MinPartialCharge(mol, force=False): res, _ = _ChargeDescriptors(mol, force) return res MinPartialCharge.version = "1.0.0" def MaxAbsPartialCharge(mol, force=False): v1, v2 = _ChargeDescriptors(mol, force) return max(abs(v1), abs(v2)) MaxAbsPartialCharge.version = "1.0.0" def MinAbsPartialCharge(mol, force=False): v1, v2 = _ChargeDescriptors(mol, force) return min(abs(v1), abs(v2)) MinAbsPartialCharge.version = "1.0.0" from rdkit.Chem.EState.EState import MaxEStateIndex, MinEStateIndex, MaxAbsEStateIndex, MinAbsEStateIndex _setupDescriptors(locals()) class PropertyFunctor(rdMolDescriptors.PythonPropertyFunctor): """Creates a python based property function that can be added to the global property list. To use, subclass this class and override the __call__ method. Then create an instance and add it to the registry. The __call__ method should return a numeric value. Example: class NumAtoms(Descriptors.PropertyFunctor): def __init__(self): Descriptors.PropertyFunctor.__init__(self, "NumAtoms", "1.0.0") def __call__(self, mol): return mol.GetNumAtoms() numAtoms = NumAtoms() rdMolDescriptors.Properties.RegisterProperty(numAtoms) """ def __init__(self, name, version): rdMolDescriptors.PythonPropertyFunctor.__init__(self, self, name, version) def __call__(self, mol): raise NotImplementedError("Please implement the __call__ method") #------------------------------------ # # doctest boilerplate # def _test(): import doctest, sys return doctest.testmod(sys.modules["__main__"], optionflags=doctest.ELLIPSIS) if __name__ == '__main__': import sys failed, tried = _test() sys.exit(failed)
1
16,632
Please remove Descriptors3D from this file. We just did this in master.
rdkit-rdkit
cpp
@@ -15,12 +15,17 @@ namespace storage { StorageClient::StorageClient(std::shared_ptr<folly::IOThreadPoolExecutor> threadPool) : ioThreadPool_(threadPool) { - client_ = std::make_unique<meta::MetaClient>(); - client_->init(); clientsMan_ = std::make_unique<thrift::ThriftClientManager<storage::cpp2::StorageServiceAsyncClient>>(); } + +void StorageClient::init(meta::MetaClient *client) { + CHECK(client); + client_ = client; +} + + folly::SemiFuture<StorageRpcResponse<cpp2::ExecResponse>> StorageClient::addVertices( GraphSpaceID space, std::vector<cpp2::Vertex> vertices,
1
/* Copyright (c) 2018 - present, VE Software Inc. All rights reserved * * This source code is licensed under Apache 2.0 License * (found in the LICENSE.Apache file in the root directory) */ #include "base/Base.h" #include "storage/client/StorageClient.h" #define ID_HASH(id, numShards) \ ((static_cast<uint64_t>(id)) % numShards + 1) namespace nebula { namespace storage { StorageClient::StorageClient(std::shared_ptr<folly::IOThreadPoolExecutor> threadPool) : ioThreadPool_(threadPool) { client_ = std::make_unique<meta::MetaClient>(); client_->init(); clientsMan_ = std::make_unique<thrift::ThriftClientManager<storage::cpp2::StorageServiceAsyncClient>>(); } folly::SemiFuture<StorageRpcResponse<cpp2::ExecResponse>> StorageClient::addVertices( GraphSpaceID space, std::vector<cpp2::Vertex> vertices, bool overwritable, folly::EventBase* evb) { auto clusters = clusterIdsToHosts( space, vertices, [] (const cpp2::Vertex& v) { return v.get_id(); }); std::unordered_map<HostAddr, cpp2::AddVerticesRequest> requests; for (auto& c : clusters) { auto& host = c.first; auto& req = requests[host]; req.set_space_id(space); req.set_overwritable(overwritable); req.set_parts(std::move(c.second)); } VLOG(3) << "requests size " << requests.size(); return collectResponse( evb, std::move(requests), [](cpp2::StorageServiceAsyncClient* client, const cpp2::AddVerticesRequest& r) { return client->future_addVertices(r); }); } folly::SemiFuture<StorageRpcResponse<cpp2::ExecResponse>> StorageClient::addEdges( GraphSpaceID space, std::vector<storage::cpp2::Edge> edges, bool overwritable, folly::EventBase* evb) { auto clusters = clusterIdsToHosts( space, edges, [] (const cpp2::Edge& e) { return e.get_key().get_src(); }); std::unordered_map<HostAddr, cpp2::AddEdgesRequest> requests; for (auto& c : clusters) { auto& host = c.first; auto& req = requests[host]; req.set_space_id(space); req.set_overwritable(overwritable); req.set_parts(std::move(c.second)); } return collectResponse( evb, std::move(requests), [](cpp2::StorageServiceAsyncClient* client, const cpp2::AddEdgesRequest& r) { return client->future_addEdges(r); }); } folly::SemiFuture<StorageRpcResponse<cpp2::QueryResponse>> StorageClient::getNeighbors( GraphSpaceID space, std::vector<VertexID> vertices, EdgeType edgeType, bool isOutBound, std::string filter, std::vector<cpp2::PropDef> returnCols, folly::EventBase* evb) { auto clusters = clusterIdsToHosts( space, vertices, [] (const VertexID& v) { return v; }); std::unordered_map<HostAddr, cpp2::GetNeighborsRequest> requests; for (auto& c : clusters) { auto& host = c.first; auto& req = requests[host]; req.set_space_id(space); req.set_parts(std::move(c.second)); // Make edge type a negative number when query in-bound req.set_edge_type(isOutBound ? 
edgeType : -edgeType); req.set_filter(filter); req.set_return_columns(returnCols); } return collectResponse( evb, std::move(requests), [isOutBound](cpp2::StorageServiceAsyncClient* client, const cpp2::GetNeighborsRequest& r) { if (isOutBound) { return client->future_getOutBound(r); } else { return client->future_getInBound(r); } }); } folly::SemiFuture<StorageRpcResponse<cpp2::QueryStatsResponse>> StorageClient::neighborStats( GraphSpaceID space, std::vector<VertexID> vertices, EdgeType edgeType, bool isOutBound, std::string filter, std::vector<cpp2::PropDef> returnCols, folly::EventBase* evb) { auto clusters = clusterIdsToHosts( space, vertices, [] (const VertexID& v) { return v; }); std::unordered_map<HostAddr, cpp2::GetNeighborsRequest> requests; for (auto& c : clusters) { auto& host = c.first; auto& req = requests[host]; req.set_space_id(space); req.set_parts(std::move(c.second)); // Make edge type a negative number when query in-bound req.set_edge_type(isOutBound ? edgeType : -edgeType); req.set_filter(filter); req.set_return_columns(returnCols); } return collectResponse( evb, std::move(requests), [isOutBound](cpp2::StorageServiceAsyncClient* client, const cpp2::GetNeighborsRequest& r) { if (isOutBound) { return client->future_outBoundStats(r); } else { return client->future_inBoundStats(r); } }); } folly::SemiFuture<StorageRpcResponse<cpp2::QueryResponse>> StorageClient::getVertexProps( GraphSpaceID space, std::vector<VertexID> vertices, std::vector<cpp2::PropDef> returnCols, folly::EventBase* evb) { auto clusters = clusterIdsToHosts( space, vertices, [] (const VertexID& v) { return v; }); std::unordered_map<HostAddr, cpp2::VertexPropRequest> requests; for (auto& c : clusters) { auto& host = c.first; auto& req = requests[host]; req.set_space_id(space); req.set_parts(std::move(c.second)); req.set_return_columns(returnCols); } return collectResponse( evb, std::move(requests), [](cpp2::StorageServiceAsyncClient* client, const cpp2::VertexPropRequest& r) { return client->future_getProps(r); }); } folly::SemiFuture<StorageRpcResponse<cpp2::EdgePropResponse>> StorageClient::getEdgeProps( GraphSpaceID space, std::vector<cpp2::EdgeKey> edges, std::vector<cpp2::PropDef> returnCols, folly::EventBase* evb) { auto clusters = clusterIdsToHosts( space, edges, [] (const cpp2::EdgeKey& v) { return v.get_src(); }); std::unordered_map<HostAddr, cpp2::EdgePropRequest> requests; for (auto& c : clusters) { auto& host = c.first; auto& req = requests[host]; req.set_space_id(space); for (auto& p : c.second) { req.set_edge_type((p.second[0].edge_type)); break; } req.set_parts(std::move(c.second)); req.set_return_columns(returnCols); } return collectResponse( evb, std::move(requests), [](cpp2::StorageServiceAsyncClient* client, const cpp2::EdgePropRequest& r) { return client->future_getEdgeProps(r); }); } PartitionID StorageClient::partId(GraphSpaceID spaceId, int64_t id) const { auto parts = client_->partsNum(spaceId); auto s = ID_HASH(id, parts); CHECK_GE(s, 0U); return s; } } // namespace storage } // namespace nebula
1
16,779
You could pass the MetaClient instance in the ctor, and create a new instance if it's nullptr.
vesoft-inc-nebula
cpp
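The suggestion above, sketched in Go with hypothetical names (the record itself is C++): accept the dependency through the constructor and fall back to a freshly created instance when the caller passes nothing.

```go
package storage

// MetaClient is a hypothetical stand-in for the real meta client.
type MetaClient struct{}

func NewMetaClient() *MetaClient { return &MetaClient{} }

type StorageClient struct {
	meta *MetaClient
}

// NewStorageClient takes an optional MetaClient; nil falls back to a
// freshly constructed default, as the reviewer suggests for the C++ ctor.
func NewStorageClient(meta *MetaClient) *StorageClient {
	if meta == nil {
		meta = NewMetaClient()
	}
	return &StorageClient{meta: meta}
}
```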
@@ -264,7 +264,11 @@ func subincludeTarget(s *scope, l core.BuildLabel) *core.BuildTarget { s.NAssert(s.contextPkg.Target(l.Name) == nil, "Target :%s is not defined in this package; it has to be defined before the subinclude() call", l.Name) } s.NAssert(l.IsAllTargets() || l.IsAllSubpackages(), "Can't pass :all or /... to subinclude()") + // Temporarily release the parallelism limiter; this is important to keep us from deadlocking + // all available parser threads (easy to happen if they're all waiting on a single target which now can't start) + <-s.interpreter.limiter t := s.state.WaitForBuiltTarget(l, pkgLabel) + s.interpreter.limiter <- struct{}{} // This is not quite right, if you subinclude from another subinclude we can basically // lose track of it later on. It's hard to know what better to do at this point though. s.contextPkg.RegisterSubinclude(l)
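A minimal sketch of the pattern this diff introduces, with hypothetical names (the interpreter's actual limiter setup isn't shown here, so this only mirrors the acquire/release direction visible in the diff): the buffered channel holds one token per slot currently in use — sending acquires a slot and blocks when the pool is saturated, receiving releases one — so a worker hands its slot back before blocking on another target and reacquires it afterwards, avoiding the all-threads-waiting deadlock the comment describes.

```go
package asp

// withSlotReleased releases the caller's slot on the limiter for the
// duration of a potentially long blocking wait, then reacquires it.
// Receiving from the buffered channel releases a slot; sending acquires one.
func withSlotReleased(limiter chan struct{}, wait func()) {
	<-limiter // hand the slot back so blocked peers can make progress
	defer func() { limiter <- struct{}{} }()
	wait() // e.g. a WaitForBuiltTarget-style call that may block indefinitely
}
```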
1
package asp import ( "encoding/json" "fmt" "io" "path" "reflect" "sort" "strconv" "strings" "github.com/manifoldco/promptui" "github.com/thought-machine/please/src/cli" "github.com/thought-machine/please/src/core" "github.com/thought-machine/please/src/fs" ) // A few sneaky globals for when we don't have a scope handy var stringMethods, dictMethods, configMethods map[string]*pyFunc // A nativeFunc is a function that implements a builtin function natively. type nativeFunc func(*scope, []pyObject) pyObject // registerBuiltins sets up the "special" builtins that map to native code. func registerBuiltins(s *scope) { setNativeCode(s, "build_rule", buildRule) setNativeCode(s, "subrepo", subrepo) setNativeCode(s, "fail", builtinFail) setNativeCode(s, "subinclude", subinclude) setNativeCode(s, "load", bazelLoad).varargs = true setNativeCode(s, "package", pkg).kwargs = true setNativeCode(s, "sorted", sorted) setNativeCode(s, "isinstance", isinstance) setNativeCode(s, "range", pyRange) setNativeCode(s, "enumerate", enumerate) setNativeCode(s, "zip", zip).varargs = true setNativeCode(s, "len", lenFunc) setNativeCode(s, "glob", glob) setNativeCode(s, "bool", boolType) setNativeCode(s, "int", intType) setNativeCode(s, "str", strType) setNativeCode(s, "join_path", joinPath).varargs = true setNativeCode(s, "get_base_path", packageName) setNativeCode(s, "package_name", packageName) setNativeCode(s, "canonicalise", canonicalise) setNativeCode(s, "get_labels", getLabels) setNativeCode(s, "add_dep", addDep) setNativeCode(s, "add_out", addOut) setNativeCode(s, "add_licence", addLicence) setNativeCode(s, "get_licences", getLicences) setNativeCode(s, "get_command", getCommand) setNativeCode(s, "set_command", setCommand) setNativeCode(s, "json", valueAsJSON) setNativeCode(s, "breakpoint", breakpoint) stringMethods = map[string]*pyFunc{ "join": setNativeCode(s, "join", strJoin), "split": setNativeCode(s, "split", strSplit), "replace": setNativeCode(s, "replace", strReplace), "partition": setNativeCode(s, "partition", strPartition), "rpartition": setNativeCode(s, "rpartition", strRPartition), "startswith": setNativeCode(s, "startswith", strStartsWith), "endswith": setNativeCode(s, "endswith", strEndsWith), "lstrip": setNativeCode(s, "lstrip", strLStrip), "rstrip": setNativeCode(s, "rstrip", strRStrip), "strip": setNativeCode(s, "strip", strStrip), "find": setNativeCode(s, "find", strFind), "rfind": setNativeCode(s, "find", strRFind), "format": setNativeCode(s, "format", strFormat), "count": setNativeCode(s, "count", strCount), "upper": setNativeCode(s, "upper", strUpper), "lower": setNativeCode(s, "lower", strLower), } stringMethods["format"].kwargs = true dictMethods = map[string]*pyFunc{ "get": setNativeCode(s, "get", dictGet), "setdefault": s.Lookup("setdefault").(*pyFunc), "keys": setNativeCode(s, "keys", dictKeys), "items": setNativeCode(s, "items", dictItems), "values": setNativeCode(s, "values", dictValues), "copy": setNativeCode(s, "copy", dictCopy), } configMethods = map[string]*pyFunc{ "get": setNativeCode(s, "config_get", configGet), "setdefault": s.Lookup("setdefault").(*pyFunc), } if s.state.Config.Parse.GitFunctions { setNativeCode(s, "git_branch", execGitBranch) setNativeCode(s, "git_commit", execGitCommit) setNativeCode(s, "git_show", execGitShow) setNativeCode(s, "git_state", execGitState) } setLogCode(s, "debug", log.Debug) setLogCode(s, "info", log.Info) setLogCode(s, "notice", log.Notice) setLogCode(s, "warning", log.Warning) setLogCode(s, "error", log.Errorf) setLogCode(s, "fatal", log.Fatalf) 
} // registerSubincludePackage sets up the package for remote subincludes. func registerSubincludePackage(s *scope) { // Another small hack - replace the code for these two with native code, must be done after the // declarations which are in misc_rules. buildRule := s.Lookup("build_rule").(*pyFunc) f := setNativeCode(s, "filegroup", filegroup) f.args = buildRule.args f.argIndices = buildRule.argIndices f.defaults = buildRule.defaults f.constants = buildRule.constants f.types = buildRule.types f = setNativeCode(s, "hash_filegroup", hashFilegroup) f.args = buildRule.args f.argIndices = buildRule.argIndices f.defaults = buildRule.defaults f.constants = buildRule.constants f.types = buildRule.types } func setNativeCode(s *scope, name string, code nativeFunc) *pyFunc { f := s.Lookup(name).(*pyFunc) f.nativeCode = code f.code = nil // Might as well save a little memory here return f } // setLogCode specialises setNativeCode for handling the log functions (of which there are a few) func setLogCode(s *scope, name string, f func(format string, args ...interface{})) { setNativeCode(s, name, func(s *scope, args []pyObject) pyObject { if str, ok := args[0].(pyString); ok { l := make([]interface{}, len(args)) for i, arg := range args { l[i] = arg } f("//%s: %s", s.pkgFilename(), fmt.Sprintf(string(str), l[1:]...)) return None } f("//%s: %s", s.pkgFilename(), args) return None }).varargs = true } // buildRule implements the build_rule() builtin function. // This is the main interface point; every build rule ultimately calls this to add // new objects to the build graph. func buildRule(s *scope, args []pyObject) pyObject { s.NAssert(s.pkg == nil, "Cannot create new build rules in this context") // We need to set various defaults from config here; it is useful to put it on the rule but not often so // because most rules pass them through anyway. // TODO(peterebden): when we get rid of the old parser, put these defaults on all the build rules and // get rid of this. args[11] = defaultFromConfig(s.config, args[11], "DEFAULT_VISIBILITY") args[15] = defaultFromConfig(s.config, args[15], "DEFAULT_TESTONLY") args[30] = defaultFromConfig(s.config, args[30], "DEFAULT_LICENCES") args[20] = defaultFromConfig(s.config, args[20], "BUILD_SANDBOX") args[21] = defaultFromConfig(s.config, args[21], "TEST_SANDBOX") target := createTarget(s, args) s.Assert(s.pkg.Target(target.Label.Name) == nil, "Duplicate build target in %s: %s", s.pkg.Name, target.Label.Name) populateTarget(s, target, args) s.state.AddTarget(s.pkg, target) if s.Callback { target.AddedPostBuild = true s.pkg.MarkTargetModified(target) } return pyString(":" + target.Label.Name) } // filegroup implements the filegroup() builtin. func filegroup(s *scope, args []pyObject) pyObject { args[1] = filegroupCommand return buildRule(s, args) } // hashFilegroup implements the hash_filegroup() builtin. func hashFilegroup(s *scope, args []pyObject) pyObject { args[1] = hashFilegroupCommand return buildRule(s, args) } // defaultFromConfig sets a default value from the config if the property isn't set. func defaultFromConfig(config *pyConfig, arg pyObject, name string) pyObject { if arg == nil || arg == None { return config.Get(name, arg) } return arg } // pkg implements the package() builtin function. 
func pkg(s *scope, args []pyObject) pyObject { s.Assert(s.pkg.NumTargets() == 0, "package() must be called before any build targets are defined") for k, v := range s.locals { k = strings.ToUpper(k) s.Assert(s.config.Get(k, nil) != nil, "error calling package(): %s is not a known config value", k) s.config.IndexAssign(pyString(k), v) } return None } // tagName applies the given tag to a target name. func tagName(name, tag string) string { if name[0] != '_' { name = "_" + name } if strings.ContainsRune(name, '#') { name = name + "_" } else { name = name + "#" } return name + tag } // bazelLoad implements the load() builtin, which is only available for Bazel compatibility. func bazelLoad(s *scope, args []pyObject) pyObject { s.Assert(s.state.Config.Bazel.Compatibility, "load() is only available in Bazel compatibility mode. See `plz help bazel` for more information.") // The argument always looks like a build label, but it is not really one (i.e. there is no BUILD file that defines it). // We do not support their legacy syntax here (i.e. "/tools/build_rules/build_test" etc). l := core.ParseBuildLabelContext(string(args[0].(pyString)), s.contextPkg) filename := path.Join(l.PackageName, l.Name) if l.Subrepo != "" { subrepo := s.state.Graph.Subrepo(l.Subrepo) if subrepo == nil || (subrepo.Target != nil && subrepo != s.contextPkg.Subrepo) { subincludeTarget(s, l) subrepo = s.state.Graph.SubrepoOrDie(l.Subrepo) } filename = subrepo.Dir(filename) } s.SetAll(s.interpreter.Subinclude(filename, s.contextPkg), false) return None } // builtinFail raises an immediate error that can't be intercepted. func builtinFail(s *scope, args []pyObject) pyObject { s.Error(string(args[0].(pyString))) return None } func subinclude(s *scope, args []pyObject) pyObject { s.NAssert(s.contextPkg == nil, "Cannot subinclude() from this context") target := string(args[0].(pyString)) t := subincludeTarget(s, core.ParseBuildLabelContext(target, s.contextPkg)) pkg := s.contextPkg if t.Subrepo != s.contextPkg.Subrepo && t.Subrepo != nil { pkg = &core.Package{ Name: "@" + t.Subrepo.Name, SubrepoName: t.Subrepo.Name, Subrepo: t.Subrepo, } } l := pkg.Label() s.Assert(l.CanSee(s.state, t), "Target %s isn't visible to be subincluded into %s", t.Label, l) for _, out := range t.Outputs() { s.SetAll(s.interpreter.Subinclude(path.Join(t.OutDir(), out), pkg), false) } return None } // subincludeTarget returns the target for a subinclude() call to a label. // It blocks until the target exists and is built. func subincludeTarget(s *scope, l core.BuildLabel) *core.BuildTarget { pkgLabel := s.contextPkg.Label() if l.Subrepo == pkgLabel.Subrepo && l.PackageName == pkgLabel.PackageName { // This is a subinclude in the same package, check the target exists. s.NAssert(s.contextPkg.Target(l.Name) == nil, "Target :%s is not defined in this package; it has to be defined before the subinclude() call", l.Name) } s.NAssert(l.IsAllTargets() || l.IsAllSubpackages(), "Can't pass :all or /... to subinclude()") t := s.state.WaitForBuiltTarget(l, pkgLabel) // This is not quite right, if you subinclude from another subinclude we can basically // lose track of it later on. It's hard to know what better to do at this point though. 
s.contextPkg.RegisterSubinclude(l) return t } func lenFunc(s *scope, args []pyObject) pyObject { return objLen(args[0]) } func objLen(obj pyObject) pyInt { switch t := obj.(type) { case pyList: return pyInt(len(t)) case pyDict: return pyInt(len(t)) case pyString: return pyInt(len(t)) } panic("object of type " + obj.Type() + " has no len()") } func isinstance(s *scope, args []pyObject) pyObject { obj := args[0] types := args[1] if f, ok := types.(*pyFunc); ok && isType(obj, f.name) { // Special case for 'str' and so forth that are functions but also types. return True } else if l, ok := types.(pyList); ok { for _, li := range l { if lif, ok := li.(*pyFunc); ok && isType(obj, lif.name) { return True } else if reflect.TypeOf(obj) == reflect.TypeOf(li) { return True } } } return newPyBool(reflect.TypeOf(obj) == reflect.TypeOf(types)) } func isType(obj pyObject, name string) bool { switch obj.(type) { case pyBool: return name == "bool" || name == "int" // N.B. For compatibility with old assert statements case pyInt: return name == "int" case pyString: return name == "str" case pyList: return name == "list" case pyDict: return name == "dict" case *pyConfig: return name == "config" } return false } func strJoin(s *scope, args []pyObject) pyObject { self := string(args[0].(pyString)) seq := asStringList(s, args[1], "seq") return pyString(strings.Join(seq, self)) } func strSplit(s *scope, args []pyObject) pyObject { self := args[0].(pyString) on := args[1].(pyString) return fromStringList(strings.Split(string(self), string(on))) } func strReplace(s *scope, args []pyObject) pyObject { self := args[0].(pyString) old := args[1].(pyString) new := args[2].(pyString) return pyString(strings.Replace(string(self), string(old), string(new), -1)) } func strPartition(s *scope, args []pyObject) pyObject { self := args[0].(pyString) sep := args[1].(pyString) if idx := strings.Index(string(self), string(sep)); idx != -1 { return pyList{self[:idx], self[idx : idx+len(sep)], self[idx+len(sep):]} } return pyList{self, pyString(""), pyString("")} } func strRPartition(s *scope, args []pyObject) pyObject { self := args[0].(pyString) sep := args[1].(pyString) if idx := strings.LastIndex(string(self), string(sep)); idx != -1 { return pyList{self[:idx], self[idx : idx+1], self[idx+1:]} } return pyList{pyString(""), pyString(""), self} } func strStartsWith(s *scope, args []pyObject) pyObject { self := args[0].(pyString) x := args[1].(pyString) return newPyBool(strings.HasPrefix(string(self), string(x))) } func strEndsWith(s *scope, args []pyObject) pyObject { self := args[0].(pyString) x := args[1].(pyString) return newPyBool(strings.HasSuffix(string(self), string(x))) } func strLStrip(s *scope, args []pyObject) pyObject { self := args[0].(pyString) cutset := args[1].(pyString) return pyString(strings.TrimLeft(string(self), string(cutset))) } func strRStrip(s *scope, args []pyObject) pyObject { self := args[0].(pyString) cutset := args[1].(pyString) return pyString(strings.TrimRight(string(self), string(cutset))) } func strStrip(s *scope, args []pyObject) pyObject { self := args[0].(pyString) cutset := args[1].(pyString) return pyString(strings.Trim(string(self), string(cutset))) } func strFind(s *scope, args []pyObject) pyObject { self := args[0].(pyString) needle := args[1].(pyString) return pyInt(strings.Index(string(self), string(needle))) } func strRFind(s *scope, args []pyObject) pyObject { self := args[0].(pyString) needle := args[1].(pyString) return pyInt(strings.LastIndex(string(self), string(needle))) } func 
strFormat(s *scope, args []pyObject) pyObject { self := string(args[0].(pyString)) for k, v := range s.locals { self = strings.Replace(self, "{"+k+"}", v.String(), -1) } return pyString(strings.Replace(strings.Replace(self, "{{", "{", -1), "}}", "}", -1)) } func strCount(s *scope, args []pyObject) pyObject { self := string(args[0].(pyString)) needle := string(args[1].(pyString)) return pyInt(strings.Count(self, needle)) } func strUpper(s *scope, args []pyObject) pyObject { self := string(args[0].(pyString)) return pyString(strings.ToUpper(self)) } func strLower(s *scope, args []pyObject) pyObject { self := string(args[0].(pyString)) return pyString(strings.ToLower(self)) } func boolType(s *scope, args []pyObject) pyObject { return newPyBool(args[0].IsTruthy()) } func intType(s *scope, args []pyObject) pyObject { i, err := strconv.Atoi(string(args[0].(pyString))) s.Assert(err == nil, "%s", err) return pyInt(i) } func strType(s *scope, args []pyObject) pyObject { return pyString(args[0].String()) } func glob(s *scope, args []pyObject) pyObject { include := asStringList(s, args[0], "include") exclude := asStringList(s, args[1], "exclude") hidden := args[2].IsTruthy() exclude = append(exclude, s.state.Config.Parse.BuildFileName...) return fromStringList(fs.Glob(s.state.Config.Parse.BuildFileName, s.pkg.SourceRoot(), include, exclude, hidden)) } func asStringList(s *scope, arg pyObject, name string) []string { l, ok := arg.(pyList) s.Assert(ok, "argument %s must be a list", name) sl := make([]string, len(l)) for i, x := range l { sx, ok := x.(pyString) s.Assert(ok, "%s must be a list of strings", name) sl[i] = string(sx) } return sl } func fromStringList(l []string) pyList { ret := make(pyList, len(l)) for i, s := range l { ret[i] = pyString(s) } return ret } func configGet(s *scope, args []pyObject) pyObject { self := args[0].(*pyConfig) return self.Get(string(args[1].(pyString)), args[2]) } func dictGet(s *scope, args []pyObject) pyObject { self := args[0].(pyDict) sk, ok := args[1].(pyString) s.Assert(ok, "dict keys must be strings, not %s", args[1].Type()) if ret, present := self[string(sk)]; present { return ret } return args[2] } func dictKeys(s *scope, args []pyObject) pyObject { self := args[0].(pyDict) ret := make(pyList, len(self)) for i, k := range self.Keys() { ret[i] = pyString(k) } return ret } func dictValues(s *scope, args []pyObject) pyObject { self := args[0].(pyDict) ret := make(pyList, len(self)) for i, k := range self.Keys() { ret[i] = self[k] } return ret } func dictItems(s *scope, args []pyObject) pyObject { self := args[0].(pyDict) ret := make(pyList, len(self)) for i, k := range self.Keys() { ret[i] = pyList{pyString(k), self[k]} } return ret } func dictCopy(s *scope, args []pyObject) pyObject { self := args[0].(pyDict) ret := make(pyDict, len(self)) for k, v := range self { ret[k] = v } return ret } func sorted(s *scope, args []pyObject) pyObject { l, ok := args[0].(pyList) s.Assert(ok, "unsortable type %s", args[0].Type()) l = l[:] sort.Slice(l, func(i, j int) bool { return l[i].Operator(LessThan, l[j]).IsTruthy() }) return l } func joinPath(s *scope, args []pyObject) pyObject { l := make([]string, len(args)) for i, arg := range args { l[i] = string(arg.(pyString)) } return pyString(path.Join(l...)) } func packageName(s *scope, args []pyObject) pyObject { return pyString(s.pkg.Name) } func canonicalise(s *scope, args []pyObject) pyObject { s.Assert(s.pkg != nil, "Cannot call canonicalise() from this context") label := core.ParseBuildLabel(string(args[0].(pyString)), 
s.pkg.Name) return pyString(label.String()) } func pyRange(s *scope, args []pyObject) pyObject { start := args[0].(pyInt) stop, isInt := args[1].(pyInt) step := args[2].(pyInt) if !isInt { // Stop not passed so we start at 0 and start is the stop. stop = start start = 0 } ret := make(pyList, 0, stop-start) for i := start; i < stop; i += step { ret = append(ret, i) } return ret } func enumerate(s *scope, args []pyObject) pyObject { l, ok := args[0].(pyList) s.Assert(ok, "Argument to enumerate must be a list, not %s", args[0].Type()) ret := make(pyList, len(l)) for i, li := range l { ret[i] = pyList{pyInt(i), li} } return ret } func zip(s *scope, args []pyObject) pyObject { lastLen := 0 for i, seq := range args { si, ok := seq.(pyList) s.Assert(ok, "Arguments to zip must be lists, not %s", si.Type()) // This isn't a restriction in Python but I can't be bothered handling all the stuff that real zip does. s.Assert(i == 0 || lastLen == len(si), "All arguments to zip must have the same length") lastLen = len(si) } ret := make(pyList, lastLen) for i := range ret { r := make(pyList, len(args)) for j, li := range args { r[j] = li.(pyList)[i] } ret[i] = r } return ret } // getLabels returns the set of labels for a build target and its transitive dependencies. // The labels are filtered by the given prefix, which is stripped from the returned labels. // Two formats are supported here: either passing just the name of a target in the current // package, or a build label referring specifically to one. func getLabels(s *scope, args []pyObject) pyObject { name := string(args[0].(pyString)) prefix := string(args[1].(pyString)) all := args[2].IsTruthy() if core.LooksLikeABuildLabel(name) { label := core.ParseBuildLabel(name, s.pkg.Name) return getLabelsInternal(s.state.Graph.TargetOrDie(label), prefix, core.Built, all) } target := getTargetPost(s, name) return getLabelsInternal(target, prefix, core.Building, all) } func getLabelsInternal(target *core.BuildTarget, prefix string, minState core.BuildTargetState, all bool) pyObject { if target.State() < minState { log.Fatalf("get_labels called on a target that is not yet built: %s", target.Label) } labels := map[string]bool{} done := map[*core.BuildTarget]bool{} var getLabels func(*core.BuildTarget) getLabels = func(t *core.BuildTarget) { for _, label := range t.Labels { if strings.HasPrefix(label, prefix) { labels[strings.TrimSpace(strings.TrimPrefix(label, prefix))] = true } } done[t] = true if !t.OutputIsComplete || t == target || all { for _, dep := range t.Dependencies() { if !done[dep] { getLabels(dep) } } } } getLabels(target) ret := make([]string, len(labels)) i := 0 for label := range labels { ret[i] = label i++ } sort.Strings(ret) return fromStringList(ret) } // getTargetPost is called by various functions to get a target from the current package. // Panics if the target is not in the current package or has already been built. func getTargetPost(s *scope, name string) *core.BuildTarget { target := s.pkg.Target(name) s.Assert(target != nil, "Unknown build target %s in %s", name, s.pkg.Name) // It'd be cheating to try to modify targets that're already built. // Prohibit this because it'd likely end up with nasty race conditions. s.Assert(target.State() < core.Built, "Attempted to modify target %s, but it's already built", target.Label) return target } // addDep adds a dependency to a target. 
func addDep(s *scope, args []pyObject) pyObject { s.Assert(s.Callback, "can only be called from a pre- or post-build callback") target := getTargetPost(s, string(args[0].(pyString))) dep := core.ParseBuildLabelContext(string(args[1].(pyString)), s.pkg) exported := args[2].IsTruthy() target.AddMaybeExportedDependency(dep, exported, false, false) // Note that here we're in a post-build function so we must call this explicitly // (in other callbacks it's handled after the package parses all at once). s.state.Graph.AddDependency(target.Label, dep) s.pkg.MarkTargetModified(target) return None } // addOut adds an output to a target. func addOut(s *scope, args []pyObject) pyObject { target := getTargetPost(s, string(args[0].(pyString))) name := string(args[1].(pyString)) out := string(args[2].(pyString)) if out == "" { target.AddOutput(name) s.pkg.MustRegisterOutput(name, target) } else { target.AddNamedOutput(name, out) s.pkg.MustRegisterOutput(out, target) } return None } // addLicence adds a licence to a target. func addLicence(s *scope, args []pyObject) pyObject { target := getTargetPost(s, string(args[0].(pyString))) target.AddLicence(string(args[1].(pyString))) return None } // getLicences returns the licences for a single target. func getLicences(s *scope, args []pyObject) pyObject { return fromStringList(getTargetPost(s, string(args[0].(pyString))).Licences) } // getCommand gets the command of a target, optionally for a configuration. func getCommand(s *scope, args []pyObject) pyObject { target := getTargetPost(s, string(args[0].(pyString))) return pyString(target.GetCommandConfig(string(args[1].(pyString)))) } // valueAsJSON returns a JSON-formatted string representation of a plz value. func valueAsJSON(s *scope, args []pyObject) pyObject { js, err := json.Marshal(args[0]) if err != nil { s.Error("Could not marshal object as JSON") return None } return pyString(js) } // setCommand sets the command of a target, optionally for a configuration. func setCommand(s *scope, args []pyObject) pyObject { target := getTargetPost(s, string(args[0].(pyString))) config := string(args[1].(pyString)) command := string(args[2].(pyString)) if command == "" { target.Command = config } else { target.AddCommand(config, command) } return None } // selectFunc implements the select() builtin. func selectFunc(s *scope, args []pyObject) pyObject { d, _ := asDict(args[0]) var def pyObject pkgName := "" if s.pkg != nil { pkgName = s.pkg.Name } // This is not really the same as Bazel's order-of-matching rules, but is at least deterministic. keys := d.Keys() for i := len(keys) - 1; i >= 0; i-- { k := keys[i] if k == "//conditions:default" || k == "default" { def = d[k] } else if selectTarget(s, core.ParseBuildLabel(k, pkgName)).HasLabel("config:on") { return d[k] } } s.NAssert(def == nil, "None of the select() conditions matched") return def } // selectTarget returns the target to be used for a select() call. // It panics appropriately if the target isn't built yet. func selectTarget(s *scope, l core.BuildLabel) *core.BuildTarget { if s.pkg != nil && l.PackageName == s.pkg.Name { t := s.pkg.Target(l.Name) s.NAssert(t == nil, "Target %s in select() call has not been defined yet", l.Name) return t } return subincludeTarget(s, l) } // subrepo implements the subrepo() builtin that adds a new repository. 
func subrepo(s *scope, args []pyObject) pyObject { s.NAssert(s.pkg == nil, "Cannot create new subrepos in this context") name := string(args[0].(pyString)) dep := string(args[1].(pyString)) var target *core.BuildTarget root := name if dep != "" { // N.B. The target must be already registered on this package. target = s.pkg.TargetOrDie(core.ParseBuildLabelContext(dep, s.pkg).Name) root = path.Join(target.OutDir(), name) } else if args[2] != None { root = string(args[2].(pyString)) } state := s.state if args[3] != None { // arg 3 is the config file to load state = state.ForConfig(path.Join(s.pkg.Name, string(args[3].(pyString)))) } else if args[4].IsTruthy() { // arg 4 is bazel_compat state = state.ForConfig() state.Config.Bazel.Compatibility = true state.Config.Parse.BuildFileName = append(state.Config.Parse.BuildFileName, "BUILD.bazel") } isCrossCompile := s.pkg.Subrepo != nil && s.pkg.Subrepo.IsCrossCompile arch := cli.HostArch() if args[5] != None { // arg 5 is arch-string, for arch-subrepos. givenArch := string(args[5].(pyString)) if err := arch.UnmarshalFlag(givenArch); err != nil { log.Fatalf("Could not interpret architecture '%s' for subrepo '%s'", givenArch, name) } state = state.ForArch(arch) isCrossCompile = true } sr := &core.Subrepo{ Name: s.pkg.SubrepoArchName(path.Join(s.pkg.Name, name)), Root: root, Target: target, State: state, Arch: arch, IsCrossCompile: isCrossCompile, } if s.state.Config.Bazel.Compatibility && s.pkg.Name == "workspace" { sr.Name = s.pkg.SubrepoArchName(name) } log.Debug("Registering subrepo %s in package %s", sr.Name, s.pkg.Label()) s.state.Graph.MaybeAddSubrepo(sr) return pyString("///" + sr.Name) } // breakpoint implements an interactive debugger for the breakpoint() builtin func breakpoint(s *scope, args []pyObject) pyObject { // Take this mutex to ensure only one debugger runs at a time s.interpreter.breakpointMutex.Lock() defer s.interpreter.breakpointMutex.Unlock() fmt.Printf("breakpoint() encountered in %s, entering interactive debugger...\n", s.contextPkg.Filename) // This is a small hack to get the return value back from an ident statement, which // is normally not available since we don't have implicit returns. interpretStatements := func(stmts []*Statement) (ret pyObject, err error) { if len(stmts) == 1 && stmts[0].Ident != nil { defer func() { if r := recover(); r != nil { err = fmt.Errorf("%s", r) } }() return s.interpretIdentStatement(stmts[0].Ident), nil } return s.interpreter.interpretStatements(s, stmts) } for { prompt := promptui.Prompt{ Label: "plz", Validate: func(input string) error { _, err := s.interpreter.parser.ParseData([]byte(input), "<stdin>") return err }, } if input, err := prompt.Run(); err != nil { if err == io.EOF { break } else if err.Error() != "^C" { log.Error("%s", err) } } else if stmts, err := s.interpreter.parser.ParseData([]byte(input), "<stdin>"); err != nil { log.Error("Syntax error: %s", err) } else if ret, err := interpretStatements(stmts); err != nil { log.Error("%s", err) } else if ret != nil && ret != None { fmt.Printf("%s\n", ret) } else { fmt.Printf("\n") } } fmt.Printf("Debugger exited, continuing...\n") return None }
1
9,036
What does this do? Seems like a poor man's mutex/semaphore?
thought-machine-please
go
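The reviewer's question above is about `s.interpreter.breakpointMutex.Lock()` in the `breakpoint` builtin. A minimal sketch of that pattern, assuming (the declaration is not shown in this record) that `breakpointMutex` is a plain `sync.Mutex` on the interpreter — i.e. a real mutex used to serialise interactive debugger sessions, not a hand-rolled one:

```go
package main

import (
	"fmt"
	"sync"
)

// interpreter is a stand-in for the asp interpreter; only the assumed
// breakpointMutex field matters for this sketch.
type interpreter struct {
	breakpointMutex sync.Mutex
}

// breakpoint mirrors the locking in the builtin above: a second goroutine
// hitting a breakpoint blocks here until the first debugger session exits,
// so only one REPL ever reads from stdin at a time.
func (i *interpreter) breakpoint(pkg string) {
	i.breakpointMutex.Lock()
	defer i.breakpointMutex.Unlock()
	fmt.Printf("breakpoint() encountered in %s, entering debugger...\n", pkg)
	// ... interactive prompt loop would run here ...
}

func main() {
	i := &interpreter{}
	i.breakpoint("pkg/BUILD")
}
```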
@@ -45,5 +45,5 @@ type ResponseWriter interface { // SetApplicationError specifies that this response contains an // application error. If called, this MUST be called before any invocation // of Write(). - SetApplicationError() + SetApplicationError(err error) }
1
// Copyright (c) 2018 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package transport import "io" // Response is the low level response representation. type Response struct { Headers Headers Body io.ReadCloser ApplicationError bool } // ResponseWriter allows Handlers to write responses in a streaming fashion. // // Functions on ResponseWriter are not thread-safe. type ResponseWriter interface { io.Writer // AddHeaders adds the given headers to the response. If called, this MUST // be called before any invocation of Write(). // // This MUST NOT panic if Headers is nil. AddHeaders(Headers) // TODO(abg): Ability to set individual headers instead? // SetApplicationError specifies that this response contains an // application error. If called, this MUST be called before any invocation // of Write(). SetApplicationError() }
1
16,263
This is a breaking change, and cannot be made. We have committed to this API for v1.
yarpc-yarpc-go
go
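One way to honour the reviewer's objection without breaking the v1 contract is to add an opt-in interface next to `ResponseWriter` instead of changing the signature of `SetApplicationError`. This is a hedged sketch with hypothetical names (`ExtendedResponseWriter`, `SetApplicationErrorMeta`), not yarpc's actual API:

```go
package main

// Minimal stand-in for the committed v1 interface; the real one lives in
// yarpc's transport package.
type ResponseWriter interface {
	SetApplicationError()
}

// ExtendedResponseWriter is hypothetical: transports that can carry error
// detail implement it in addition to ResponseWriter, so the v1 method
// signature never changes.
type ExtendedResponseWriter interface {
	SetApplicationErrorMeta(err error)
}

// setAppError flags the application error for every writer, and passes the
// error detail through only where the writer supports it.
func setAppError(w ResponseWriter, err error) {
	w.SetApplicationError()
	if ew, ok := w.(ExtendedResponseWriter); ok {
		ew.SetApplicationErrorMeta(err)
	}
}

func main() {}
```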
@@ -136,16 +136,14 @@ class StatementsAnalyzer extends SourceAnalyzer implements StatementsSource * Checks an array of statements for validity * * @param array<PhpParser\Node\Stmt> $stmts - * @param Context|null $global_context - * @param bool $root_scope * * @return null|false */ public function analyze( array $stmts, Context $context, - Context $global_context = null, - $root_scope = false + ?Context $global_context = null, + bool $root_scope = false ) { if (!$stmts) { return;
1
<?php namespace Psalm\Internal\Analyzer; use PhpParser; use Psalm\Internal\Analyzer\Statements\Block\DoAnalyzer; use Psalm\Internal\Analyzer\Statements\Block\ForAnalyzer; use Psalm\Internal\Analyzer\Statements\Block\ForeachAnalyzer; use Psalm\Internal\Analyzer\Statements\Block\IfAnalyzer; use Psalm\Internal\Analyzer\Statements\Block\SwitchAnalyzer; use Psalm\Internal\Analyzer\Statements\Block\TryAnalyzer; use Psalm\Internal\Analyzer\Statements\Block\WhileAnalyzer; use Psalm\Internal\Analyzer\Statements\Expression\AssignmentAnalyzer; use Psalm\Internal\Analyzer\Statements\Expression\Assignment\InstancePropertyAssignmentAnalyzer; use Psalm\Internal\Analyzer\Statements\Expression\Fetch\ClassConstFetchAnalyzer; use Psalm\Internal\Analyzer\Statements\Expression\Fetch\ConstFetchAnalyzer; use Psalm\Internal\Analyzer\Statements\Expression\Fetch\VariableFetchAnalyzer; use Psalm\Internal\Analyzer\Statements\Expression\SimpleTypeInferer; use Psalm\Internal\Analyzer\Statements\ExpressionAnalyzer; use Psalm\Internal\Analyzer\Statements\ReturnAnalyzer; use Psalm\Internal\Analyzer\Statements\ThrowAnalyzer; use Psalm\Internal\Scanner\ParsedDocblock; use Psalm\Codebase; use Psalm\CodeLocation; use Psalm\Context; use Psalm\DocComment; use Psalm\Exception\DocblockParseException; use Psalm\FileManipulation; use Psalm\Internal\FileManipulation\FileManipulationBuffer; use Psalm\Issue\InvalidDocblock; use Psalm\Issue\MissingDocblockType; use Psalm\Issue\Trace; use Psalm\Issue\UndefinedTrace; use Psalm\Issue\UnevaluatedCode; use Psalm\Issue\UnrecognizedStatement; use Psalm\Issue\UnusedVariable; use Psalm\IssueBuffer; use Psalm\StatementsSource; use Psalm\Type; use function strtolower; use function fwrite; use const STDERR; use function array_filter; use function array_map; use function array_merge; use function preg_split; use function get_class; use function strrpos; use function strlen; use function substr; use function array_key_exists; use function array_change_key_case; use function array_reverse; use function trim; use function array_column; use function array_combine; /** * @internal */ class StatementsAnalyzer extends SourceAnalyzer implements StatementsSource { /** * @var SourceAnalyzer */ protected $source; /** * @var FileAnalyzer */ protected $file_analyzer; /** * @var Codebase */ protected $codebase; /** * @var array<string, CodeLocation> */ private $all_vars = []; /** * @var array<string, int> */ private $var_branch_points = []; /** * Possibly undefined variables should be initialised if we're altering code * * @var array<string, int>|null */ private $vars_to_initialize; /** * @var array<string, FunctionAnalyzer> */ private $function_analyzers = []; /** * @var array<string, array{0: string, 1: CodeLocation}> */ private $unused_var_locations = []; /** * @var array<string, bool> */ private $used_var_locations = []; /** * @var ?array<string, bool> */ public $byref_uses; /** * @var ParsedDocblock|null */ private $parsed_docblock = null; /** * @var ?string */ private $fake_this_class = null; /** @var \Psalm\Internal\Provider\NodeDataProvider */ public $node_data; public function __construct(SourceAnalyzer $source, \Psalm\Internal\Provider\NodeDataProvider $node_data) { $this->source = $source; $this->file_analyzer = $source->getFileAnalyzer(); $this->codebase = $source->getCodebase(); $this->node_data = $node_data; } /** * Checks an array of statements for validity * * @param array<PhpParser\Node\Stmt> $stmts * @param Context|null $global_context * @param bool $root_scope * * @return null|false */ public 
function analyze( array $stmts, Context $context, Context $global_context = null, $root_scope = false ) { if (!$stmts) { return; } // hoist functions to the top $this->hoistFunctions($stmts); $project_analyzer = $this->getFileAnalyzer()->project_analyzer; $codebase = $project_analyzer->getCodebase(); if ($codebase->config->hoist_constants) { self::hoistConstants($this, $stmts, $context); } foreach ($stmts as $stmt) { if (self::analyzeStatement($this, $stmt, $context, $global_context) === false) { return false; } } if ($root_scope && !$context->collect_initializations && $codebase->find_unused_variables && $context->check_variables ) { $this->checkUnreferencedVars($stmts); } if ($codebase->alter_code && $root_scope && $this->vars_to_initialize) { $file_contents = $codebase->getFileContents($this->getFilePath()); foreach ($this->vars_to_initialize as $var_id => $branch_point) { $newline_pos = (int)strrpos($file_contents, "\n", $branch_point - strlen($file_contents)) + 1; $indentation = substr($file_contents, $newline_pos, $branch_point - $newline_pos); FileManipulationBuffer::add($this->getFilePath(), [ new FileManipulation($branch_point, $branch_point, $var_id . ' = null;' . "\n" . $indentation), ]); } } return null; } /** * @param array<PhpParser\Node\Stmt> $stmts */ private function hoistFunctions(array $stmts) : void { foreach ($stmts as $stmt) { if ($stmt instanceof PhpParser\Node\Stmt\Function_) { $function_name = strtolower($stmt->name->name); if ($ns = $this->getNamespace()) { $fq_function_name = strtolower($ns) . '\\' . $function_name; } else { $fq_function_name = $function_name; } try { $function_analyzer = new FunctionAnalyzer($stmt, $this->source); $this->function_analyzers[$fq_function_name] = $function_analyzer; } catch (\UnexpectedValueException $e) { // do nothing } } } } /** * @param array<PhpParser\Node\Stmt> $stmts */ private static function hoistConstants( StatementsAnalyzer $statements_analyzer, array $stmts, Context $context ) : void { $codebase = $statements_analyzer->getCodebase(); foreach ($stmts as $stmt) { if ($stmt instanceof PhpParser\Node\Stmt\Const_) { foreach ($stmt->consts as $const) { ConstFetchAnalyzer::setConstType( $statements_analyzer, $const->name->name, SimpleTypeInferer::infer( $codebase, $statements_analyzer->node_data, $const->value, $statements_analyzer->getAliases(), $statements_analyzer ) ?: Type::getMixed(), $context ); } } elseif ($stmt instanceof PhpParser\Node\Stmt\Expression && $stmt->expr instanceof PhpParser\Node\Expr\FuncCall && $stmt->expr->name instanceof PhpParser\Node\Name && $stmt->expr->name->parts === ['define'] && isset($stmt->expr->args[1]) ) { $const_name = ConstFetchAnalyzer::getConstName( $stmt->expr->args[0]->value, $statements_analyzer->node_data, $codebase, $statements_analyzer->getAliases() ); if ($const_name !== null) { ConstFetchAnalyzer::setConstType( $statements_analyzer, $const_name, Statements\Expression\SimpleTypeInferer::infer( $codebase, $statements_analyzer->node_data, $stmt->expr->args[1]->value, $statements_analyzer->getAliases(), $statements_analyzer ) ?: Type::getMixed(), $context ); } } } } /** * @psalm-return false|null */ private static function analyzeStatement( StatementsAnalyzer $statements_analyzer, PhpParser\Node\Stmt $stmt, Context $context, ?Context $global_context ) { $ignore_variable_property = false; $ignore_variable_method = false; $codebase = $statements_analyzer->getCodebase(); if ($context->has_returned && !$context->collect_initializations && !$context->collect_mutations && !($stmt 
instanceof PhpParser\Node\Stmt\Nop) && !($stmt instanceof PhpParser\Node\Stmt\InlineHTML) ) { if ($codebase->find_unused_variables) { if (IssueBuffer::accepts( new UnevaluatedCode( 'Expressions after return/throw/continue', new CodeLocation($statements_analyzer->source, $stmt) ), $statements_analyzer->source->getSuppressedIssues() )) { return false; } } return; } if ($statements_analyzer->getProjectAnalyzer()->debug_lines) { fwrite(STDERR, $statements_analyzer->getFilePath() . ':' . $stmt->getLine() . "\n"); } /* if (isset($context->vars_in_scope['$array']) && !$stmt instanceof PhpParser\Node\Stmt\Nop) { var_dump($stmt->getLine(), $context->vars_in_scope['$array']); } */ $new_issues = null; $traced_variables = []; if ($docblock = $stmt->getDocComment()) { $statements_analyzer->parseStatementDocblock($docblock, $stmt, $context); if (isset($statements_analyzer->parsed_docblock->tags['psalm-trace'])) { foreach ($statements_analyzer->parsed_docblock->tags['psalm-trace'] as $traced_variable_line) { $possible_traced_variable_names = preg_split('/[\s]+/', $traced_variable_line); if ($possible_traced_variable_names) { $traced_variables = array_merge( $traced_variables, array_filter($possible_traced_variable_names) ); } } } if (isset($statements_analyzer->parsed_docblock->tags['psalm-ignore-variable-method'])) { $context->ignore_variable_method = $ignore_variable_method = true; } if (isset($statements_analyzer->parsed_docblock->tags['psalm-ignore-variable-property'])) { $context->ignore_variable_property = $ignore_variable_property = true; } if (isset($statements_analyzer->parsed_docblock->tags['psalm-suppress'])) { $suppressed = array_filter( array_map( /** * @param string $line * * @return string */ function ($line): string { return preg_split('/[\s]+/', $line)[0]; }, $statements_analyzer->parsed_docblock->tags['psalm-suppress'] ) ); if ($suppressed) { $new_issues = []; foreach ($suppressed as $offset => $issue_type) { $offset += $docblock->getFilePos(); $new_issues[$offset] = $issue_type; if ($issue_type === 'InaccessibleMethod') { continue; } if ($codebase->track_unused_suppressions) { IssueBuffer::addUnusedSuppression( $statements_analyzer->getFilePath(), $offset, $issue_type ); } } $statements_analyzer->addSuppressedIssues($new_issues); } } if (isset($statements_analyzer->parsed_docblock->combined_tags['var']) && !($stmt instanceof PhpParser\Node\Stmt\Expression && $stmt->expr instanceof PhpParser\Node\Expr\Assign) && !$stmt instanceof PhpParser\Node\Stmt\Foreach_ && !$stmt instanceof PhpParser\Node\Stmt\Return_ ) { $file_path = $statements_analyzer->getRootFilePath(); $file_storage_provider = $codebase->file_storage_provider; $file_storage = $file_storage_provider->get($file_path); $template_type_map = $statements_analyzer->getTemplateTypeMap(); $var_comments = []; try { $var_comments = CommentAnalyzer::arrayToDocblocks( $docblock, $statements_analyzer->parsed_docblock, $statements_analyzer->getSource(), $statements_analyzer->getAliases(), $template_type_map, $file_storage->type_aliases ); } catch (\Psalm\Exception\IncorrectDocblockException $e) { if (IssueBuffer::accepts( new MissingDocblockType( (string)$e->getMessage(), new CodeLocation($statements_analyzer->getSource(), $stmt) ) )) { // fall through } } catch (\Psalm\Exception\DocblockParseException $e) { if (IssueBuffer::accepts( new InvalidDocblock( (string)$e->getMessage(), new CodeLocation($statements_analyzer->getSource(), $stmt) ) )) { // fall through } } foreach ($var_comments as $var_comment) { 
AssignmentAnalyzer::assignTypeFromVarDocblock( $statements_analyzer, $stmt, $var_comment, $context ); } } } else { $statements_analyzer->parsed_docblock = null; } if ($stmt instanceof PhpParser\Node\Stmt\If_) { if (IfAnalyzer::analyze($statements_analyzer, $stmt, $context) === false) { return false; } } elseif ($stmt instanceof PhpParser\Node\Stmt\TryCatch) { if (TryAnalyzer::analyze($statements_analyzer, $stmt, $context) === false) { return false; } } elseif ($stmt instanceof PhpParser\Node\Stmt\For_) { if (ForAnalyzer::analyze($statements_analyzer, $stmt, $context) === false) { return false; } } elseif ($stmt instanceof PhpParser\Node\Stmt\Foreach_) { if (ForeachAnalyzer::analyze($statements_analyzer, $stmt, $context) === false) { return false; } } elseif ($stmt instanceof PhpParser\Node\Stmt\While_) { if (WhileAnalyzer::analyze($statements_analyzer, $stmt, $context) === false) { return false; } } elseif ($stmt instanceof PhpParser\Node\Stmt\Do_) { DoAnalyzer::analyze($statements_analyzer, $stmt, $context); } elseif ($stmt instanceof PhpParser\Node\Stmt\Const_) { ConstFetchAnalyzer::analyzeConstAssignment($statements_analyzer, $stmt, $context); } elseif ($stmt instanceof PhpParser\Node\Stmt\Unset_) { Statements\UnsetAnalyzer::analyze($statements_analyzer, $stmt, $context); } elseif ($stmt instanceof PhpParser\Node\Stmt\Return_) { ReturnAnalyzer::analyze($statements_analyzer, $stmt, $context); $context->has_returned = true; } elseif ($stmt instanceof PhpParser\Node\Stmt\Throw_) { ThrowAnalyzer::analyze($statements_analyzer, $stmt, $context); $context->has_returned = true; } elseif ($stmt instanceof PhpParser\Node\Stmt\Switch_) { SwitchAnalyzer::analyze($statements_analyzer, $stmt, $context); } elseif ($stmt instanceof PhpParser\Node\Stmt\Break_) { Statements\BreakAnalyzer::analyze($statements_analyzer, $stmt, $context); } elseif ($stmt instanceof PhpParser\Node\Stmt\Continue_) { Statements\ContinueAnalyzer::analyze($statements_analyzer, $stmt, $context); } elseif ($stmt instanceof PhpParser\Node\Stmt\Static_) { Statements\StaticAnalyzer::analyze($statements_analyzer, $stmt, $context); } elseif ($stmt instanceof PhpParser\Node\Stmt\Echo_) { if (Statements\EchoAnalyzer::analyze($statements_analyzer, $stmt, $context) === false) { return false; } } elseif ($stmt instanceof PhpParser\Node\Stmt\Function_) { FunctionAnalyzer::analyzeStatement($statements_analyzer, $stmt, $context); } elseif ($stmt instanceof PhpParser\Node\Stmt\Expression) { if (ExpressionAnalyzer::analyze( $statements_analyzer, $stmt->expr, $context, false, $global_context, true ) === false) { return false; } } elseif ($stmt instanceof PhpParser\Node\Stmt\InlineHTML) { // do nothing } elseif ($stmt instanceof PhpParser\Node\Stmt\Global_) { Statements\GlobalAnalyzer::analyze($statements_analyzer, $stmt, $context, $global_context); } elseif ($stmt instanceof PhpParser\Node\Stmt\Property) { InstancePropertyAssignmentAnalyzer::analyzeStatement($statements_analyzer, $stmt, $context); } elseif ($stmt instanceof PhpParser\Node\Stmt\ClassConst) { ClassConstFetchAnalyzer::analyzeClassConstAssignment($statements_analyzer, $stmt, $context); } elseif ($stmt instanceof PhpParser\Node\Stmt\Class_) { try { $class_analyzer = new ClassAnalyzer( $stmt, $statements_analyzer->source, $stmt->name ? 
$stmt->name->name : null ); $class_analyzer->analyze(null, $global_context); } catch (\InvalidArgumentException $e) { // disregard this exception, we'll likely see it elsewhere in the form // of an issue } } elseif ($stmt instanceof PhpParser\Node\Stmt\Nop) { // do nothing } elseif ($stmt instanceof PhpParser\Node\Stmt\Goto_) { // do nothing } elseif ($stmt instanceof PhpParser\Node\Stmt\Label) { // do nothing } elseif ($stmt instanceof PhpParser\Node\Stmt\Declare_) { foreach ($stmt->declares as $declaration) { if ((string) $declaration->key === 'strict_types' && $declaration->value instanceof PhpParser\Node\Scalar\LNumber && $declaration->value->value === 1 ) { $context->strict_types = true; } } } elseif ($stmt instanceof PhpParser\Node\Stmt\HaltCompiler) { $context->has_returned = true; } else { if (IssueBuffer::accepts( new UnrecognizedStatement( 'Psalm does not understand ' . get_class($stmt), new CodeLocation($statements_analyzer->source, $stmt) ), $statements_analyzer->getSuppressedIssues() )) { return false; } } $codebase = $statements_analyzer->getCodebase(); $plugin_classes = $codebase->config->after_statement_checks; if ($plugin_classes) { $file_manipulations = []; foreach ($plugin_classes as $plugin_fq_class_name) { if ($plugin_fq_class_name::afterStatementAnalysis( $stmt, $context, $statements_analyzer, $codebase, $file_manipulations ) === false) { return false; } } if ($file_manipulations) { FileManipulationBuffer::add($statements_analyzer->getFilePath(), $file_manipulations); } } if ($new_issues) { $statements_analyzer->removeSuppressedIssues($new_issues); } if ($ignore_variable_property) { $context->ignore_variable_property = false; } if ($ignore_variable_method) { $context->ignore_variable_method = false; } foreach ($traced_variables as $traced_variable) { if (isset($context->vars_in_scope[$traced_variable])) { if (IssueBuffer::accepts( new Trace( $traced_variable . ': ' . $context->vars_in_scope[$traced_variable]->getId(), new CodeLocation($statements_analyzer->source, $stmt) ), $statements_analyzer->getSuppressedIssues() )) { // fall through } } else { if (IssueBuffer::accepts( new UndefinedTrace( 'Attempt to trace undefined variable ' . $traced_variable, new CodeLocation($statements_analyzer->source, $stmt) ), $statements_analyzer->getSuppressedIssues() )) { // fall through } } } } private function parseStatementDocblock( PhpParser\Comment\Doc $docblock, PhpParser\Node\Stmt $stmt, Context $context ) : void { $codebase = $this->getCodebase(); try { $this->parsed_docblock = DocComment::parsePreservingLength($docblock); } catch (DocblockParseException $e) { if (IssueBuffer::accepts( new InvalidDocblock( (string)$e->getMessage(), new CodeLocation($this->getSource(), $stmt, null, true) ) )) { // fall through } $this->parsed_docblock = null; } $comments = $this->parsed_docblock; if (isset($comments->tags['psalm-scope-this'])) { $trimmed = trim(\reset($comments->tags['psalm-scope-this'])); if (!$codebase->classExists($trimmed)) { if (IssueBuffer::accepts( new \Psalm\Issue\UndefinedDocblockClass( 'Scope class ' . $trimmed . 
' does not exist', new CodeLocation($this->getSource(), $stmt, null, true), $trimmed ) )) { // fall through } } else { $this_type = Type::parseString($trimmed); $context->self = $trimmed; $context->vars_in_scope['$this'] = $this_type; $this->setFQCLN($trimmed); } } } /** * @param array<PhpParser\Node\Stmt> $stmts * @return void */ public function checkUnreferencedVars(array $stmts) { $source = $this->getSource(); $codebase = $source->getCodebase(); $function_storage = $source instanceof FunctionLikeAnalyzer ? $source->getFunctionLikeStorage($this) : null; if ($codebase->alter_code) { // Reverse array to deal with chain of assignments $this->unused_var_locations = array_reverse($this->unused_var_locations, true); } $var_list = array_column($this->unused_var_locations, 0); $loc_list = array_column($this->unused_var_locations, 1); $project_analyzer = $this->getProjectAnalyzer(); $unused_var_remover = new Statements\UnusedAssignmentRemover(); foreach ($this->unused_var_locations as $hash => [$var_id, $original_location]) { if (substr($var_id, 0, 2) === '$_' || isset($this->used_var_locations[$hash])) { continue; } if ((!$function_storage || !array_key_exists(substr($var_id, 1), $function_storage->param_lookup)) && !isset($this->byref_uses[$var_id]) && !VariableFetchAnalyzer::isSuperGlobal($var_id) ) { $issue = new UnusedVariable( 'Variable ' . $var_id . ' is never referenced', $original_location ); if ($codebase->alter_code && !$unused_var_remover->checkIfVarRemoved($var_id, $original_location) && isset($project_analyzer->getIssuesToFix()['UnusedVariable']) && !IssueBuffer::isSuppressed($issue, $this->getSuppressedIssues()) ) { $unused_var_remover->findUnusedAssignment( $this->getCodebase(), $stmts, array_combine($var_list, $loc_list), $var_id, $original_location ); } if (IssueBuffer::accepts( $issue, $this->getSuppressedIssues(), true )) { // fall through } } } } /** * @param string $var_name * */ public function hasVariable($var_name): bool { return isset($this->all_vars[$var_name]); } /** * @param string $var_id * @param int|null $branch_point * * @return void */ public function registerVariable($var_id, CodeLocation $location, $branch_point) { $this->all_vars[$var_id] = $location; if ($branch_point) { $this->var_branch_points[$var_id] = $branch_point; } $this->registerVariableAssignment($var_id, $location); } /** * @param string $var_id * * @return void */ public function registerVariableAssignment($var_id, CodeLocation $location) { $this->unused_var_locations[$location->getHash()] = [$var_id, $location]; } /** * @param array<string, CodeLocation> $locations * @return void */ public function registerVariableUses(array $locations) { foreach ($locations as $hash => $_) { unset($this->unused_var_locations[$hash]); $this->used_var_locations[$hash] = true; } } /** * @return array<string, array{0: string, 1: CodeLocation}> */ public function getUnusedVarLocations(): array { return \array_diff_key($this->unused_var_locations, $this->used_var_locations); } /** * The first appearance of the variable in this set of statements being evaluated * * @param string $var_id * */ public function getFirstAppearance($var_id): ?CodeLocation { return isset($this->all_vars[$var_id]) ? $this->all_vars[$var_id] : null; } /** * @param string $var_id * */ public function getBranchPoint($var_id): ?int { return isset($this->var_branch_points[$var_id]) ? 
$this->var_branch_points[$var_id] : null; } /** * @param string $var_id * @param int $branch_point * * @return void */ public function addVariableInitialization($var_id, $branch_point) { $this->vars_to_initialize[$var_id] = $branch_point; } public function getFileAnalyzer() : FileAnalyzer { return $this->file_analyzer; } public function getCodebase() : Codebase { return $this->codebase; } /** * @return array<string, FunctionAnalyzer> */ public function getFunctionAnalyzers(): array { return $this->function_analyzers; } /** * @param array<string, bool> $byref_uses * @return void */ public function setByRefUses(array $byref_uses) { $this->byref_uses = $byref_uses; } /** * @return array<string, array<array-key, CodeLocation>> */ public function getUncaughtThrows(Context $context): array { $uncaught_throws = []; if ($context->collect_exceptions) { if ($context->possibly_thrown_exceptions) { $config = $this->codebase->config; $ignored_exceptions = array_change_key_case( $context->is_global ? $config->ignored_exceptions_in_global_scope : $config->ignored_exceptions ); $ignored_exceptions_and_descendants = array_change_key_case( $context->is_global ? $config->ignored_exceptions_and_descendants_in_global_scope : $config->ignored_exceptions_and_descendants ); foreach ($context->possibly_thrown_exceptions as $possibly_thrown_exception => $codelocations) { if (isset($ignored_exceptions[strtolower($possibly_thrown_exception)])) { continue; } $is_expected = false; foreach ($ignored_exceptions_and_descendants as $expected_exception => $_) { try { if ($expected_exception === strtolower($possibly_thrown_exception) || $this->codebase->classExtends($possibly_thrown_exception, $expected_exception) ) { $is_expected = true; break; } } catch (\InvalidArgumentException $e) { $is_expected = true; break; } } if (!$is_expected) { $uncaught_throws[$possibly_thrown_exception] = $codelocations; } } } } return $uncaught_throws; } public function getFunctionAnalyzer(string $function_id) : ?FunctionAnalyzer { return $this->function_analyzers[$function_id] ?? null; } public function getParsedDocblock() : ?ParsedDocblock { return $this->parsed_docblock; } public function getFQCLN(): ?string { if ($this->fake_this_class) { return $this->fake_this_class; } return parent::getFQCLN(); } public function setFQCLN(string $fake_this_class) : void { $this->fake_this_class = $fake_this_class; } public function getNodeTypeProvider() : \Psalm\NodeTypeProvider { return $this->node_data; } }
1
9,051
Can also drop corresponding types from docblock here
vimeo-psalm
php
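The patch above already moves `$global_context` and `$root_scope` into native parameter types; the reviewer's note is that once a parameter is typed natively, its `@param` line carries no extra information and can be deleted. A sketch of the slimmed-down docblock, keeping only what PHP's type system cannot express:

```php
<?php

class Context {} // stub so the sketch is self-contained

/**
 * Checks an array of statements for validity
 *
 * @param array<PhpParser\Node\Stmt> $stmts  kept: PHP cannot express the element type
 *
 * @return null|false
 */
function analyze(array $stmts, Context $context, ?Context $global_context = null, bool $root_scope = false)
{
    return null; // body elided
}
```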
@@ -105,7 +105,7 @@ public class RequestHandler implements Comparable<RequestHandler> { public void process() { switch (request.getRequestType()) { case START_SESSION: - log.info("Got a request to create a new session: " + log.finest("Got a request to create a new session: " + new DesiredCapabilities(request.getDesiredCapabilities())); try { registry.addNewSessionRequest(this);
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.grid.web.servlet.handler; import com.google.common.collect.ImmutableMap; import org.openqa.grid.common.exception.ClientGoneException; import org.openqa.grid.common.exception.GridException; import org.openqa.grid.internal.ExternalSessionKey; import org.openqa.grid.internal.GridRegistry; import org.openqa.grid.internal.RemoteProxy; import org.openqa.grid.internal.SessionTerminationReason; import org.openqa.grid.internal.TestSession; import org.openqa.grid.internal.exception.NewSessionException; import org.openqa.grid.internal.listeners.TestSessionListener; import org.openqa.grid.internal.utils.configuration.GridHubConfiguration; import org.openqa.selenium.remote.DesiredCapabilities; import org.openqa.selenium.remote.NewSessionPayload; import java.io.IOException; import java.net.SocketTimeoutException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.logging.Level; import java.util.logging.Logger; import javax.servlet.http.HttpServletResponse; /** * Base stuff to handle the request coming from a remote. * * Threading notes; RequestHandlers are instantiated per-request, run on the servlet container * thread. The instance is also accessed by the matcher thread. */ @SuppressWarnings("JavaDoc") public class RequestHandler implements Comparable<RequestHandler> { private static final Logger log = Logger.getLogger(RequestHandler.class.getName()); private final GridRegistry registry; private final SeleniumBasedRequest request; private final HttpServletResponse response; private final CountDownLatch sessionAssigned = new CountDownLatch(1); private final Thread waitingThread; private volatile TestSession session = null; public RequestHandler( SeleniumBasedRequest request, HttpServletResponse response, GridRegistry registry) { this.request = request; this.response = response; this.registry = registry; this.waitingThread = Thread.currentThread(); } /** * Forward the new session request to the TestSession that has been assigned, and parse the * response to extract and return the external key assigned by the remote. * * @param session session * @throws NewSessionException in case anything wrong happens during the new session process. 
*/ public void forwardNewSessionRequestAndUpdateRegistry(TestSession session) throws NewSessionException { try (NewSessionPayload payload = NewSessionPayload.create( ImmutableMap.of("desiredCapabilities", session.getRequestedCapabilities()))) { StringBuilder json = new StringBuilder(); payload.writeTo(json); request.setBody(json.toString()); session.forward(getRequest(), getResponse(), true); } catch (IOException e) { //log.warning("Error forwarding the request " + e.getMessage()); throw new NewSessionException("Error forwarding the request " + e.getMessage(), e); } } protected void forwardRequest(TestSession session, RequestHandler handler) throws IOException { session.forward(request, response, false); } /** * forwards the request to the remote, allocating / releasing the resources if necessary. */ public void process() { switch (request.getRequestType()) { case START_SESSION: log.info("Got a request to create a new session: " + new DesiredCapabilities(request.getDesiredCapabilities())); try { registry.addNewSessionRequest(this); waitForSessionBound(); beforeSessionEvent(); forwardNewSessionRequestAndUpdateRegistry(session); } catch (Exception e) { cleanup(); log.log(Level.INFO, "Error forwarding the new session " + e.getMessage(), e); throw new GridException("Error forwarding the new session " + e.getMessage(), e); } break; case REGULAR: case STOP_SESSION: session = getSession(); if (session == null) { ExternalSessionKey sessionKey = null; try { sessionKey = request.extractSession(); } catch (RuntimeException ignore) {} throw new GridException("Session [" + sessionKey + "] not available - " + registry.getActiveSessions()); } try { forwardRequest(session, this); } catch (ClientGoneException e) { log.log(Level.WARNING, "The client is gone for session " + session + ", terminating"); registry.terminate(session, SessionTerminationReason.CLIENT_GONE); } catch (SocketTimeoutException e) { log.log(Level.SEVERE, "Socket timed out for session " + session + ", " + e.getMessage()); registry.terminate(session, SessionTerminationReason.SO_TIMEOUT); } catch (Throwable t) { log.log(Level.SEVERE, "cannot forward the request " + t.getMessage(), t); registry.terminate(session, SessionTerminationReason.FORWARDING_TO_NODE_FAILED); throw new GridException("cannot forward the request " + t.getMessage(), t); } if (request.getRequestType() == RequestType.STOP_SESSION) { registry.terminate(session, SessionTerminationReason.CLIENT_STOPPED_SESSION); } break; default: throw new RuntimeException("NI"); } } private void cleanup() { registry.removeNewSessionRequest(this); if (session != null) { registry.terminate(session, SessionTerminationReason.CREATIONFAILED); } } /** * calls the TestSessionListener is the proxy for that node has one specified. * * @throws NewSessionException in case anything goes wrong with the listener. */ private void beforeSessionEvent() throws NewSessionException { RemoteProxy p = session.getSlot().getProxy(); if (p instanceof TestSessionListener) { try { ((TestSessionListener) p).beforeSession(session); } catch (Exception e) { log.severe("Error running the beforeSessionListener : " + e.getMessage()); e.printStackTrace(); throw new NewSessionException("The listener threw an exception ( listener bug )", e); } } } /** * wait for the registry to match the request with a TestSlot. * * @throws InterruptedException Interrupted exception * @throws TimeoutException if the request reaches the new session wait timeout before being * assigned. 
*/ public void waitForSessionBound() throws InterruptedException, TimeoutException { // Maintain compatibility with Grid 1.x, which had the ability to // specify how long to wait before canceling a request. GridHubConfiguration configuration = getRegistry().getHub().getConfiguration(); Integer newSessionWaitTimeout = configuration.newSessionWaitTimeout != null ? configuration.newSessionWaitTimeout : 0; if (newSessionWaitTimeout > 0) { if (!sessionAssigned.await(newSessionWaitTimeout.longValue(), TimeUnit.MILLISECONDS)) { throw new TimeoutException("Request timed out waiting for a node to become available."); } } else { // Wait until a proxy becomes available to handle the request. sessionAssigned.await(); } } /** * @return the SeleniumBasedRequest this handler is processing. */ public SeleniumBasedRequest getRequest() { return request; } /** * @return the HttpServletResponse the handler is writing to. */ public HttpServletResponse getResponse() { return response; } @Override public int compareTo(RequestHandler o) { GridHubConfiguration configuration = getRegistry().getHub().getConfiguration(); if (configuration.prioritizer != null) { return configuration.prioritizer.compareTo( this.getRequest().getDesiredCapabilities(), o.getRequest().getDesiredCapabilities()); } return 0; } protected void setSession(TestSession session) { this.session = session; } public void bindSession(TestSession session) { this.session = session; sessionAssigned.countDown(); } public TestSession getSession() { if (session == null) { ExternalSessionKey externalKey = request.extractSession(); session = registry.getExistingSession(externalKey); } return session; } /** * @return the session from the server ( = opaque handle used by the server to determine where to * route session-specific commands from the JSON wire protocol ). will be null until the request * has been processed. */ public ExternalSessionKey getServerSession() { if (session == null) { return null; } return session.getExternalKey(); } public void stop() { waitingThread.interrupt(); } @Override public String toString() { StringBuilder b = new StringBuilder(); b.append("session:").append(session).append(", "); b.append("caps: ").append(request.getDesiredCapabilities()); b.append("\n"); return b.toString(); } public String debug() { StringBuilder b = new StringBuilder(); b.append("\nmethod: ").append(request.getMethod()); b.append("\npathInfo: ").append(request.getPathInfo()); b.append("\nuri: ").append(request.getRequestURI()); b.append("\ncontent :").append(request.getBody()); return b.toString(); } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((session == null) ? 0 : session.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (getClass() != obj.getClass()) { return false; } RequestHandler other = (RequestHandler) obj; if (session == null) { if (other.session != null) { return false; } } else if (!session.equals(other.session)) { return false; } return true; } public GridRegistry getRegistry() { return registry; } }
1
16,457
This change is unhelpful to users.
SeleniumHQ-selenium
java
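The objection above is about discoverability: `java.util.logging` prints `INFO` and above under its default configuration, so demoting the new-session message to `FINEST` hides it from anyone running a stock Grid. A small self-contained demonstration:

```java
import java.util.logging.Logger;

public class LogLevelDemo {
  private static final Logger log = Logger.getLogger(LogLevelDemo.class.getName());

  public static void main(String[] args) {
    log.info("printed: INFO passes the default threshold");
    log.finest("silently dropped: FINEST is below the default threshold");
    // Seeing FINEST output requires opting in, e.g. setting both the logger
    // and its handler to Level.FINEST via a logging.properties file.
  }
}
```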
@@ -59,6 +59,8 @@ public struct Vec3 : IFlatbufferObject } public static Offset<MyGame.Example.Vec3> Pack(FlatBufferBuilder builder, Vec3T _o) { if (_o == null) return default(Offset<MyGame.Example.Vec3>); + var _test3_a = _o.Test3.A; + var _test3_b = _o.Test3.B; return CreateVec3( builder, _o.X,
1
// <auto-generated> // automatically generated by the FlatBuffers compiler, do not modify // </auto-generated> namespace MyGame.Example { using global::System; using global::System.Collections.Generic; using global::FlatBuffers; public struct Vec3 : IFlatbufferObject { private Struct __p; public ByteBuffer ByteBuffer { get { return __p.bb; } } public void __init(int _i, ByteBuffer _bb) { __p = new Struct(_i, _bb); } public Vec3 __assign(int _i, ByteBuffer _bb) { __init(_i, _bb); return this; } public float X { get { return __p.bb.GetFloat(__p.bb_pos + 0); } } public void MutateX(float x) { __p.bb.PutFloat(__p.bb_pos + 0, x); } public float Y { get { return __p.bb.GetFloat(__p.bb_pos + 4); } } public void MutateY(float y) { __p.bb.PutFloat(__p.bb_pos + 4, y); } public float Z { get { return __p.bb.GetFloat(__p.bb_pos + 8); } } public void MutateZ(float z) { __p.bb.PutFloat(__p.bb_pos + 8, z); } public double Test1 { get { return __p.bb.GetDouble(__p.bb_pos + 16); } } public void MutateTest1(double test1) { __p.bb.PutDouble(__p.bb_pos + 16, test1); } public MyGame.Example.Color Test2 { get { return (MyGame.Example.Color)__p.bb.Get(__p.bb_pos + 24); } } public void MutateTest2(MyGame.Example.Color test2) { __p.bb.Put(__p.bb_pos + 24, (byte)test2); } public MyGame.Example.Test Test3 { get { return (new MyGame.Example.Test()).__assign(__p.bb_pos + 26, __p.bb); } } public static Offset<MyGame.Example.Vec3> CreateVec3(FlatBufferBuilder builder, float X, float Y, float Z, double Test1, MyGame.Example.Color Test2, short test3_A, sbyte test3_B) { builder.Prep(8, 32); builder.Pad(2); builder.Prep(2, 4); builder.Pad(1); builder.PutSbyte(test3_B); builder.PutShort(test3_A); builder.Pad(1); builder.PutByte((byte)Test2); builder.PutDouble(Test1); builder.Pad(4); builder.PutFloat(Z); builder.PutFloat(Y); builder.PutFloat(X); return new Offset<MyGame.Example.Vec3>(builder.Offset); } public Vec3T UnPack() { var _o = new Vec3T(); this.UnPackTo(_o); return _o; } public void UnPackTo(Vec3T _o) { _o.X = this.X; _o.Y = this.Y; _o.Z = this.Z; _o.Test1 = this.Test1; _o.Test2 = this.Test2; _o.Test3 = this.Test3.UnPack(); } public static Offset<MyGame.Example.Vec3> Pack(FlatBufferBuilder builder, Vec3T _o) { if (_o == null) return default(Offset<MyGame.Example.Vec3>); return CreateVec3( builder, _o.X, _o.Y, _o.Z, _o.Test1, _o.Test2, _o.Test3.A, _o.Test3.B); } }; public class Vec3T { [Newtonsoft.Json.JsonProperty("x")] public float X { get; set; } [Newtonsoft.Json.JsonProperty("y")] public float Y { get; set; } [Newtonsoft.Json.JsonProperty("z")] public float Z { get; set; } [Newtonsoft.Json.JsonProperty("test1")] public double Test1 { get; set; } [Newtonsoft.Json.JsonProperty("test2")] public MyGame.Example.Color Test2 { get; set; } [Newtonsoft.Json.JsonProperty("test3")] public MyGame.Example.TestT Test3 { get; set; } public Vec3T() { this.X = 0.0f; this.Y = 0.0f; this.Z = 0.0f; this.Test1 = 0.0; this.Test2 = 0; this.Test3 = new MyGame.Example.TestT(); } } }
1
17,797
Why is this variable introduced? Please pass `_o.Test3.A` directly in the call below (see the sketch after this record).
google-flatbuffers
java
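A minimal sketch of the end state the reviewer asks for, in the C# of this record's file. It drops the patch's `_test3_a`/`_test3_b` temporaries and passes the struct fields straight through; all names come from the record's `oldf`, but this is an illustration, not the change that actually landed.

  // Hypothetical revision of the generated Pack method with the
  // reviewer's inlining applied: no intermediate locals, the fields of
  // _o.Test3 go directly into the CreateVec3 call.
  public static Offset<MyGame.Example.Vec3> Pack(FlatBufferBuilder builder, Vec3T _o) {
    if (_o == null) return default(Offset<MyGame.Example.Vec3>);
    return CreateVec3(
      builder,
      _o.X,
      _o.Y,
      _o.Z,
      _o.Test1,
      _o.Test2,
      _o.Test3.A,   // inlined instead of: var _test3_a = _o.Test3.A;
      _o.Test3.B);  // inlined instead of: var _test3_b = _o.Test3.B;
  }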
@@ -88,7 +88,7 @@ module.exports.app = (options = {}) => {
   app.get('/:providerName/logout', middlewares.hasSessionAndProvider, middlewares.gentleVerifyToken, controllers.logout)
   app.get('/:providerName/authorized', middlewares.hasSessionAndProvider, middlewares.gentleVerifyToken, controllers.authorized)
   app.get('/:providerName/list/:id?', middlewares.hasSessionAndProvider, middlewares.verifyToken, controllers.list)
-  app.post('/:providerName/get/:id', middlewares.hasSessionAndProvider, middlewares.verifyToken, controllers.get)
+  app.post('/:providerName/get/:id', middlewares.hasSessionAndProvider, middlewares.gentleVerifyToken, controllers.get)
   app.get('/:providerName/thumbnail/:id', middlewares.hasSessionAndProvider, middlewares.cookieAuthToken, middlewares.verifyToken, controllers.thumbnail)

   app.param('providerName', providerManager.getProviderMiddleware(providers))
1
const express = require('express')
// @ts-ignore
const Grant = require('grant-express')
const grantConfig = require('./config/grant')()
const providerManager = require('./server/provider')
const controllers = require('./server/controllers')
const s3 = require('./server/controllers/s3')
const url = require('./server/controllers/url')
const SocketServer = require('ws').Server
const emitter = require('./server/emitter')
const merge = require('lodash.merge')
const redis = require('./server/redis')
const cookieParser = require('cookie-parser')
const { jsonStringify, getURLBuilder } = require('./server/helpers/utils')
const jobs = require('./server/jobs')
const interceptor = require('express-interceptor')
const logger = require('./server/logger')
const { STORAGE_PREFIX } = require('./server/Uploader')
const middlewares = require('./server/middlewares')

const providers = providerManager.getDefaultProviders()
const defaultOptions = {
  server: {
    protocol: 'http',
    path: ''
  },
  providerOptions: {
    s3: {
      acl: 'public-read',
      endpoint: 'https://{service}.{region}.amazonaws.com',
      conditions: [],
      getKey: (req, filename) => filename
    }
  },
  debug: true
}

/**
 * Entry point into initializing the Companion app.
 *
 * @param {object} options
 */
module.exports.app = (options = {}) => {
  options = merge({}, defaultOptions, options)
  providerManager.addProviderOptions(options, grantConfig)

  const customProviders = options.customProviders
  if (customProviders) {
    providerManager.addCustomProviders(customProviders, providers, grantConfig)
  }

  // create singleton redis client
  if (options.redisUrl) {
    redis.client(merge({ url: options.redisUrl }, options.redisOptions || {}))
  }
  emitter(options.multipleInstances && options.redisUrl)

  const app = express()
  app.use(cookieParser()) // server tokens are added to cookies
  app.use(interceptGrantErrorResponse)
  app.use(new Grant(grantConfig))
  app.use((req, res, next) => {
    res.header(
      'Access-Control-Allow-Headers',
      [res.get('Access-Control-Allow-Headers'), 'uppy-auth-token'].join(', ')
    )
    next()
  })

  if (options.sendSelfEndpoint) {
    app.use('*', (req, res, next) => {
      const { protocol } = options.server
      res.header('i-am', `${protocol}://${options.sendSelfEndpoint}`)
      // add it to the exposed custom headers.
      res.header('Access-Control-Expose-Headers', [res.get('Access-Control-Expose-Headers'), 'i-am'].join(', '))
      next()
    })
  }

  // add uppy options to the request object so it can be accessed by subsequent handlers.
  app.use('*', getOptionsMiddleware(options))
  app.use('/s3', s3(options.providerOptions.s3))
  app.use('/url', url())

  app.get('/:providerName/callback', middlewares.hasSessionAndProvider, controllers.callback)
  app.get('/:providerName/connect', middlewares.hasSessionAndProvider, controllers.connect)
  app.get('/:providerName/redirect', middlewares.hasSessionAndProvider, controllers.redirect)
  app.get('/:providerName/logout', middlewares.hasSessionAndProvider, middlewares.gentleVerifyToken, controllers.logout)
  app.get('/:providerName/authorized', middlewares.hasSessionAndProvider, middlewares.gentleVerifyToken, controllers.authorized)
  app.get('/:providerName/list/:id?', middlewares.hasSessionAndProvider, middlewares.verifyToken, controllers.list)
  app.post('/:providerName/get/:id', middlewares.hasSessionAndProvider, middlewares.verifyToken, controllers.get)
  app.get('/:providerName/thumbnail/:id', middlewares.hasSessionAndProvider, middlewares.cookieAuthToken, middlewares.verifyToken, controllers.thumbnail)

  app.param('providerName', providerManager.getProviderMiddleware(providers))

  if (app.get('env') !== 'test') {
    jobs.startCleanUpJob(options.filePath)
  }

  return app
}

/**
 * the socket is used to send progress events during an upload
 *
 * @param {object} server
 */
module.exports.socket = (server) => {
  const wss = new SocketServer({ server })
  const redisClient = redis.client()

  // A new connection is usually created when an upload begins,
  // or when connection fails while an upload is on-going and,
  // client attempts to reconnect.
  wss.on('connection', (ws) => {
    // @ts-ignore
    const fullPath = ws.upgradeReq.url
    // the token identifies which ongoing upload's progress, the socket
    // connection wishes to listen to.
    const token = fullPath.replace(/^.*\/api\//, '')
    logger.info(`connection received from ${token}`, 'socket.connect')

    /**
     *
     * @param {{action: string, payload: object}} data
     */
    function sendProgress (data) {
      ws.send(jsonStringify(data), (err) => {
        if (err) logger.error(err, 'socket.progress.error')
      })
    }

    // if the redisClient is available, then we attempt to check the storage
    // if we have any already stored progress data on the upload.
    if (redisClient) {
      redisClient.get(`${STORAGE_PREFIX}:${token}`, (err, data) => {
        if (err) logger.error(err, 'socket.redis.error')
        if (data) {
          const dataObj = JSON.parse(data.toString())
          if (dataObj.action) sendProgress(dataObj)
        }
      })
    }

    emitter().emit(`connection:${token}`)
    emitter().on(token, sendProgress)

    ws.on('message', (jsonData) => {
      const data = JSON.parse(jsonData.toString())
      // whitelist triggered actions
      if (data.action === 'pause' || data.action === 'resume') {
        emitter().emit(`${data.action}:${token}`)
      }
    })

    ws.on('close', () => {
      emitter().removeListener(token, sendProgress)
    })
  })
}

// intercepts grantJS' default response error when something goes
// wrong during oauth process.
const interceptGrantErrorResponse = interceptor((req, res) => {
  return {
    isInterceptable: () => {
      // match grant.js' callback url
      return /^\/connect\/\w+\/callback/.test(req.path)
    },
    intercept: (body, send) => {
      const unwantedBody = 'error=Grant%3A%20missing%20session%20or%20misconfigured%20provider'
      if (body === unwantedBody) {
        logger.error(`grant.js responded with error: ${body}`, 'grant.oauth.error')
        send([
          'Companion was unable to complete the OAuth process :(',
          '(Hint, try clearing your cookies and try again)'
        ].join('\n'))
      } else {
        send(body)
      }
    }
  }
})

/**
 *
 * @param {object} options
 */
const getOptionsMiddleware = (options) => {
  let s3Client = null
  if (options.providerOptions.s3) {
    const S3 = require('aws-sdk/clients/s3')
    const AWS = require('aws-sdk')
    const config = options.providerOptions.s3

    // Use credentials to allow assumed roles to pass STS sessions in.
    // If the user doesn't specify key and secret, the default credentials (process-env)
    // will be used by S3 in calls below.
    let credentials
    if (config.key && config.secret) {
      credentials = new AWS.Credentials(config.key, config.secret, config.sessionToken)
    }
    s3Client = new S3({
      region: config.region,
      endpoint: config.endpoint,
      credentials,
      signatureVersion: 'v4'
    })
  }

  /**
   *
   * @param {object} req
   * @param {object} res
   * @param {function} next
   */
  const middleware = (req, res, next) => {
    req.uppy = {
      options,
      s3Client,
      authToken: req.header('uppy-auth-token'),
      buildURL: getURLBuilder(options)
    }
    next()
  }

  return middleware
}
1
11,423
Do you mind sharing the reason for this change? (See the sketch after this record.)
transloadit-uppy
js
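A hedged illustration of what the reviewer's question hinges on. The real `verifyToken` and `gentleVerifyToken` implementations live in `./server/middlewares` and are not shown in this record; the contrast sketched below (the "gentle" variant tolerating a missing or invalid token) is an assumption based on the names alone, and `isValid` is a hypothetical helper.

  // Assumed shape of the two middlewares the patch swaps between.
  const verifyToken = (req, res, next) => {
    // strict: reject the request outright when the token fails validation
    if (!isValid(req.uppy.authToken)) {
      return res.sendStatus(401)
    }
    next()
  }

  const gentleVerifyToken = (req, res, next) => {
    // lenient: let the request through and leave the decision to the controller
    next()
  }

If that reading is right, the patch relaxes auth on the `get` route, which is why the reviewer asks for the rationale.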
@@ -27,11 +27,11 @@ class ProposalsController < ApplicationController
     @pending_data = listing.pending
     @pending_review_data = listing.pending_review
     @completed_data = listing.completed.alter_query { |rel| rel.limit(@closed_proposal_limit) }
-    @canceled_data = listing.canceled
+    @canceled_data = listing.canceled.alter_query { |rel| rel.limit(@closed_proposal_limit) }
   end

   def archive
-    @proposals_data = listing.closed
+    redirect_to query_proposals_path(text: "status:completed")
   end

   def cancel_form
1
class ProposalsController < ApplicationController
  include TokenAuth

  skip_before_action :authenticate_user!, only: [:approve, :complete]
  skip_before_action :check_disabled_client, only: [:approve, :complete]
  # TODO use Policy for all actions
  before_action -> { authorize proposal }, only: [:show, :cancel, :cancel_form, :history]
  before_action :needs_token_on_get, only: [:approve, :complete]
  before_action :validate_access, only: [:approve, :complete]
  add_template_helper ProposalsHelper

  rescue_from Pundit::NotAuthorizedError, with: :auth_errors

  def show
    @proposal = proposal.decorate
    cookies[:detail] = params[:detail]
    mode = cookies[:detail]
    if mode == "new"
      @client_data_instance ||= proposal.client_data
      @subscriber_list = SubscriberList.new(proposal).triples
      @observation = Observation.new(proposal: proposal)
      render "show_next"
    end
  end

  def index
    @closed_proposal_limit = ENV.fetch("CLOSED_PROPOSAL_LIMIT", 10).to_i
    @pending_data = listing.pending
    @pending_review_data = listing.pending_review
    @completed_data = listing.completed.alter_query { |rel| rel.limit(@closed_proposal_limit) }
    @canceled_data = listing.canceled
  end

  def archive
    @proposals_data = listing.closed
  end

  def cancel_form
    @proposal = proposal.decorate
  end

  def cancel
    if params[:reason_input].present?
      cancel_proposal_and_send_cancelation_emails
      flash[:success] = "Your request has been canceled"
      redirect_to proposal_path(proposal)
    else
      redirect_to(
        cancel_form_proposal_path(params[:id]),
        alert: "A reason for cancelation is required. Please indicate why this request needs to be canceled."
      )
    end
  end

  def approve
    complete
  end

  def complete
    step = proposal.existing_or_delegated_actionable_step_for(current_user)
    if step
      complete_step(step)
    else
      flash[:alert] = I18n.t("errors.policies.proposal.step_complete")
    end
    redirect_to proposal
  end

  def query
    check_search_params
    query_listing = listing
    unless @proposals_data = try_search(query_listing)
      redirect_to proposals_path
    end
    @start_date = query_listing.start_date
    @end_date = query_listing.end_date
  end

  def query_count
    set_search_params
    if !valid_search_params?
      render json: { total: 0 }
    elsif @proposals_data = try_search(listing)
      render json: { total: @proposals_data.es_response.results.total }
    else
      render json: { error: flash[:error] }
    end
  end

  def download
    params[:size] = :all
    params.delete(:page)
    begin
      build_csv_download
    rescue SearchBadQuery, SearchUnavailable => error
      flash[:error] = error.message
      redirect_to proposals_path
    end
  end

  def history
    @container = ProposalVersionsQuery.new(proposal).container
    @container.state_from_params = params
  end

  protected

  def try_search(query_listing)
    begin
      @proposals_data = query_listing.query
    rescue SearchBadQuery, SearchUnavailable => error
      flash[:error] = error.message
      false
    end
  end

  def build_csv_download
    query_listing = listing
    @proposals_data = query_listing.query
    timestamp = Time.current.utc.strftime("%Y-%m-%d-%H-%M-%S")
    headers["Content-Disposition"] = %(attachment; filename="C2-Proposals-#{timestamp}.csv")
    headers["Content-Type"] = "text/csv"
  end

  def cancel_proposal_and_send_cancelation_emails
    comments = "Request canceled with comments: " + params[:reason_input]
    proposal.cancel!
    proposal.comments.create!(comment_text: comments, user: current_user)
    DispatchFinder.run(proposal).deliver_cancelation_emails(current_user, params[:reason_input])
  end

  def proposal
    @cached_proposal ||= Proposal.find(params[:id])
  end

  def auth_errors(exception)
    if %w(cancel cancel_form).include?(params[:action])
      redirect_to proposal_path, alert: exception.message
    else
      super
    end
  end

  def listing
    ProposalListingQuery.new(current_user, params)
  end

  def set_search_params
    @dsl = build_search_dsl
    @text = params[:text]
    @adv_search = @dsl.client_query
    build_search_query
    find_search_report
  end

  def check_search_params
    set_search_params
    unless valid_search_params?
      flash[:alert] = "Please enter one or more search criteria"
      redirect_to proposals_path
    end
  end

  def valid_search_params?
    @text.present? || @adv_search.present? || (params[:start_date].present? && params[:end_date].present?)
  end

  def find_search_report
    if params[:report]
      @report = Report.find params[:report]
    end
  end

  def build_search_dsl
    ProposalSearchDsl.new(
      params: params,
      current_user: current_user,
      query: params[:text],
      client_data_type: current_user.client_model.to_s
    )
  end

  def build_search_query
    @search_query = { "humanized" => @dsl.humanized_query_string }
    if @text.present?
      @search_query["text"] = @text
    end
    if @adv_search.present?
      @search_query[current_user.client_model_slug] = @adv_search.to_h
    end
  end

  def complete_step(step)
    step.update_attributes!(completer: current_user)
    step.complete!
    flash[:success] = "You have approved #{proposal.public_id}."
  end
end
1
17,384
Thoughts on putting `alter_query { |rel| rel.limit(@closed_proposal_limit) }` in a method that we can call here? That way we can test this logic without needing a controller spec (see the sketch after this record).
18F-C2
rb
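A minimal sketch of the extraction the reviewer suggests, assuming a controller-level helper; the name `limited` is hypothetical, and the method could equally live on the listing query object, which is where it would be most directly unit-testable.

  def index
    @closed_proposal_limit = ENV.fetch("CLOSED_PROPOSAL_LIMIT", 10).to_i
    @pending_data = listing.pending
    @pending_review_data = listing.pending_review
    @completed_data = limited(listing.completed)
    @canceled_data = limited(listing.canceled)
  end

  private

  # Single place for the "cap closed/canceled listings" rule, so it can be
  # exercised in isolation rather than through a controller spec.
  def limited(data)
    data.alter_query { |rel| rel.limit(@closed_proposal_limit) }
  end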
@@ -31,7 +31,12 @@ module Api
       response.last_modified = node.timestamp

       if node.visible
-        render :xml => node.to_xml.to_s
+        @node = node
+
+        # Render the result
+        respond_to do |format|
+          format.xml
+        end
       else
         head :gone
       end
1
# The NodeController is the RESTful interface to Node objects
module Api
  class NodesController < ApiController
    require "xml/libxml"

    before_action :authorize, :only => [:create, :update, :delete]

    authorize_resource

    before_action :require_public_data, :only => [:create, :update, :delete]
    before_action :check_api_writable, :only => [:create, :update, :delete]
    before_action :check_api_readable, :except => [:create, :update, :delete]
    around_action :api_call_handle_error, :api_call_timeout

    # Create a node from XML.
    def create
      assert_method :put

      node = Node.from_xml(request.raw_post, true)

      # Assume that Node.from_xml has thrown an exception if there is an error parsing the xml
      node.create_with_history current_user
      render :plain => node.id.to_s
    end

    # Dump the details on a node given in params[:id]
    def show
      node = Node.find(params[:id])

      response.last_modified = node.timestamp

      if node.visible
        render :xml => node.to_xml.to_s
      else
        head :gone
      end
    end

    # Update a node from given XML
    def update
      node = Node.find(params[:id])
      new_node = Node.from_xml(request.raw_post)

      raise OSM::APIBadUserInput, "The id in the url (#{node.id}) is not the same as provided in the xml (#{new_node.id})" unless new_node && new_node.id == node.id

      node.update_from(new_node, current_user)
      render :plain => node.version.to_s
    end

    # Delete a node. Doesn't actually delete it, but retains its history
    # in a wiki-like way. We therefore treat it like an update, so the delete
    # method returns the new version number.
    def delete
      node = Node.find(params[:id])
      new_node = Node.from_xml(request.raw_post)

      raise OSM::APIBadUserInput, "The id in the url (#{node.id}) is not the same as provided in the xml (#{new_node.id})" unless new_node && new_node.id == node.id

      node.delete_with_history!(new_node, current_user)
      render :plain => node.version.to_s
    end

    # Dump the details on many nodes whose ids are given in the "nodes" parameter.
    def index
      raise OSM::APIBadUserInput, "The parameter nodes is required, and must be of the form nodes=id[,id[,id...]]" unless params["nodes"]

      ids = params["nodes"].split(",").collect(&:to_i)

      raise OSM::APIBadUserInput, "No nodes were given to search for" if ids.empty?

      doc = OSM::API.new.get_xml_doc

      Node.find(ids).each do |node|
        doc.root << node.to_xml_node
      end

      render :xml => doc.to_s
    end
  end
end
1
11,893
Perhaps use `@node` throughout (see the sketch after this record).
openstreetmap-openstreetmap-website
rb
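A minimal sketch of the reviewer's "`@node` throughout" suggestion applied to the patched `#show` action; this is an illustration of the comment, not the merged change.

  # Dump the details on a node given in params[:id]
  def show
    @node = Node.find(params[:id])

    response.last_modified = @node.timestamp

    if @node.visible
      # Render the result
      respond_to do |format|
        format.xml
      end
    else
      head :gone
    end
  end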
@@ -195,8 +195,14 @@ module Beaker
           @cmd_options[:validate] = bool
         end

-        opts.on '--collect-perf-data', 'Use sysstat on linux hosts to collect performance and load data' do
-          @cmd_options[:collect_perf_data] = true
+        opts.on '--collect-perf-data [MODE]',
+                'Collect SUT performance and load data',
+                'Possible values:',
+                'aggressive (poll every minute)',
+                'normal (poll every 10 minutes)',
+                'none (do not collect perf data)',
+                '(default: normal)' do |mode|
+          @cmd_options[:collect_perf_data] = mode || 'normal'
         end

         opts.on('--version', 'Report currently running version of beaker' ) do
1
module Beaker
  module Options
    # An object that parses arguments in the format ['--option', 'value', '--option2', 'value2', '--switch']
    class CommandLineParser

      # @example Create a CommandLineParser
      #   a = CommandLineParser.new
      #
      # @note All of Beaker's supported command line options are defined here
      def initialize
        @cmd_options = Beaker::Options::OptionsHash.new

        @optparse = OptionParser.new do |opts|
          # Set a banner
          opts.banner = "Usage: #{File.basename($0)} [options...]"

          opts.on '-h', '--hosts FILE',
                  'Use host configuration FILE',
                  '(default sample.cfg)' do |file|
            @cmd_options[:hosts_file] = file
          end

          opts.on '-o', '--options-file FILE',
                  'Read options from FILE',
                  'This should evaluate to a ruby hash.',
                  'CLI options are given precedence.' do |file|
            @cmd_options[:options_file] = file
          end

          opts.on '--helper PATH/TO/SCRIPT',
                  'Ruby file evaluated prior to tests',
                  '(a la spec_helper)' do |script|
            @cmd_options[:helper] = script
          end

          opts.on '--load-path /PATH/TO/DIR,/ADDITIONAL/DIR/PATHS',
                  'Add paths to LOAD_PATH' do |value|
            @cmd_options[:load_path] = value
          end

          opts.on '-t', '--tests /PATH/TO/DIR,/ADDITIONAL/DIR/PATHS,/PATH/TO/FILE.rb',
                  'Execute tests from paths and files' do |value|
            @cmd_options[:tests] = value
          end

          opts.on '--pre-suite /PRE-SUITE/DIR/PATH,/ADDITIONAL/DIR/PATHS,/PATH/TO/FILE.rb',
                  'Path to project specific steps to be run BEFORE testing' do |value|
            @cmd_options[:pre_suite] = value
          end

          opts.on '--post-suite /POST-SUITE/DIR/PATH,/OPTIONAL/ADDITIONAL/DIR/PATHS,/PATH/TO/FILE.rb',
                  'Path to project specific steps to be run AFTER testing' do |value|
            @cmd_options[:post_suite] = value
          end

          opts.on '--[no-]provision',
                  'Do not provision vm images before testing',
                  '(default: true)' do |bool|
            @cmd_options[:provision] = bool
            unless bool
              @cmd_options[:validate] = false
              @cmd_options[:configure] = false
            end
          end

          opts.on '--[no-]configure',
                  'Do not configure vm images before testing',
                  '(default: true)' do |bool|
            @cmd_options[:configure] = bool
          end

          opts.on '--preserve-hosts [MODE]',
                  'How should SUTs be treated post test',
                  'Possible values:',
                  'always (keep SUTs alive)',
                  'onfail (keep SUTs alive if failures occur during testing)',
                  'onpass (keep SUTs alive if no failures occur during testing)',
                  'never (cleanup SUTs - shutdown and destroy any changes made during testing)',
                  '(default: never)' do |mode|
            @cmd_options[:preserve_hosts] = mode || 'always'
          end

          opts.on '--root-keys',
                  'Install puppetlabs pubkeys for superuser',
                  '(default: false)' do |bool|
            @cmd_options[:root_keys] = bool
          end

          opts.on '--keyfile /PATH/TO/SSH/KEY',
                  'Specify alternate SSH key',
                  '(default: ~/.ssh/id_rsa)' do |key|
            @cmd_options[:keyfile] = key
          end

          opts.on '--timeout TIMEOUT',
                  '(vCloud only) Specify a provisioning timeout (in seconds)',
                  '(default: 300)' do |value|
            @cmd_options[:timeout] = value
          end

          opts.on '-i URI', '--install URI',
                  'Install a project repo/app on the SUTs',
                  'Provide full git URI or use short form KEYWORD/name',
                  'supported keywords: PUPPET, FACTER, HIERA, HIERA-PUPPET' do |value|
            @cmd_options[:install] = value
          end

          opts.on('-m', '--modules URI', 'Select puppet module git install URI') do |value|
            @cmd_options[:modules] = value
          end

          opts.on '-q', '--[no-]quiet',
                  'Do not log output to STDOUT',
                  '(default: false)' do |bool|
            @cmd_options[:quiet] = bool
          end

          opts.on '--[no-]color',
                  'Do not display color in log output',
                  '(default: true)' do |bool|
            @cmd_options[:color] = bool
          end

          opts.on '--[no-]color-host-output',
                  'Ensure SUT colored output is preserved',
                  '(default: false)' do |bool|
            @cmd_options[:color_host_output] = bool
            if bool
              @cmd_options[:color_host_output] = true
            end
          end

          opts.on '--log-level LEVEL',
                  'Log level',
                  'Supported LEVEL keywords:',
                  'trace   : all messages, full stack trace of errors, file copy details',
                  'debug   : all messages, plus full stack trace of errors',
                  'verbose : all messages',
                  'info    : info messages, notifications and warnings',
                  'notify  : notifications and warnings',
                  'warn    : warnings only',
                  '(default: info)' do |val|
            @cmd_options[:log_level] = val
          end

          opts.on '--log-prefix PREFIX',
                  'Use a custom prefix for your Beaker log files',
                  'can provide nested directories (ie. face/man)',
                  '(defaults to hostfile name. ie. ../i/07.yml --> "07")' do |val|
            @cmd_options[:log_prefix] = val
          end

          opts.on '-d', '--[no-]dry-run',
                  'Report what would happen on targets',
                  '(default: false)' do |bool|
            @cmd_options[:dry_run] = bool
            $dry_run = bool
          end

          opts.on '--fail-mode [MODE]',
                  'How should the harness react to errors/failures',
                  'Possible values:',
                  'fast (skip all subsequent tests)',
                  'slow (attempt to continue run post test failure)',
                  'stop (DEPRECATED, please use fast)',
                  '(default: slow)' do |mode|
            @cmd_options[:fail_mode] = mode =~ /stop/ ? 'fast' : mode
          end

          opts.on '--[no-]ntp',
                  'Sync time on SUTs before testing',
                  '(default: false)' do |bool|
            @cmd_options[:timesync] = bool
          end

          opts.on '--repo-proxy',
                  'Proxy packaging repositories on ubuntu, debian, cumulus and solaris-11',
                  '(default: false)' do
            @cmd_options[:repo_proxy] = true
          end

          opts.on '--add-el-extras',
                  'Add Extra Packages for Enterprise Linux (EPEL) repository to el-* hosts',
                  '(default: false)' do
            @cmd_options[:add_el_extras] = true
          end

          opts.on '--package-proxy URL',
                  'Set proxy url for package managers (yum and apt)' do |value|
            @cmd_options[:package_proxy] = value
          end

          opts.on '--[no-]validate',
                  'Validate that SUTs are correctly provisioned before running tests',
                  '(default: true)' do |bool|
            @cmd_options[:validate] = bool
          end

          opts.on '--collect-perf-data',
                  'Use sysstat on linux hosts to collect performance and load data' do
            @cmd_options[:collect_perf_data] = true
          end

          opts.on('--version', 'Report currently running version of beaker' ) do
            @cmd_options[:beaker_version_print] = true
          end

          opts.on('--parse-only', 'Display beaker parsed options and exit' ) do
            @cmd_options[:parse_only] = true
          end

          opts.on('--help', 'Display this screen' ) do
            @cmd_options[:help] = true
          end

          opts.on '-c', '--config FILE',
                  'DEPRECATED, use --hosts' do |file|
            @cmd_options[:hosts_file] = file
          end

          opts.on '--[no-]debug',
                  'DEPRECATED, use --log-level' do |bool|
            @cmd_options[:log_level] = bool ? 'debug' : 'info'
          end

          opts.on '-x', '--[no-]xml',
                  'DEPRECATED - JUnit XML now generated by default' do
            #noop
          end

          opts.on '--type TYPE',
                  'DEPRECATED - pe/foss/aio determined during runtime' do |type|
            #backwards compatibility, oh how i hate you
            @cmd_options[:type] = type
          end

          opts.on '--tag TAGS',
                  'Run the set of tests matching ALL of the provided single or comma separated list of tags' do |value|
            @cmd_options[:tag_includes] = value
          end

          opts.on '--exclude-tag TAGS',
                  'Run the set of tests that do not contain ANY of the provided single or comma separated list of tags' do |value|
            @cmd_options[:tag_excludes] = value
          end

          opts.on '--xml-time-order',
                  'Output an additional JUnit XML file, sorted by execution time' do |bool|
            @cmd_options[:xml_time_enabled] = bool
          end

        end
      end

      # Parse an array of arguments into a Hash of options
      # @param [Array] args The array of arguments to consume
      #
      # @example
      #   args = ['--option', 'value', '--option2', 'value2', '--switch']
      #   parser = CommandLineParser.new
      #   parser.parse(args) == {:option => 'value', :option2 => 'value2', :switch => true}
      #
      # @return [Hash] Return the Hash of options
      def parse( args = ARGV )
        @optparse.parse(args)
        @cmd_options
      end

      # Generate a string representing the supported arguments
      #
      # @example
      #   parser = CommandLineParser.new
      #   parser.usage = "Options: ..."
      #
      # @return [String] Return a string representing the available arguments
      def usage
        @optparse.help
      end

    end
  end
end
1
11,332
Shouldn't this default be 'normal'? (See the sketch after this record.)
voxpupuli-beaker
rb
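A condensed sketch of the invariant the reviewer is checking: when the optional MODE argument is omitted, the fallback in the handler should agree with the "(default: ...)" line in the help text. For contrast, note that `--preserve-hosts` in this same file advertises "(default: never)" while its block falls back to `mode || 'always'`. The two-line help text here is abbreviated from the patch, not the full option definition.

  opts.on '--collect-perf-data [MODE]',
          '(default: normal)' do |mode|
    # fallback matches the advertised default
    @cmd_options[:collect_perf_data] = mode || 'normal'
  end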
@@ -131,7 +131,7 @@ class TelemetryEntry(
     namedtuple(
         "TelemetryEntry",
         "action client_time elapsed_time event_id instance_id pipeline_name_hash "
-        "num_pipelines_in_repo repo_hash python_version metadata version dagster_version os_desc os_platform",
+        "num_pipelines_in_repo num_schedules_in_repo num_sensors_in_repo repo_hash python_version metadata version dagster_version os_desc os_platform",
     )
 ):
     """
1
"""As an open source project, we collect usage statistics to inform development priorities. For more information, check out the docs at https://docs.dagster.io/install#telemetry' To see the logs we send, inspect $DAGSTER_HOME/logs/ if $DAGSTER_HOME is set or ~/.dagster/logs/ See class TelemetryEntry for logged fields. For local development: Spin up local telemetry server and set DAGSTER_TELEMETRY_URL = 'http://localhost:3000/actions' To test RotatingFileHandler, can set MAX_BYTES = 500 """ import datetime import hashlib import json import logging import os import platform import sys import uuid from collections import namedtuple from functools import wraps from logging.handlers import RotatingFileHandler import click import yaml from dagster import check from dagster.core.definitions.pipeline_base import IPipeline from dagster.core.definitions.reconstructable import ( ReconstructablePipeline, ReconstructableRepository, get_ephemeral_repository_name, ) from dagster.core.errors import DagsterInvariantViolationError from dagster.core.instance import DagsterInstance from dagster.utils import merge_dicts from dagster.version import __version__ as dagster_module_version TELEMETRY_STR = ".telemetry" INSTANCE_ID_STR = "instance_id" ENABLED_STR = "enabled" DAGSTER_HOME_FALLBACK = "~/.dagster" MAX_BYTES = 10485760 # 10 MB = 10 * 1024 * 1024 bytes UPDATE_REPO_STATS = "update_repo_stats" START_DAGIT_WEBSERVER = "start_dagit_webserver" DAEMON_ALIVE = "daemon_alive" SCHEDULED_RUN_CREATED = "scheduled_run_created" SENSOR_RUN_CREATED = "sensor_run_created" BACKFILL_RUN_CREATED = "backfill_run_created" TELEMETRY_VERSION = "0.2" OS_DESC = platform.platform() OS_PLATFORM = platform.system() TELEMETRY_WHITELISTED_FUNCTIONS = { "_logged_execute_pipeline", "execute_execute_command", "execute_launch_command", "_daemon_run_command", } def telemetry_wrapper(metadata): """ Wrapper around functions that are logged. Will log the function_name, client_time, and elapsed_time, and success. Wrapped function must be in the list of whitelisted function, and must have a DagsterInstance parameter named 'instance' in the signature. 
""" if callable(metadata): return _telemetry_wrapper(metadata) def _wraps(f): return _telemetry_wrapper(f, metadata) return _wraps def _telemetry_wrapper(f, metadata=None): metadata = check.opt_dict_param(metadata, "metadata", key_type=str, value_type=str) if f.__name__ not in TELEMETRY_WHITELISTED_FUNCTIONS: raise DagsterInvariantViolationError( "Attempted to log telemetry for function {name} that is not in telemetry whitelisted " "functions list: {whitelist}.".format( name=f.__name__, whitelist=TELEMETRY_WHITELISTED_FUNCTIONS ) ) var_names = f.__code__.co_varnames try: instance_index = var_names.index("instance") except ValueError: raise DagsterInvariantViolationError( "Attempted to log telemetry for function {name} that does not take a DagsterInstance " "in a parameter called 'instance'" ) @wraps(f) def wrap(*args, **kwargs): instance = _check_telemetry_instance_param(args, kwargs, instance_index) start_time = datetime.datetime.now() log_action( instance=instance, action=f.__name__ + "_started", client_time=start_time, metadata=metadata, ) result = f(*args, **kwargs) end_time = datetime.datetime.now() success_metadata = {"success": getattr(result, "success", None)} log_action( instance=instance, action=f.__name__ + "_ended", client_time=end_time, elapsed_time=end_time - start_time, metadata=merge_dicts(success_metadata, metadata), ) return result return wrap def get_python_version(): version = sys.version_info return "{}.{}.{}".format(version.major, version.minor, version.micro) class TelemetryEntry( namedtuple( "TelemetryEntry", "action client_time elapsed_time event_id instance_id pipeline_name_hash " "num_pipelines_in_repo repo_hash python_version metadata version dagster_version os_desc os_platform", ) ): """ Schema for telemetry logs. Currently, log entries are coerced to the same schema to enable storing all entries in one DB table with unified schema. action - Name of function called i.e. `execute_pipeline_started` (see: fn telemetry_wrapper) client_time - Client time elapsed_time - Time elapsed between start of function and end of function call event_id - Unique id for the event instance_id - Unique id for dagster instance pipeline_name_hash - Hash of pipeline name, if any python_version - Python version repo_hash - Hash of repo name, if any num_pipelines_in_repo - Number of pipelines in repo, if any metadata - More information i.e. pipeline success (boolean) version - Schema version dagster_version - Version of the project being used. os_desc - String describing OS in use os_platform - Terse string describing OS platform - linux, windows, darwin, etc. 
If $DAGSTER_HOME is set, then use $DAGSTER_HOME/logs/ Otherwise, use ~/.dagster/logs/ """ def __new__( cls, action, client_time, event_id, instance_id, elapsed_time=None, pipeline_name_hash=None, num_pipelines_in_repo=None, repo_hash=None, metadata=None, ): action = check.str_param(action, "action") client_time = check.str_param(client_time, "action") elapsed_time = check.opt_str_param(elapsed_time, "elapsed_time", "") event_id = check.str_param(event_id, "event_id") instance_id = check.str_param(instance_id, "instance_id") metadata = check.opt_dict_param(metadata, "metadata") pipeline_name_hash = check.opt_str_param( pipeline_name_hash, "pipeline_name_hash", default="" ) num_pipelines_in_repo = check.opt_str_param( num_pipelines_in_repo, "num_pipelines_in_repo", default="" ) repo_hash = check.opt_str_param(repo_hash, "repo_hash", default="") return super(TelemetryEntry, cls).__new__( cls, action=action, client_time=client_time, elapsed_time=elapsed_time, event_id=event_id, instance_id=instance_id, pipeline_name_hash=pipeline_name_hash, num_pipelines_in_repo=num_pipelines_in_repo, repo_hash=repo_hash, python_version=get_python_version(), metadata=metadata, version=TELEMETRY_VERSION, dagster_version=dagster_module_version, os_desc=OS_DESC, os_platform=OS_PLATFORM, ) def _dagster_home_if_set(): dagster_home_path = os.getenv("DAGSTER_HOME") if not dagster_home_path: return None return os.path.expanduser(dagster_home_path) def get_dir_from_dagster_home(target_dir): """ If $DAGSTER_HOME is set, return $DAGSTER_HOME/<target_dir>/ Otherwise, return ~/.dagster/<target_dir>/ The 'logs' directory is used to cache logs before upload The '.logs_queue' directory is used to temporarily store logs during upload. This is to prevent dropping events or double-sending events that occur during the upload process. The '.telemetry' directory is used to store the instance id. """ dagster_home_path = _dagster_home_if_set() if dagster_home_path is None: dagster_home_path = os.path.expanduser(DAGSTER_HOME_FALLBACK) dagster_home_logs_path = os.path.join(dagster_home_path, target_dir) if not os.path.exists(dagster_home_logs_path): os.makedirs(dagster_home_logs_path) return dagster_home_logs_path def get_log_queue_dir(): """ Get the directory where we store log queue files, creating the directory if needed. The log queue directory is used to temporarily store logs during upload. This is to prevent dropping events or double-sending events that occur during the upload process. 
If $DAGSTER_HOME is set, return $DAGSTER_HOME/.logs_queue/ Otherwise, return ~/.dagster/.logs_queue/ """ dagster_home_path = _dagster_home_if_set() if dagster_home_path is None: dagster_home_path = os.path.expanduser(DAGSTER_HOME_FALLBACK) dagster_home_logs_queue_path = dagster_home_path + "/.logs_queue/" if not os.path.exists(dagster_home_logs_queue_path): os.makedirs(dagster_home_logs_queue_path) return dagster_home_logs_queue_path def _check_telemetry_instance_param(args, kwargs, instance_index): if "instance" in kwargs: return check.inst_param( kwargs["instance"], "instance", DagsterInstance, "'instance' parameter passed as keyword argument must be a DagsterInstance", ) else: check.invariant(len(args) > instance_index) return check.inst_param( args[instance_index], "instance", DagsterInstance, "'instance' argument at position {position} must be a DagsterInstance".format( position=instance_index ), ) def _get_telemetry_logger(): logger = logging.getLogger("dagster_telemetry_logger") if len(logger.handlers) == 0: handler = RotatingFileHandler( os.path.join(get_dir_from_dagster_home("logs"), "event.log"), maxBytes=MAX_BYTES, backupCount=10, ) logger.setLevel(logging.INFO) logger.addHandler(handler) return logger # For use in test teardown def cleanup_telemetry_logger(): logger = logging.getLogger("dagster_telemetry_logger") if len(logger.handlers) == 0: return check.invariant(len(logger.handlers) == 1) handler = next(iter(logger.handlers)) handler.close() logger.removeHandler(handler) def write_telemetry_log_line(log_line): logger = _get_telemetry_logger() logger.info(json.dumps(log_line)) def _get_instance_telemetry_info(instance): check.inst_param(instance, "instance", DagsterInstance) dagster_telemetry_enabled = _get_instance_telemetry_enabled(instance) instance_id = None if dagster_telemetry_enabled: instance_id = _get_or_set_instance_id() return (dagster_telemetry_enabled, instance_id) def _get_instance_telemetry_enabled(instance): return instance.telemetry_enabled def _get_or_set_instance_id(): instance_id = _get_telemetry_instance_id() if instance_id == None: instance_id = _set_telemetry_instance_id() return instance_id # Gets the instance_id at $DAGSTER_HOME/.telemetry/id.yaml def _get_telemetry_instance_id(): telemetry_id_path = os.path.join(get_dir_from_dagster_home(TELEMETRY_STR), "id.yaml") if not os.path.exists(telemetry_id_path): return with open(telemetry_id_path, "r") as telemetry_id_file: telemetry_id_yaml = yaml.safe_load(telemetry_id_file) if INSTANCE_ID_STR in telemetry_id_yaml and isinstance( telemetry_id_yaml[INSTANCE_ID_STR], str ): return telemetry_id_yaml[INSTANCE_ID_STR] return None # Sets the instance_id at $DAGSTER_HOME/.telemetry/id.yaml def _set_telemetry_instance_id(): click.secho(TELEMETRY_TEXT) click.secho(SLACK_PROMPT) telemetry_id_path = os.path.join(get_dir_from_dagster_home(TELEMETRY_STR), "id.yaml") instance_id = str(uuid.uuid4()) try: # In case we encounter an error while writing to user's file system with open(telemetry_id_path, "w") as telemetry_id_file: yaml.dump({INSTANCE_ID_STR: instance_id}, telemetry_id_file, default_flow_style=False) return instance_id except Exception: return "<<unable_to_write_instance_id>>" def hash_name(name): return hashlib.sha256(name.encode("utf-8")).hexdigest() def log_external_repo_stats(instance, source, external_repo, external_pipeline=None): from dagster.core.host_representation.external import ( ExternalPipeline, ExternalRepository, ) check.inst_param(instance, "instance", DagsterInstance) check.str_param(source, 
"source") check.inst_param(external_repo, "external_repo", ExternalRepository) check.opt_inst_param(external_pipeline, "external_pipeline", ExternalPipeline) if _get_instance_telemetry_enabled(instance): instance_id = _get_or_set_instance_id() pipeline_name_hash = hash_name(external_pipeline.name) if external_pipeline else "" repo_hash = hash_name(external_repo.name) num_pipelines_in_repo = len(external_repo.get_all_external_pipelines()) write_telemetry_log_line( TelemetryEntry( action=UPDATE_REPO_STATS, client_time=str(datetime.datetime.now()), event_id=str(uuid.uuid4()), instance_id=instance_id, pipeline_name_hash=pipeline_name_hash, num_pipelines_in_repo=str(num_pipelines_in_repo), repo_hash=repo_hash, metadata={"source": source}, )._asdict() ) def log_repo_stats(instance, source, pipeline=None, repo=None): check.inst_param(instance, "instance", DagsterInstance) check.str_param(source, "source") check.opt_inst_param(pipeline, "pipeline", IPipeline) check.opt_inst_param(repo, "repo", ReconstructableRepository) if _get_instance_telemetry_enabled(instance): instance_id = _get_or_set_instance_id() if isinstance(pipeline, ReconstructablePipeline): pipeline_name_hash = hash_name(pipeline.get_definition().name) repository = pipeline.get_reconstructable_repository().get_definition() repo_hash = hash_name(repository.name) num_pipelines_in_repo = len(repository.pipeline_names) elif isinstance(repo, ReconstructableRepository): pipeline_name_hash = "" repository = repo.get_definition() repo_hash = hash_name(repository.name) num_pipelines_in_repo = len(repository.pipeline_names) else: pipeline_name_hash = hash_name(pipeline.get_definition().name) repo_hash = hash_name(get_ephemeral_repository_name(pipeline.get_definition().name)) num_pipelines_in_repo = 1 write_telemetry_log_line( TelemetryEntry( action=UPDATE_REPO_STATS, client_time=str(datetime.datetime.now()), event_id=str(uuid.uuid4()), instance_id=instance_id, pipeline_name_hash=pipeline_name_hash, num_pipelines_in_repo=str(num_pipelines_in_repo), repo_hash=repo_hash, metadata={"source": source}, )._asdict() ) def log_workspace_stats(instance, workspace_process_context): from dagster.core.workspace import IWorkspaceProcessContext check.inst_param(instance, "instance", DagsterInstance) check.inst_param( workspace_process_context, "workspace_process_context", IWorkspaceProcessContext ) request_context = workspace_process_context.create_request_context() for repo_location in request_context.repository_locations: for external_repo in repo_location.get_repositories().values(): log_external_repo_stats(instance, source="dagit", external_repo=external_repo) def log_action( instance, action, client_time=None, elapsed_time=None, metadata=None, pipeline_name_hash=None, repo_hash=None, ): check.inst_param(instance, "instance", DagsterInstance) if client_time is None: client_time = datetime.datetime.now() (dagster_telemetry_enabled, instance_id) = _get_instance_telemetry_info(instance) if dagster_telemetry_enabled: # Log general statistics write_telemetry_log_line( TelemetryEntry( action=action, client_time=str(client_time), elapsed_time=str(elapsed_time), event_id=str(uuid.uuid4()), instance_id=instance_id, metadata=metadata, repo_hash=repo_hash, pipeline_name_hash=pipeline_name_hash, )._asdict() ) TELEMETRY_TEXT = """ %(telemetry)s As an open source project, we collect usage statistics to inform development priorities. For more information, read https://docs.dagster.io/install#telemetry. 
We will not see or store solid definitions, pipeline definitions, modes, resources, context, or any data that is processed within solids and pipelines. To opt-out, add the following to $DAGSTER_HOME/dagster.yaml, creating that file if necessary: telemetry: enabled: false """ % { "telemetry": click.style("Telemetry:", fg="blue", bold=True) } SLACK_PROMPT = """ %(welcome)s If you have any questions or would like to engage with the Dagster team, please join us on Slack (https://bit.ly/39dvSsF). """ % { "welcome": click.style("Welcome to Dagster!", bold=True) }
1
18,504
nit: type this (see the sketch after this record).
dagster-io-dagster
py
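A hedged sketch of one way to satisfy the "type this" nit: replacing the `collections.namedtuple` field string with a `typing.NamedTuple` so the fields added by the patch (`num_schedules_in_repo`, `num_sensors_in_repo`) carry annotations. This is an illustration of the reviewer's suggestion, not the change that actually landed; the name `TelemetryEntryFields` is hypothetical, and the existing fields are typed `str` here because `TelemetryEntry.__new__` coerces them to strings.

    # Hypothetical typed schema for the telemetry entry fields.
    from typing import NamedTuple, Optional


    class TelemetryEntryFields(NamedTuple):
        action: str
        client_time: str
        elapsed_time: str
        event_id: str
        instance_id: str
        pipeline_name_hash: str
        num_pipelines_in_repo: str
        num_schedules_in_repo: str  # added by this patch
        num_sensors_in_repo: str  # added by this patch
        repo_hash: str
        python_version: str
        metadata: Optional[dict]
        version: str
        dagster_version: str
        os_desc: str
        os_platform: str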
@@ -318,4 +318,11 @@ public interface DriverCommand {
   // Mobile API
   String GET_NETWORK_CONNECTION = "getNetworkConnection";
   String SET_NETWORK_CONNECTION = "setNetworkConnection";
+
+  // Cast Media Router API
+  String GET_CAST_SINKS = "getCastSinks";
+  String SET_CAST_SINK_TO_USE = "selectCastSink";
+  String START_CAST_TAB_MIRRORING = "startCastTabMirroring";
+  String GET_CAST_ISSUE_MESSAGE = "getCastIssueMessage";
+  String STOP_CASTING = "stopCasting";
 }
1
// Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

package org.openqa.selenium.remote;

import com.google.common.collect.ImmutableMap;

import org.openqa.selenium.Capabilities;
import org.openqa.selenium.Cookie;
import org.openqa.selenium.Dimension;
import org.openqa.selenium.Point;
import org.openqa.selenium.WindowType;
import org.openqa.selenium.interactions.Sequence;

import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;

/**
 * An empty interface defining constants for the standard commands defined in the WebDriver JSON
 * wire protocol.
 *
 * @author [email protected] (Jason Leyba)
 */
public interface DriverCommand {
  String GET_ALL_SESSIONS = "getAllSessions";
  String GET_CAPABILITIES = "getCapabilities";
  String NEW_SESSION = "newSession";
  static CommandPayload NEW_SESSION(Capabilities capabilities) {
    return new CommandPayload(NEW_SESSION, ImmutableMap.of("desiredCapabilities", capabilities));
  }

  String STATUS = "status";

  String CLOSE = "close";
  String QUIT = "quit";

  String GET = "get";
  static CommandPayload GET(String url) {
    return new CommandPayload(GET, ImmutableMap.of("url", url));
  }
  String GO_BACK = "goBack";
  String GO_FORWARD = "goForward";
  String REFRESH = "refresh";

  String ADD_COOKIE = "addCookie";
  static CommandPayload ADD_COOKIE(Cookie cookie) {
    return new CommandPayload(ADD_COOKIE, ImmutableMap.of("cookie", cookie));
  }
  String GET_ALL_COOKIES = "getCookies";
  String GET_COOKIE = "getCookie";
  String DELETE_COOKIE = "deleteCookie";
  static CommandPayload DELETE_COOKIE(String name) {
    return new CommandPayload(DELETE_COOKIE, ImmutableMap.of("name", name));
  }
  String DELETE_ALL_COOKIES = "deleteAllCookies";

  String FIND_ELEMENT = "findElement";
  static CommandPayload FIND_ELEMENT(String strategy, String value) {
    return new CommandPayload(FIND_ELEMENT, ImmutableMap.of("using", strategy, "value", value));
  }
  String FIND_ELEMENTS = "findElements";
  static CommandPayload FIND_ELEMENTS(String strategy, String value) {
    return new CommandPayload(FIND_ELEMENTS, ImmutableMap.of("using", strategy, "value", value));
  }
  String FIND_CHILD_ELEMENT = "findChildElement";
  static CommandPayload FIND_CHILD_ELEMENT(String id, String strategy, String value) {
    return new CommandPayload(FIND_CHILD_ELEMENT,
                              ImmutableMap.of("id", id, "using", strategy, "value", value));
  }
  String FIND_CHILD_ELEMENTS = "findChildElements";
  static CommandPayload FIND_CHILD_ELEMENTS(String id, String strategy, String value) {
    return new CommandPayload(FIND_CHILD_ELEMENTS,
                              ImmutableMap.of("id", id, "using", strategy, "value", value));
  }

  String CLEAR_ELEMENT = "clearElement";
  static CommandPayload CLEAR_ELEMENT(String id) {
    return new CommandPayload(CLEAR_ELEMENT, ImmutableMap.of("id", id));
  }
  String CLICK_ELEMENT = "clickElement";
  static CommandPayload CLICK_ELEMENT(String id) {
    return new CommandPayload(CLICK_ELEMENT, ImmutableMap.of("id", id));
  }
  String SEND_KEYS_TO_ELEMENT = "sendKeysToElement";
  static CommandPayload SEND_KEYS_TO_ELEMENT(String id, CharSequence[] keysToSend) {
    return new CommandPayload(SEND_KEYS_TO_ELEMENT, ImmutableMap.of("id", id, "value", keysToSend));
  }
  String SEND_KEYS_TO_ACTIVE_ELEMENT = "sendKeysToActiveElement";
  String SUBMIT_ELEMENT = "submitElement";
  static CommandPayload SUBMIT_ELEMENT(String id) {
    return new CommandPayload(SUBMIT_ELEMENT, ImmutableMap.of("id", id));
  }
  String UPLOAD_FILE = "uploadFile";
  static CommandPayload UPLOAD_FILE(String file) {
    return new CommandPayload(UPLOAD_FILE, ImmutableMap.of("file", file));
  }

  String GET_CURRENT_WINDOW_HANDLE = "getCurrentWindowHandle";
  String GET_WINDOW_HANDLES = "getWindowHandles";

  String GET_CURRENT_CONTEXT_HANDLE = "getCurrentContextHandle";
  String GET_CONTEXT_HANDLES = "getContextHandles";

  String SWITCH_TO_WINDOW = "switchToWindow";
  static CommandPayload SWITCH_TO_WINDOW(String windowHandleOrName) {
    return new CommandPayload(SWITCH_TO_WINDOW, ImmutableMap.of("handle", windowHandleOrName));
  }
  String SWITCH_TO_NEW_WINDOW = "newWindow";
  static CommandPayload SWITCH_TO_NEW_WINDOW(WindowType typeHint) {
    return new CommandPayload(SWITCH_TO_NEW_WINDOW, ImmutableMap.of("type", typeHint.toString()));
  }
  String SWITCH_TO_CONTEXT = "switchToContext";
  String SWITCH_TO_FRAME = "switchToFrame";
  static CommandPayload SWITCH_TO_FRAME(Object frame) {
    return new CommandPayload(SWITCH_TO_FRAME, Collections.singletonMap("id", frame));
  }
  String SWITCH_TO_PARENT_FRAME = "switchToParentFrame";
  String GET_ACTIVE_ELEMENT = "getActiveElement";

  String GET_CURRENT_URL = "getCurrentUrl";
  String GET_PAGE_SOURCE = "getPageSource";
  String GET_TITLE = "getTitle";

  String EXECUTE_SCRIPT = "executeScript";
  static CommandPayload EXECUTE_SCRIPT(String script, List<Object> args) {
    return new CommandPayload(EXECUTE_SCRIPT, ImmutableMap.of("script", script, "args", args));
  }
  String EXECUTE_ASYNC_SCRIPT = "executeAsyncScript";
  static CommandPayload EXECUTE_ASYNC_SCRIPT(String script, List<Object> args) {
    return new CommandPayload(EXECUTE_ASYNC_SCRIPT, ImmutableMap.of("script", script, "args", args));
  }

  String GET_ELEMENT_TEXT = "getElementText";
  static CommandPayload GET_ELEMENT_TEXT(String id) {
    return new CommandPayload(GET_ELEMENT_TEXT, ImmutableMap.of("id", id));
  }
  String GET_ELEMENT_TAG_NAME = "getElementTagName";
  static CommandPayload GET_ELEMENT_TAG_NAME(String id) {
    return new CommandPayload(GET_ELEMENT_TAG_NAME, ImmutableMap.of("id", id));
  }
  String IS_ELEMENT_SELECTED = "isElementSelected";
  static CommandPayload IS_ELEMENT_SELECTED(String id) {
    return new CommandPayload(IS_ELEMENT_SELECTED, ImmutableMap.of("id", id));
  }
  String IS_ELEMENT_ENABLED = "isElementEnabled";
  static CommandPayload IS_ELEMENT_ENABLED(String id) {
    return new CommandPayload(IS_ELEMENT_ENABLED, ImmutableMap.of("id", id));
  }
  String IS_ELEMENT_DISPLAYED = "isElementDisplayed";
  static CommandPayload IS_ELEMENT_DISPLAYED(String id) {
    return new CommandPayload(IS_ELEMENT_DISPLAYED, ImmutableMap.of("id", id));
  }
  String GET_ELEMENT_RECT = "getElementRect";
  static CommandPayload GET_ELEMENT_RECT(String id) {
    return new CommandPayload(GET_ELEMENT_RECT, ImmutableMap.of("id", id));
  }
  String GET_ELEMENT_LOCATION = "getElementLocation";
  static CommandPayload GET_ELEMENT_LOCATION(String id) {
    return new CommandPayload(GET_ELEMENT_LOCATION, ImmutableMap.of("id", id));
  }
  String GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW = "getElementLocationOnceScrolledIntoView";
  static CommandPayload GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW(String id) {
    return new CommandPayload(GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW, ImmutableMap.of("id", id));
  }
  String GET_ELEMENT_SIZE = "getElementSize";
  static CommandPayload GET_ELEMENT_SIZE(String id) {
    return new CommandPayload(GET_ELEMENT_SIZE, ImmutableMap.of("id", id));
  }
  String GET_ELEMENT_ATTRIBUTE = "getElementAttribute";
  static CommandPayload GET_ELEMENT_ATTRIBUTE(String id, String name) {
    return new CommandPayload(GET_ELEMENT_ATTRIBUTE, ImmutableMap.of("id", id, "name", name));
  }
  String GET_ELEMENT_PROPERTY = "getElementProperty";
  String GET_ELEMENT_VALUE_OF_CSS_PROPERTY = "getElementValueOfCssProperty";
  static CommandPayload GET_ELEMENT_VALUE_OF_CSS_PROPERTY(String id, String name) {
    return new CommandPayload(GET_ELEMENT_VALUE_OF_CSS_PROPERTY,
                              ImmutableMap.of("id", id, "propertyName", name));
  }
  String ELEMENT_EQUALS = "elementEquals";

  String SCREENSHOT = "screenshot";
  String ELEMENT_SCREENSHOT = "elementScreenshot";
  static CommandPayload ELEMENT_SCREENSHOT(String id) {
    return new CommandPayload(ELEMENT_SCREENSHOT, ImmutableMap.of("id", id));
  }

  String ACCEPT_ALERT = "acceptAlert";
  String DISMISS_ALERT = "dismissAlert";
  String GET_ALERT_TEXT = "getAlertText";
  String SET_ALERT_VALUE = "setAlertValue";
  static CommandPayload SET_ALERT_VALUE(String keysToSend) {
    return new CommandPayload(SET_ALERT_VALUE, ImmutableMap.of("text", keysToSend));
  }
  String SET_ALERT_CREDENTIALS = "setAlertCredentials";

  String SET_TIMEOUT = "setTimeout";
  static CommandPayload SET_IMPLICIT_WAIT_TIMEOUT(long time, TimeUnit unit) {
    return new CommandPayload(
        SET_TIMEOUT, ImmutableMap.of("implicit", TimeUnit.MILLISECONDS.convert(time, unit)));
  }
  static CommandPayload SET_SCRIPT_TIMEOUT(long time, TimeUnit unit) {
    return new CommandPayload(
        SET_TIMEOUT, ImmutableMap.of("script", TimeUnit.MILLISECONDS.convert(time, unit)));
  }
  static CommandPayload SET_PAGE_LOAD_TIMEOUT(long time, TimeUnit unit) {
    return new CommandPayload(
        SET_TIMEOUT, ImmutableMap.of("pageLoad", TimeUnit.MILLISECONDS.convert(time, unit)));
  }
  String IMPLICITLY_WAIT = "implicitlyWait";
  String SET_SCRIPT_TIMEOUT = "setScriptTimeout";

  String GET_LOCATION = "getLocation";
  String SET_LOCATION = "setLocation";
  String GET_APP_CACHE = "getAppCache";
  String GET_APP_CACHE_STATUS = "getStatus";
  String CLEAR_APP_CACHE = "clearAppCache";
  String IS_BROWSER_ONLINE = "isBrowserOnline";
  String SET_BROWSER_ONLINE = "setBrowserOnline";

  String GET_LOCAL_STORAGE_ITEM = "getLocalStorageItem";
  String GET_LOCAL_STORAGE_KEYS = "getLocalStorageKeys";
  String SET_LOCAL_STORAGE_ITEM = "setLocalStorageItem";
  String REMOVE_LOCAL_STORAGE_ITEM = "removeLocalStorageItem";
  String CLEAR_LOCAL_STORAGE = "clearLocalStorage";
  String GET_LOCAL_STORAGE_SIZE = "getLocalStorageSize";

  String GET_SESSION_STORAGE_ITEM = "getSessionStorageItem";
  String GET_SESSION_STORAGE_KEYS = "getSessionStorageKey";
  String SET_SESSION_STORAGE_ITEM = "setSessionStorageItem";
  String REMOVE_SESSION_STORAGE_ITEM = "removeSessionStorageItem";
  String CLEAR_SESSION_STORAGE = "clearSessionStorage";
  String GET_SESSION_STORAGE_SIZE = "getSessionStorageSize";

  String SET_SCREEN_ORIENTATION = "setScreenOrientation";
  String GET_SCREEN_ORIENTATION = "getScreenOrientation";
  String SET_SCREEN_ROTATION = "setScreenRotation";
  String GET_SCREEN_ROTATION = "getScreenRotation";

  // W3C Actions APIs
  String ACTIONS = "actions";
  static CommandPayload ACTIONS(Collection<Sequence> actions) {
    return new CommandPayload(ACTIONS, ImmutableMap.of("actions", actions));
  }
  String CLEAR_ACTIONS_STATE = "clearActionState";

  // These belong to the Advanced user interactions - an element is
  // optional for these commands.
  String CLICK = "mouseClick";
  String DOUBLE_CLICK = "mouseDoubleClick";
  String MOUSE_DOWN = "mouseButtonDown";
  String MOUSE_UP = "mouseButtonUp";
  String MOVE_TO = "mouseMoveTo";

  // Those allow interactions with the Input Methods installed on
  // the system.
  String IME_GET_AVAILABLE_ENGINES = "imeGetAvailableEngines";
  String IME_GET_ACTIVE_ENGINE = "imeGetActiveEngine";
  String IME_IS_ACTIVATED = "imeIsActivated";
  String IME_DEACTIVATE = "imeDeactivate";
  String IME_ACTIVATE_ENGINE = "imeActivateEngine";
  static CommandPayload IME_ACTIVATE_ENGINE(String engine) {
    return new CommandPayload(SET_ALERT_VALUE, ImmutableMap.of("engine", engine));
  }

  // These belong to the Advanced Touch API
  String TOUCH_SINGLE_TAP = "touchSingleTap";
  String TOUCH_DOWN = "touchDown";
  String TOUCH_UP = "touchUp";
  String TOUCH_MOVE = "touchMove";
  String TOUCH_SCROLL = "touchScroll";
  String TOUCH_DOUBLE_TAP = "touchDoubleTap";
  String TOUCH_LONG_PRESS = "touchLongPress";
  String TOUCH_FLICK = "touchFlick";

  // Window API
  String SET_CURRENT_WINDOW_POSITION = "setWindowPosition";
  static CommandPayload SET_CURRENT_WINDOW_POSITION(Point targetPosition) {
    return new CommandPayload(
        SET_CURRENT_WINDOW_POSITION, ImmutableMap.of("x", targetPosition.x, "y", targetPosition.y));
  }
  String GET_CURRENT_WINDOW_POSITION = "getWindowPosition";
  static CommandPayload GET_CURRENT_WINDOW_POSITION() {
    return new CommandPayload(
        GET_CURRENT_WINDOW_POSITION, ImmutableMap.of("windowHandle", "current"));
  }

  // W3C compatible Window API
  String SET_CURRENT_WINDOW_SIZE = "setCurrentWindowSize";
  static CommandPayload SET_CURRENT_WINDOW_SIZE(Dimension targetSize) {
    return new CommandPayload(
        SET_CURRENT_WINDOW_SIZE,
        ImmutableMap.of("width", targetSize.width, "height", targetSize.height));
  }
  String GET_CURRENT_WINDOW_SIZE = "getCurrentWindowSize";
  String MAXIMIZE_CURRENT_WINDOW = "maximizeCurrentWindow";
  String FULLSCREEN_CURRENT_WINDOW = "fullscreenCurrentWindow";

  // Logging API
  String GET_AVAILABLE_LOG_TYPES = "getAvailableLogTypes";
  String GET_LOG = "getLog";
  String GET_SESSION_LOGS = "getSessionLogs";

  // Mobile API
  String GET_NETWORK_CONNECTION = "getNetworkConnection";
  String SET_NETWORK_CONNECTION = "setNetworkConnection";
}
1
16,690
These command names are specific to Chromium-based browsers. Please move them to `ChromiumDriverCommand` (see the sketch after this record).
SeleniumHQ-selenium
java
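A minimal sketch of the relocation the reviewer asks for: the Cast constants held in a Chromium-specific type instead of the cross-browser `DriverCommand` interface. The class name `ChromiumDriverCommand` comes from the review comment; the package and the constants-only shape shown here mirror the `DriverCommand` style but are assumptions, not the merged change.

  package org.openqa.selenium.chromium;

  // Hypothetical home for commands only Chromium-based browsers understand.
  public interface ChromiumDriverCommand {
    // Cast Media Router API
    String GET_CAST_SINKS = "getCastSinks";
    String SET_CAST_SINK_TO_USE = "selectCastSink";
    String START_CAST_TAB_MIRRORING = "startCastTabMirroring";
    String GET_CAST_ISSUE_MESSAGE = "getCastIssueMessage";
    String STOP_CASTING = "stopCasting";
  }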
@@ -234,7 +234,8 @@ func CheckIfBDBelongsToNode(nodeName string) Validate {
 func (bd *BlockDevice) CheckIfBDBelongsToNode(nodeName string) error {
 	if !bd.IsBelongToNode(nodeName) {
 		return errors.Errorf(
-			"block device doesn't belongs to node %s",
+			"block device %s doesn't belongs to node %s",
+			bd.Object.Name,
 			bd.Object.Spec.NodeAttributes.NodeName,
 		)
 	}
1
/* Copyright 2019 The OpenEBS Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha2 import ( ndm "github.com/openebs/maya/pkg/apis/openebs.io/ndm/v1alpha1" apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" "github.com/pkg/errors" ) //TODO: While using these packages UnitTest //must be written to corresponding function // BlockDeviceState is label for block device states type BlockDeviceState string const ( // BlockDeviceStateActive is active state of the block device BlockDeviceStateActive BlockDeviceState = "Active" ) // DefaultBlockDeviceCount is a map containing the // default block device count of various raid types. var DefaultBlockDeviceCount = map[string]int{ string(apis.PoolTypeMirroredCPV): int(apis.MirroredBlockDeviceCountCPV), string(apis.PoolTypeStripedCPV): int(apis.StripedBlockDeviceCountCPV), string(apis.PoolTypeRaidzCPV): int(apis.RaidzBlockDeviceCountCPV), string(apis.PoolTypeRaidz2CPV): int(apis.Raidz2BlockDeviceCountCPV), } // BlockDevice encapsulates BlockDevice api object. type BlockDevice struct { // actual block device object Object *ndm.BlockDevice } // BlockDeviceList holds the list of BlockDevice api type BlockDeviceList struct { // list of blockdevices ObjectList *ndm.BlockDeviceList } // Predicate defines an abstraction to determine conditional checks against the // provided block device instance type Predicate func(*BlockDevice) bool // predicateList holds the list of Predicates type predicateList []Predicate // all returns true if all the predicates succeed against the provided block // device instance. func (l predicateList) all(c *BlockDevice) bool { for _, pred := range l { if !pred(c) { return false } } return true } // Validate defines an abstraction to determine conditional check against the // provided block device type Validate func(*BlockDevice) error // HasAnnotation is predicate to filter out based on // annotation in BDC instances func HasAnnotation(key, value string) Predicate { return func(bd *BlockDevice) bool { return bd.HasAnnotation(key, value) } } // HasAnnotation return true if provided annotation // key and value are present in the the provided block device List // instance func (bd *BlockDevice) HasAnnotation(key, value string) bool { val, ok := bd.Object.GetAnnotations()[key] if ok { return val == value } return false } // IsSparse filters the block device based on type of the disk func IsSparse() Predicate { return func(bd *BlockDevice) bool { return bd.IsSparse() } } // IsSparse returns true if the block device is of sparse type func (bd *BlockDevice) IsSparse() bool { return bd.Object.Spec.Details.DeviceType == string(apis.TypeBlockDeviceCPV) } // IsActive filters the block device based on the active status func IsActive() Predicate { return func(bd *BlockDevice) bool { return bd.IsActive() } } // IsActive returns true if the block device is active. 
func (bd *BlockDevice) IsActive() bool { return bd.Object.Status.State == string(BlockDeviceStateActive) } // IsUnclaimed filters the block device based on unclaimed status func IsUnclaimed() Predicate { return func(bd *BlockDevice) bool { return bd.IsUnclaimed() } } // IsUnclaimed returns true if the block device is unclaimed func (bd *BlockDevice) IsUnclaimed() bool { return bd.Object.Status.ClaimState == ndm.BlockDeviceUnclaimed } // IsClaimed filters the block deive based on claimed status func IsClaimed() Predicate { return func(bd *BlockDevice) bool { return bd.IsClaimed() } } // IsClaimed returns true if the block device is claimed func (bd *BlockDevice) IsClaimed() bool { return bd.Object.Status.ClaimState == ndm.BlockDeviceClaimed } // HasFileSystem returns true if the block device has filesystem func (bd *BlockDevice) HasFileSystem() bool { return bd.Object.Spec.FileSystem.Type != "" } // IsUsable filters the block device based on usage of disk func IsUsable(usedBlockDevices map[string]int) Predicate { return func(bd *BlockDevice) bool { return bd.IsUsable(usedBlockDevices) } } // IsUsable returns true if this block device // can be used for pool provisioning. // The argument usedBlockDevice is a map containing // key as block device cr name and value as integer. // If the value of map is greater than 0 , // then this corresponding block device is not usable. func (bd *BlockDevice) IsUsable(usedBD map[string]int) bool { return usedBD[bd.Object.Name] == 0 } // IsUsableNode filters the block device based on usage of node func IsUsableNode(usedNodes map[string]bool) Predicate { return func(bd *BlockDevice) bool { return bd.IsUsableNode(usedNodes) } } // IsUsableNode returns true if block device of this node can be used // for pool provisioning. The argument usedNodes is a map containing // key as node name and value as bool. If the value of map is greater // than false, then this corresponding node is not usable. func (bd *BlockDevice) IsUsableNode(usedNodes map[string]bool) bool { return !usedNodes[bd.GetNodeName()] } // IsBelongToNode returns true if the block device belongs to the provided node. func IsBelongToNode(nodeName string) Predicate { return func(bd *BlockDevice) bool { return bd.IsBelongToNode(nodeName) } } // IsBelongToNode returns true if the block device belongs to the provided node. 
func (bd *BlockDevice) IsBelongToNode(nodeName string) bool {
	return bd.GetNodeName() == nodeName
}

// IsValidPoolTopology returns true if the block device count
// is a multiple of the default block device count of the given raid type
func IsValidPoolTopology(poolType string, bdCount int) bool {
	defaultCount := DefaultBlockDeviceCount[poolType]
	// an unknown pool type has a default count of 0 and is never valid
	if defaultCount == 0 {
		return false
	}
	return bdCount%defaultCount == 0
}

// GetNodeName returns the node name to which the block device is attached
func (bd *BlockDevice) GetNodeName() string {
	return bd.Object.Spec.NodeAttributes.NodeName
}

// CheckIfBDIsActive validates the block device based on status
func CheckIfBDIsActive() Validate {
	return func(bd *BlockDevice) error {
		return bd.CheckIfBDIsActive()
	}
}

// CheckIfBDIsActive returns an error when the block device is in any
// state other than active, else it returns nil
func (bd *BlockDevice) CheckIfBDIsActive() error {
	if !bd.IsActive() {
		return errors.Errorf(
			"block device is not in active state",
		)
	}
	return nil
}

// CheckIfBDBelongsToNode validates the block device based on the nodeName
// provided via argument
func CheckIfBDBelongsToNode(nodeName string) Validate {
	return func(bd *BlockDevice) error {
		return bd.CheckIfBDBelongsToNode(nodeName)
	}
}

// CheckIfBDBelongsToNode returns an error when the block device's node
// name doesn't match the provided node name
func (bd *BlockDevice) CheckIfBDBelongsToNode(nodeName string) error {
	if !bd.IsBelongToNode(nodeName) {
		return errors.Errorf(
			"block device doesn't belong to node %s",
			bd.Object.Spec.NodeAttributes.NodeName,
		)
	}
	return nil
}

// CheckIfBDIsNonFsType validates the block device based on filesystem type
func CheckIfBDIsNonFsType() Validate {
	return func(bd *BlockDevice) error {
		return bd.CheckIfBDIsNonFsType()
	}
}

// CheckIfBDIsNonFsType returns an error only when the block device has a
// filesystem
func (bd *BlockDevice) CheckIfBDIsNonFsType() error {
	if bd.HasFileSystem() {
		return errors.Errorf("block device has file system {%s}",
			bd.Object.Spec.FileSystem.Type,
		)
	}
	return nil
}

// ValidateBlockDevice validates the block device based on the arguments
// provided and returns an error if any validation fails
func (bd *BlockDevice) ValidateBlockDevice(v ...Validate) error {
	var err error
	for _, validate := range v {
		if err = validate(bd); err != nil {
			return errors.Wrapf(
				err,
				"block device %s validation failed",
				bd.Object.Name,
			)
		}
	}
	return nil
}

// Filter will filter the block device instances if all the predicates succeed
// against that block device.
func (l *BlockDeviceList) Filter(p ...Predicate) *BlockDeviceList {
	var plist predicateList
	plist = append(plist, p...)
	if len(plist) == 0 {
		return l
	}

	filtered := NewListBuilder().List()
	for _, bdAPI := range l.ObjectList.Items {
		bdAPI := bdAPI // pin it
		BlockDevice := BuilderForAPIObject(&bdAPI).BlockDevice
		if plist.all(BlockDevice) {
			filtered.ObjectList.Items = append(
				filtered.ObjectList.Items, *BlockDevice.Object)
		}
	}
	return filtered
}

// GetDeviceID returns the device link of the block device.
// If the device link is not found, it returns the device path.
// For cstor pool creation this link or path is used.
// For convenience, we call it the device ID.
// Hence, the device ID can either be a device link or a device path
// depending on what was available in the block device cr.
func (bd *BlockDevice) GetDeviceID() string {
	deviceID := bd.GetLink()
	if deviceID != "" {
		return deviceID
	}
	return bd.GetPath()
}

// GetLink returns the link of the block device
// if present, else it returns an empty string
func (bd *BlockDevice) GetLink() string {
	if len(bd.Object.Spec.DevLinks) != 0 &&
		len(bd.Object.Spec.DevLinks[0].Links) != 0 {
		return bd.Object.Spec.DevLinks[0].Links[0]
	}
	return ""
}

// GetPath returns the path of the block device
func (bd *BlockDevice) GetPath() string {
	return bd.Object.Spec.Path
}

// Len returns the length of the BlockDeviceList.
func (l *BlockDeviceList) Len() int {
	return len(l.ObjectList.Items)
}
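The predicate-based Filter API above composes cleanly; a hypothetical usage sketch in the same package (the function name and the source of bdList are made up, the predicates are the ones defined in this file):

// usableDevicesOn narrows an assumed *BlockDeviceList down to active,
// unclaimed devices attached to the given node.
func usableDevicesOn(bdList *BlockDeviceList, nodeName string) *BlockDeviceList {
	return bdList.Filter(
		IsActive(),
		IsUnclaimed(),
		IsBelongToNode(nodeName),
	)
}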
1
17,938
lets print nodeName that got passed also
openebs-maya
go
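The review message above asks for the nodeName that got passed to be printed as well; a minimal sketch of how CheckIfBDBelongsToNode could take up that suggestion (the exact message format is an assumption, not the merged change):

// Sketch of the reviewer's suggestion: report the requested nodeName
// alongside the node the block device actually belongs to.
func (bd *BlockDevice) CheckIfBDBelongsToNode(nodeName string) error {
	if !bd.IsBelongToNode(nodeName) {
		return errors.Errorf(
			"block device doesn't belong to node %s: it belongs to node %s",
			nodeName,
			bd.Object.Spec.NodeAttributes.NodeName,
		)
	}
	return nil
}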
@@ -38,6 +38,9 @@ try: except NameError: pass +if sys.version > '3': + long = int + class WebElement(object): """Represents a DOM element.
1
#!/usr/bin/python # # Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import hashlib import os import zipfile try: from StringIO import StringIO as IOStream except ImportError: # 3+ from io import BytesIO as IOStream import base64 from .command import Command from selenium.common.exceptions import WebDriverException from selenium.common.exceptions import InvalidSelectorException from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys try: str = basestring except NameError: pass class WebElement(object): """Represents a DOM element. Generally, all interesting operations that interact with a document will be performed through this interface. All method calls will do a freshness check to ensure that the element reference is still valid. This essentially determines whether or not the element is still attached to the DOM. If this test fails, then an ``StaleElementReferenceException`` is thrown, and all future calls to this instance will fail.""" def __init__(self, parent, id_): self._parent = parent self._id = id_ @property def tag_name(self): """This element's ``tagName`` property.""" return self._execute(Command.GET_ELEMENT_TAG_NAME)['value'] @property def text(self): """The text of the element.""" return self._execute(Command.GET_ELEMENT_TEXT)['value'] def click(self): """Clicks the element.""" self._execute(Command.CLICK_ELEMENT) def submit(self): """Submits a form.""" self._execute(Command.SUBMIT_ELEMENT) def clear(self): """Clears the text if it's a text entry element.""" self._execute(Command.CLEAR_ELEMENT) def get_attribute(self, name): """Gets the given attribute or property of the element. This method will first try to return the value of a property with the given name. If a property with that name doesn't exist, it returns the value of the attribute with the same name. If there's no attribute with that name, ``None`` is returned. Values which are considered truthy, that is equals "true" or "false", are returned as booleans. All other non-``None`` values are returned as strings. For attributes or properties which do not exist, ``None`` is returned. :Args: - name - Name of the attribute/property to retrieve. Example:: # Check if the "active" CSS class is applied to an element. is_active = "active" in target_element.get_attribute("class") """ resp = self._execute(Command.GET_ELEMENT_ATTRIBUTE, {'name': name}) attributeValue = '' if resp['value'] is None: attributeValue = None else: attributeValue = resp['value'] if name != 'value' and attributeValue.lower() in ('true', 'false'): attributeValue = attributeValue.lower() return attributeValue def is_selected(self): """Returns whether the element is selected. Can be used to check if a checkbox or radio button is selected. 
""" return self._execute(Command.IS_ELEMENT_SELECTED)['value'] def is_enabled(self): """Returns whether the element is enabled.""" return self._execute(Command.IS_ELEMENT_ENABLED)['value'] def find_element_by_id(self, id_): """Finds element within this element's children by ID. :Args: - id_ - ID of child element to locate. """ return self.find_element(by=By.ID, value=id_) def find_elements_by_id(self, id_): """Finds a list of elements within this element's children by ID. :Args: - id_ - Id of child element to find. """ return self.find_elements(by=By.ID, value=id_) def find_element_by_name(self, name): """Finds element within this element's children by name. :Args: - name - name property of the element to find. """ return self.find_element(by=By.NAME, value=name) def find_elements_by_name(self, name): """Finds a list of elements within this element's children by name. :Args: - name - name property to search for. """ return self.find_elements(by=By.NAME, value=name) def find_element_by_link_text(self, link_text): """Finds element within this element's children by visible link text. :Args: - link_text - Link text string to search for. """ return self.find_element(by=By.LINK_TEXT, value=link_text) def find_elements_by_link_text(self, link_text): """Finds a list of elements within this element's children by visible link text. :Args: - link_text - Link text string to search for. """ return self.find_elements(by=By.LINK_TEXT, value=link_text) def find_element_by_partial_link_text(self, link_text): """Finds element within this element's children by partially visible link text. :Args: - link_text - Link text string to search for. """ return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text) def find_elements_by_partial_link_text(self, link_text): """Finds a list of elements within this element's children by link text. :Args: - link_text - Link text string to search for. """ return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text) def find_element_by_tag_name(self, name): """Finds element within this element's children by tag name. :Args: - name - name of html tag (eg: h1, a, span) """ return self.find_element(by=By.TAG_NAME, value=name) def find_elements_by_tag_name(self, name): """Finds a list of elements within this element's children by tag name. :Args: - name - name of html tag (eg: h1, a, span) """ return self.find_elements(by=By.TAG_NAME, value=name) def find_element_by_xpath(self, xpath): """Finds element by xpath. :Args: xpath - xpath of element to locate. "//input[@class='myelement']" Note: The base path will be relative to this element's location. This will select the first link under this element. :: myelement.find_elements_by_xpath(".//a") However, this will select the first link on the page. :: myelement.find_elements_by_xpath("//a") """ return self.find_element(by=By.XPATH, value=xpath) def find_elements_by_xpath(self, xpath): """Finds elements within the element by xpath. :Args: - xpath - xpath locator string. Note: The base path will be relative to this element's location. This will select all links under this element. :: myelement.find_elements_by_xpath(".//a") However, this will select all links in the page itself. :: myelement.find_elements_by_xpath("//a") """ return self.find_elements(by=By.XPATH, value=xpath) def find_element_by_class_name(self, name): """Finds element within this element's children by class name. :Args: - name - class name to search for. 
""" return self.find_element(by=By.CLASS_NAME, value=name) def find_elements_by_class_name(self, name): """Finds a list of elements within this element's children by class name. :Args: - name - class name to search for. """ return self.find_elements(by=By.CLASS_NAME, value=name) def find_element_by_css_selector(self, css_selector): """Finds element within this element's children by CSS selector. :Args: - css_selector - CSS selctor string, ex: 'a.nav#home' """ return self.find_element(by=By.CSS_SELECTOR, value=css_selector) def find_elements_by_css_selector(self, css_selector): """Finds a list of elements within this element's children by CSS selector. :Args: - css_selector - CSS selctor string, ex: 'a.nav#home' """ return self.find_elements(by=By.CSS_SELECTOR, value=css_selector) def send_keys(self, *value): """Simulates typing into the element. :Args: - value - A string for typing, or setting form fields. For setting file inputs, this could be a local file path. Use this to send simple key events or to fill out form fields:: form_textfield = driver.find_element_by_name('username') form_textfield.send_keys("admin") This can also be used to set file inputs. :: file_input = driver.find_element_by_name('profilePic') file_input.send_keys("path/to/profilepic.gif") # Generally it's better to wrap the file path in one of the methods # in os.path to return the actual path to support cross OS testing. # file_input.send_keys(os.path.abspath("path/to/profilepic.gif")) """ # transfer file to another machine only if remote driver is used # the same behaviour as for java binding if self.parent._is_remote: local_file = self.parent.file_detector.is_local_file(*value) if local_file is not None: value = self._upload(local_file) typing = [] for val in value: if isinstance(val, Keys): typing.append(val) elif isinstance(val, int): val = val.__str__() for i in range(len(val)): typing.append(val[i]) else: for i in range(len(val)): typing.append(val[i]) self._execute(Command.SEND_KEYS_TO_ELEMENT, {'value': typing}) # RenderedWebElement Items def is_displayed(self): """Whether the element is visible to a user.""" return self._execute(Command.IS_ELEMENT_DISPLAYED)['value'] @property def location_once_scrolled_into_view(self): """THIS PROPERTY MAY CHANGE WITHOUT WARNING. Use this to discover where on the screen an element is so that we can click it. This method should cause the element to be scrolled into view. Returns the top lefthand corner location on the screen, or ``None`` if the element is not visible. 
""" return self._execute(Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW)['value'] @property def size(self): """The size of the element.""" size = self._execute(Command.GET_ELEMENT_SIZE)['value'] new_size = {} new_size["height"] = size["height"] new_size["width"] = size["width"] return new_size def value_of_css_property(self, property_name): """The value of a CSS property.""" return self._execute(Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY, {'propertyName': property_name})['value'] @property def location(self): """The location of the element in the renderable canvas.""" old_loc = self._execute(Command.GET_ELEMENT_LOCATION)['value'] new_loc = {"x": old_loc['x'], "y": old_loc['y']} return new_loc @property def rect(self): """A dictionary with the size and location of the element.""" return self._execute(Command.GET_ELEMENT_RECT)['value'] @property def parent(self): """Internal reference to the WebDriver instance this element was found from.""" return self._parent @property def id(self): """Internal ID used by selenium. This is mainly for internal use. Simple use cases such as checking if 2 webelements refer to the same element, can be done using ``==``:: if element1 == element2: print("These 2 are equal") """ return self._id def __eq__(self, element): if self._id == element.id: return True else: return self._execute(Command.ELEMENT_EQUALS, {'other': element.id})['value'] # Private Methods def _execute(self, command, params=None): """Executes a command against the underlying HTML element. Args: command: The name of the command to _execute as a string. params: A dictionary of named parameters to send with the command. Returns: The command's JSON response loaded into a dictionary object. """ if not params: params = {} params['id'] = self._id return self._parent.execute(command, params) def find_element(self, by=By.ID, value=None): if not By.is_valid(by) or not isinstance(value, str): raise InvalidSelectorException("Invalid locator values passed in") return self._execute(Command.FIND_CHILD_ELEMENT, {"using": by, "value": value})['value'] def find_elements(self, by=By.ID, value=None): if not By.is_valid(by) or not isinstance(value, str): raise InvalidSelectorException("Invalid locator values passed in") return self._execute(Command.FIND_CHILD_ELEMENTS, {"using": by, "value": value})['value'] def __hash__(self): return int(hashlib.md5(self._id.encode('utf-8')).hexdigest(), 16) def _upload(self, filename): fp = IOStream() zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED) zipped.write(filename, os.path.split(filename)[1]) zipped.close() content = base64.encodestring(fp.getvalue()) if not isinstance(content, str): content = content.decode('utf-8') try: return self._execute(Command.UPLOAD_FILE, {'file': content})['value'] except WebDriverException as e: if "Unrecognized command: POST" in e.__str__(): return filename elif "Command not found: POST " in e.__str__(): return filename elif '{"status":405,"value":["GET","HEAD","DELETE"]}' in e.__str__(): return filename else: raise e
1
11,987
Should be `if sys.version_info[0] > 2:`
SeleniumHQ-selenium
java
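The patch under review guards the Python 2 compatibility alias with a string comparison against sys.version, which the review message flags as fragile; a small sketch of the suggested sys.version_info form, which avoids lexicographic surprises:

import sys

# Suggested guard from the review: compare the major version number
# directly instead of string-comparing sys.version against '3'.
if sys.version_info[0] > 2:
    long = int  # Python 3 dropped the separate long type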
@@ -1254,6 +1254,7 @@ int main(int argc, char *argv[]) random_init(); debug_config(argv[0]); + debug_config_file_size(string_metric_parse("0"));//to set debug file size to "don't delete anything" s = getenv("MAKEFLOW_BATCH_QUEUE_TYPE"); if(s) {
1
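The patch above disables debug-log rotation by passing a parsed size of 0, per its own comment ("don't delete anything"); a standalone sketch of the same calls, assuming only the debug.h and stringtools.h APIs already referenced in the patch and in the file below (the wrapper function name is made up):

#include "debug.h"
#include "stringtools.h"

/* Mirror of the patched makeflow startup: string_metric_parse("0")
   yields 0, and a file size of 0 tells the debug subsystem never to
   rotate or delete the debug log. */
static void configure_unbounded_debug_log(const char *program_name)
{
	debug_config(program_name);
	debug_config_file_size(string_metric_parse("0"));
}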
/* Copyright (C) 2008- The University of Notre Dame This software is distributed under the GNU General Public License. See the file COPYING for details. */ #include "auth_all.h" #include "auth_ticket.h" #include "batch_job.h" #include "cctools.h" #include "copy_stream.h" #include "create_dir.h" #include "debug.h" #include "getopt_aux.h" #include "hash_table.h" #include "int_sizes.h" #include "itable.h" #include "link.h" #include "list.h" #include "load_average.h" #include "macros.h" #include "path.h" #include "random.h" #include "rmonitor.h" #include "stringtools.h" #include "work_queue.h" #include "work_queue_catalog.h" #include "xxmalloc.h" #include "jx.h" #include "jx_match.h" #include "jx_parse.h" #include "jx_getopt.h" #include "create_dir.h" #include "sha1.h" #include "dag.h" #include "dag_node.h" #include "dag_node_footprint.h" #include "dag_visitors.h" #include "parser.h" #include "parser_jx.h" #include "makeflow_summary.h" #include "makeflow_gc.h" #include "makeflow_log.h" #include "makeflow_wrapper.h" #include "makeflow_wrapper_umbrella.h" #include "makeflow_mounts.h" #include "makeflow_wrapper_enforcement.h" #include "makeflow_archive.h" #include "makeflow_catalog_reporter.h" #include "makeflow_local_resources.h" #include "makeflow_hook.h" #include <fcntl.h> #include <sys/stat.h> #include <sys/types.h> #include <libgen.h> #include <assert.h> #include <unistd.h> #include <errno.h> #include <signal.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdbool.h> /* Code organization notes: - The modules dag/dag_node/dag_file etc contain the data structures that represent the dag structure by itself. Functions named dag_*() create and manipulate those data structures, but do not execute the dag itself. These are shared between makeflow and other tools that read and manipulate the dag, like makeflow_viz, makeflow_linker, and so forth. - The modules makeflow/makeflow_log/makeflow_gc etc contain the functions that execute the dag by invoking batch operations, processing the log, etc. These are all functions named makeflow_*() to distinguish them from dag_*(). - The separation between dag structure and execution state is imperfect, because some of the execution state (note states, node counts, etc) is stored in struct dag and struct dag_node. Perhaps this can be improved. - All operations on files should use the batch_fs_*() functions, rather than invoking Unix I/O directly. This is because some batch systems (Hadoop, Confuga, etc) also include the storage where the files to be accessed are located. - APIs like work_queue_* should be indirectly accessed by setting options in Batch Job using batch_queue_set_option. See batch_job_work_queue.c for an example. */ #define MAX_REMOTE_JOBS_DEFAULT 100 static sig_atomic_t makeflow_abort_flag = 0; static int makeflow_failed_flag = 1; // Makeflow fails by default. This is changed at dag start to indicate correct start. static int makeflow_submit_timeout = 3600; static int makeflow_retry_flag = 0; static int makeflow_retry_max = 5; /* makeflow_gc_method indicates the type of garbage collection * indicated by the user. 
Refer to makeflow_gc.h for specifics */ static makeflow_gc_method_t makeflow_gc_method = MAKEFLOW_GC_NONE; /* Disk size at which point GC is run */ static uint64_t makeflow_gc_size = 0; /* # of files after which GC is run */ static int makeflow_gc_count = -1; /* Iterations of wait loop prior ot GC check */ static int makeflow_gc_barrier = 1; /* Determines next gc_barrier to make checks less frequent with large number of tasks */ static double makeflow_gc_task_ratio = 0.05; static batch_queue_type_t batch_queue_type = BATCH_QUEUE_TYPE_LOCAL; static struct batch_queue *local_queue = 0; static struct batch_queue *remote_queue = 0; struct batch_queue * makeflow_get_remote_queue(){ return remote_queue; } struct batch_queue * makeflow_get_local_queue(){ return local_queue; } struct batch_queue * makeflow_get_queue(struct dag_node *n){ if(n->local_job && local_queue) { return local_queue; } else { return remote_queue; } } static struct rmsummary *local_resources = 0; static int local_jobs_max = 1; static int remote_jobs_max = MAX_REMOTE_JOBS_DEFAULT; static char *project = NULL; static int port = 0; static int output_len_check = 0; static int skip_file_check = 0; static int cache_mode = 1; static char *parrot_path = "./parrot_run"; /* Wait upto this many seconds for an output file of a succesfull task to appear on the local filesystem (e.g, to deal with NFS semantics. */ static int file_creation_patience_wait_time = 0; /* Write a verbose transaction log with SYMBOL tags. SYMBOLs are category labels (SYMBOLs should be deprecated once weaver/pbui tools are updated.) */ static int log_verbose_mode = 0; static struct makeflow_wrapper *wrapper = 0; static struct makeflow_wrapper *enforcer = 0; static struct makeflow_wrapper_umbrella *umbrella = 0; static int catalog_reporting_on = 0; static char *mountfile = NULL; static char *mount_cache = NULL; static int use_mountfile = 0; static int should_send_all_local_environment = 0; static struct list *shared_fs_list = NULL; static int did_find_archived_job = 0; /* Determines if this is a local job that will consume local resources, regardless of the batch queue type. */ static int is_local_job( struct dag_node *n ) { return n->local_job || batch_queue_type==BATCH_QUEUE_TYPE_LOCAL; } /* Generates file list for node based on node files, wrapper input files, and monitor input files. Relies on %% nodeid replacement for monitor file names. */ void makeflow_generate_files( struct dag_node *n, struct batch_task *task ) { if(wrapper) makeflow_wrapper_generate_files(task, wrapper->input_files, wrapper->output_files, n, wrapper); if(enforcer) makeflow_wrapper_generate_files(task, enforcer->input_files, enforcer->output_files, n, enforcer); if(umbrella) makeflow_wrapper_generate_files(task, umbrella->wrapper->input_files, umbrella->wrapper->output_files, n, umbrella->wrapper); } /* Expand a dag_node into a text list of input files, output files, and a command, by applying all wrappers and settings. Used at both job submission and completion to obtain identical strings. */ static void makeflow_node_expand( struct dag_node *n, struct batch_queue *queue, struct batch_task *task ) { makeflow_generate_files(n, task); /* Expand the command according to each of the wrappers */ makeflow_wrap_wrapper(task, n, wrapper); makeflow_wrap_enforcer(task, n, enforcer); makeflow_wrap_umbrella(task, n, umbrella, queue); } /* Abort one job in a given batch queue. 
*/ static void makeflow_abort_job( struct dag *d, struct dag_node *n, struct batch_queue *q, UINT64_T jobid, const char *name ) { printf("aborting %s job %" PRIu64 "\n", name, jobid); batch_job_remove(q, jobid); makeflow_hook_node_abort(n); makeflow_log_state_change(d, n, DAG_NODE_STATE_ABORTED); struct batch_file *bf; struct dag_file *df; /* Create generic task if one does not exist. This occurs in log recovery. */ if(!n->task){ n->task = dag_node_to_batch_task(n, makeflow_get_queue(n), should_send_all_local_environment); /* This augments the task struct, should be replaced with hook in future. */ makeflow_node_expand(n, q, n->task); } /* Clean all files associated with task, includes node and hook files. */ list_first_item(n->task->output_files); while((bf = list_next_item(n->task->output_files))){ df = dag_file_lookup_or_create(d, bf->outer_name); makeflow_clean_file(d, q, df); } makeflow_clean_node(d, q, n); } /* Abort the dag by removing all batch jobs from all queues. */ static void makeflow_abort_all(struct dag *d) { UINT64_T jobid; struct dag_node *n; printf("got abort signal...\n"); itable_firstkey(d->local_job_table); while(itable_nextkey(d->local_job_table, &jobid, (void **) &n)) { makeflow_abort_job(d,n,local_queue,jobid,"local"); } itable_firstkey(d->remote_job_table); while(itable_nextkey(d->remote_job_table, &jobid, (void **) &n)) { makeflow_abort_job(d,n,remote_queue,jobid,"remote"); } } static void makeflow_node_force_rerun(struct itable *rerun_table, struct dag *d, struct dag_node *n); /* Decide whether to rerun a node based on batch and file system status. The silent option was added for to prevent confusing debug output when in clean mode. When clean_mode is not NONE we silence the node reseting output. */ void makeflow_node_decide_rerun(struct itable *rerun_table, struct dag *d, struct dag_node *n, int silent) { struct dag_file *f; if(itable_lookup(rerun_table, n->nodeid)) return; // Below are a bunch of situations when a node has to be rerun. // If a job was submitted to Condor, then just reconnect to it. if(n->state == DAG_NODE_STATE_RUNNING && !(n->local_job && local_queue) && batch_queue_type == BATCH_QUEUE_TYPE_CONDOR) { // Reconnect the Condor jobs if(!silent) fprintf(stderr, "rule still running: %s\n", n->command); itable_insert(d->remote_job_table, n->jobid, n); // Otherwise, we cannot reconnect to the job, so rerun it } else if(n->state == DAG_NODE_STATE_RUNNING || n->state == DAG_NODE_STATE_FAILED || n->state == DAG_NODE_STATE_ABORTED) { if(!silent) fprintf(stderr, "will retry failed rule: %s\n", n->command); goto rerun; } // Rerun if an input file has been updated since the last execution. list_first_item(n->source_files); while((f = list_next_item(n->source_files))) { if(dag_file_should_exist(f)) { continue; } else { if(!f->created_by) { if(!silent) fprintf(stderr, "makeflow: input file %s does not exist and is not created by any rule.\n", f->filename); exit(1); } else { /* If input file is missing, but node completed and file was garbage, then avoid rerunning. */ if(n->state == DAG_NODE_STATE_COMPLETE && f->state == DAG_FILE_STATE_DELETE) { continue; } goto rerun; } } } // Rerun if an output file is missing. list_first_item(n->target_files); while((f = list_next_item(n->target_files))) { if(dag_file_should_exist(f)) continue; /* If output file is missing, but node completed and file was gc'ed, then avoid rerunning. 
*/ if(n->state == DAG_NODE_STATE_COMPLETE && f->state == DAG_FILE_STATE_DELETE) continue; goto rerun; } // Do not rerun this node return; rerun: makeflow_node_force_rerun(rerun_table, d, n); } /* Reset all state to cause a node to be re-run. */ void makeflow_node_force_rerun(struct itable *rerun_table, struct dag *d, struct dag_node *n) { struct dag_node *p; struct batch_file *bf; struct dag_file *f1; struct dag_file *f2; int child_node_found; if(itable_lookup(rerun_table, n->nodeid)) return; // Mark this node as having been rerun already itable_insert(rerun_table, n->nodeid, n); // Remove running batch jobs if(n->state == DAG_NODE_STATE_RUNNING) { if(n->local_job && local_queue) { batch_job_remove(local_queue, n->jobid); itable_remove(d->local_job_table, n->jobid); } else { batch_job_remove(remote_queue, n->jobid); itable_remove(d->remote_job_table, n->jobid); } } if(!n->task){ n->task = dag_node_to_batch_task(n, makeflow_get_queue(n), should_send_all_local_environment); /* This augments the task struct, should be replaced with hook in future. */ makeflow_node_expand(n, makeflow_get_queue(n), n->task); } // Clean up things associated with this node list_first_item(n->task->output_files); while((bf = list_next_item(n->task->output_files))) { f1 = dag_file_lookup_or_create(d, bf->outer_name); makeflow_clean_file(d, remote_queue, f1); } makeflow_clean_node(d, remote_queue, n); makeflow_log_state_change(d, n, DAG_NODE_STATE_WAITING); // For each parent node, rerun it if input file was garbage collected list_first_item(n->source_files); while((f1 = list_next_item(n->source_files))) { if(dag_file_should_exist(f1)) continue; p = f1->created_by; if(p) { makeflow_node_force_rerun(rerun_table, d, p); f1->reference_count += 1; } } // For each child node, rerun it list_first_item(n->target_files); while((f1 = list_next_item(n->target_files))) { for(p = d->nodes; p; p = p->next) { child_node_found = 0; list_first_item(p->source_files); while((f2 = list_next_item(n->source_files))) { if(!strcmp(f1->filename, f2->filename)) { child_node_found = 1; break; } } if(child_node_found) { makeflow_node_force_rerun(rerun_table, d, p); } } } } /* Update nested jobs with appropriate number of local jobs (total local jobs max / maximum number of concurrent nests). */ static void makeflow_prepare_nested_jobs(struct dag *d) { int dag_nested_width = dag_width(d, 1); int update_dag_nests = 1; char *s = getenv("MAKEFLOW_UPDATE_NESTED_JOBS"); if(s) update_dag_nests = atoi(s); if(dag_nested_width > 0 && update_dag_nests) { dag_nested_width = MIN(dag_nested_width, local_jobs_max); struct dag_node *n; for(n = d->nodes; n; n = n->next) { if(n->nested_job && ((n->local_job && local_queue) || batch_queue_type == BATCH_QUEUE_TYPE_LOCAL)) { char *command = xxmalloc(strlen(n->command) + 20); sprintf(command, "%s -j %d", n->command, local_jobs_max / dag_nested_width); free((char *) n->command); n->command = command; } } } } /* Match a filename (/home/fred) to a path stem (/home). Returns 0 on match, non-zero otherwise. */ static int prefix_match(void *stem, const void *filename) { assert(stem); assert(filename); return strncmp(stem, filename, strlen(stem)); } /* Returns true if the given filename is located in a shared filesystem, as given by the shared_fs_list. */ static int makeflow_file_on_sharedfs( const char *filename ) { return !list_iterate(shared_fs_list,prefix_match,filename); } /* Submit one fully formed job, retrying failures up to the makeflow_submit_timeout. 
This is necessary because busy batch systems occasionally do not accept a job submission. */ static batch_job_id_t makeflow_node_submit_retry( struct batch_queue *queue, struct batch_task *task) { time_t stoptime = time(0) + makeflow_submit_timeout; int waittime = 1; batch_job_id_t jobid = 0; /* Display the fully elaborated command, just like Make does. */ printf("submitting job: %s\n", task->command); makeflow_hook_batch_submit(task); while(1) { if(makeflow_abort_flag) break; /* This will eventually be replaced by submit (queue, task )... */ jobid = batch_job_submit(queue, task->command, batch_files_to_string(queue, task->input_files), batch_files_to_string(queue, task->output_files), task->envlist, task->resources); if(jobid >= 0) { printf("submitted job %"PRIbjid"\n", jobid); task->jobid = jobid; return jobid; } fprintf(stderr, "couldn't submit batch job, still trying...\n"); if(makeflow_abort_flag) break; if(time(0) > stoptime) { fprintf(stderr, "unable to submit job after %d seconds!\n", makeflow_submit_timeout); break; } sleep(waittime); waittime *= 2; if(waittime > 60) waittime = 60; } return 0; } /* Submit a node to the appropriate batch system, after materializing the necessary list of input and output files, and applying all wrappers and options. */ static void makeflow_node_submit(struct dag *d, struct dag_node *n, const struct rmsummary *resources) { struct batch_queue *queue = makeflow_get_queue(n); /* Before setting the batch job options (stored in the "BATCH_OPTIONS" * variable), we must save the previous global queue value, and then * restore it after we submit. */ struct dag_variable_lookup_set s = { d, n->category, n, NULL }; char *batch_options = dag_variable_lookup_string("BATCH_OPTIONS", &s); char *previous_batch_options = NULL; if(batch_queue_get_option(queue, "batch-options")) previous_batch_options = xxstrdup(batch_queue_get_option(queue, "batch-options")); if(batch_options) { debug(D_MAKEFLOW_RUN, "Batch options: %s\n", batch_options); batch_queue_set_option(queue, "batch-options", batch_options); free(batch_options); } /* Create task from node information */ struct batch_task *task = dag_node_to_batch_task(n, queue, should_send_all_local_environment); batch_queue_set_int_option(queue, "task-id", task->taskid); /* This augments the task struct, should be replaced with node_submit in future. */ makeflow_node_expand(n, queue, task); makeflow_hook_node_submit(n, task); /* Logs the expectation of output files. */ makeflow_log_batch_file_list_state_change(d,task->output_files,DAG_FILE_STATE_EXPECT); /* check archiving directory to see if node has already been preserved */ /* This does not jive with task yet. Discussion needed on what the goal of archive is (node level or task level). */ if (d->should_read_archive && makeflow_archive_is_preserved(d, n, task->command, n->source_files, n->target_files)){ printf("node %d already exists in archive, replicating output files\n", n->nodeid); /* copy archived files to working directory and update state for node and dag_files */ makeflow_archive_copy_preserved_files(d, n, n->target_files); n->state = DAG_NODE_STATE_RUNNING; makeflow_log_batch_file_list_state_change(d,task->output_files, DAG_FILE_STATE_EXISTS); makeflow_log_state_change(d, n, DAG_NODE_STATE_COMPLETE); did_find_archived_job = 1; } else { /* Now submit the actual job, retrying failures as needed. */ n->jobid = makeflow_node_submit_retry(queue, task); /* Update all of the necessary data structures. */ if(n->jobid >= 0) { /* Not sure if this is necessary/what it does. 
*/ memcpy(n->resources_allocated, task->resources, sizeof(struct rmsummary)); makeflow_log_state_change(d, n, DAG_NODE_STATE_RUNNING); n->task = task; if(is_local_job(n)) { makeflow_local_resources_subtract(local_resources,n); } if(n->local_job && local_queue) { itable_insert(d->local_job_table, n->jobid, n); } else { itable_insert(d->remote_job_table, n->jobid, n); } } else { makeflow_log_state_change(d, n, DAG_NODE_STATE_FAILED); batch_task_delete(task); makeflow_failed_flag = 1; } } /* Restore old batch job options. */ if(previous_batch_options) { batch_queue_set_option(queue, "batch-options", previous_batch_options); free(previous_batch_options); } } static int makeflow_node_ready(struct dag *d, struct dag_node *n, const struct rmsummary *resources) { struct dag_file *f; if(n->state != DAG_NODE_STATE_WAITING) return 0; if(is_local_job(n)) { if(!makeflow_local_resources_available(local_resources,resources)) return 0; } if(n->local_job && local_queue) { if(dag_local_jobs_running(d) >= local_jobs_max) return 0; } else { if(dag_remote_jobs_running(d) >= remote_jobs_max) return 0; } list_first_item(n->source_files); while((f = list_next_item(n->source_files))) { if(dag_file_should_exist(f)) { continue; } else { return 0; } } /* If all makeflow checks pass for this node we will return the result of the hooks, which will be 1 if all pass and 0 if any fail. */ return (makeflow_hook_node_check(n, remote_queue) == MAKEFLOW_HOOK_SUCCESS); } int makeflow_nodes_local_waiting_count(const struct dag *d) { int count = 0; struct dag_node *n; for(n = d->nodes; n; n = n->next) { if(n->state == DAG_NODE_STATE_WAITING && is_local_job(n)) count++; } return count; } /* Find all jobs ready to be run, then submit them. */ static void makeflow_dispatch_ready_jobs(struct dag *d) { struct dag_node *n; for(n = d->nodes; n; n = n->next) { if(dag_remote_jobs_running(d) >= remote_jobs_max && dag_local_jobs_running(d) >= local_jobs_max) { break; } const struct rmsummary *resources = dag_node_dynamic_label(n); if(makeflow_node_ready(d, n, resources)) { makeflow_node_submit(d, n, resources); } } } /* Check the the indicated file was created and log, error, or retry as appropriate. */ int makeflow_node_check_file_was_created(struct dag *d, struct dag_node *n, struct dag_file *f) { struct stat buf; int file_created = 0; int64_t start_check = time(0); while(!file_created) { if(batch_fs_stat(remote_queue, f->filename, &buf) < 0) { fprintf(stderr, "%s did not create file %s\n", n->command, f->filename); } else if(output_len_check && buf.st_size <= 0) { debug(D_MAKEFLOW_RUN, "%s created a file of length %ld\n", n->command, (long) buf.st_size); } else { /* File was created and has length larger than zero. */ debug(D_MAKEFLOW_RUN, "File %s created by rule %d.\n", f->filename, n->nodeid); f->actual_size = buf.st_size; d->total_file_size += f->actual_size; makeflow_log_file_state_change(n->d, f, DAG_FILE_STATE_EXISTS); file_created = 1; break; } if(file_creation_patience_wait_time > 0 && time(0) - start_check < file_creation_patience_wait_time) { /* Failed to see the file. Sleep and try again. */ debug(D_MAKEFLOW_RUN, "Checking again for file %s.\n", f->filename); sleep(1); } else { /* Failed was not seen by makeflow in the aloted tries. */ debug(D_MAKEFLOW_RUN, "File %s was not created by rule %d.\n", f->filename, n->nodeid); file_created = 0; break; } } return file_created; } /* Mark the given task as completing, using the batch_job_info completion structure provided by batch_job. 
*/

static void makeflow_node_complete(struct dag *d, struct dag_node *n, struct batch_queue *queue, struct batch_task *task)
{
	struct batch_file *bf;
	struct dag_file *f;
	int job_failed = 0;

	/* As integration moves forward batch_task will also be passed. */
	/* This is intended for changes to the batch_task that need no
	context from dag_node/dag, such as shared_fs. */
	makeflow_hook_batch_retrieve(task);

	if(n->state != DAG_NODE_STATE_RUNNING)
		return;

	if(is_local_job(n)) {
		makeflow_local_resources_add(local_resources,n);
	}

	makeflow_hook_node_end(n, task);

	if (task->info->exited_normally && task->info->exit_code == 0) {
		list_first_item(n->task->output_files);
		while ((bf = list_next_item(n->task->output_files))) {
			f = dag_file_lookup_or_create(d, bf->outer_name);
			if (!makeflow_node_check_file_was_created(d, n, f)) {
				job_failed = 1;
			}
		}
	} else {
		if(task->info->exited_normally) {
			fprintf(stderr, "%s failed with exit code %d\n", n->command, task->info->exit_code);
		} else {
			fprintf(stderr, "%s crashed with signal %d (%s)\n", n->command, task->info->exit_signal, strsignal(task->info->exit_signal));
		}
		job_failed = 1;
	}

	if(job_failed) {
		/* As integration moves forward batch_task will also be passed. */
		/* If a hook indicates failure here, it is not fatal, but will result
		in a failed task. */
		int hook_success = makeflow_hook_node_fail(n, task);

		makeflow_log_state_change(d, n, DAG_NODE_STATE_FAILED);

		/* Clean files created in node. Clean existing and expected and record deletion. */
		list_first_item(n->task->output_files);
		while((bf = list_next_item(n->task->output_files))) {
			f = dag_file_lookup_or_create(d, bf->outer_name);
			/* Either the file was created and not confirmed,
			or a hook removed the file; clean it in both cases. */
			makeflow_clean_file(d, remote_queue, f);
		}

		if(task->info->disk_allocation_exhausted) {
			fprintf(stderr, "\nrule %d failed because it exceeded its loop device allocation capacity.\n", n->nodeid);
			if(n->resources_measured) {
				rmsummary_print(stderr, n->resources_measured, /* pprint */ 0, /* extra fields */ NULL);
				fprintf(stderr, "\n");
			}
		}

		if (!hook_success || makeflow_retry_flag || task->info->exit_code == 101) {
			n->failure_count++;
			if (n->failure_count > makeflow_retry_max) {
				notice(D_MAKEFLOW_RUN, "job %s failed too many times.", n->command);
				makeflow_failed_flag = 1;
			} else {
				notice(D_MAKEFLOW_RUN, "will retry failed job %s", n->command);
				makeflow_log_state_change(d, n, DAG_NODE_STATE_WAITING);
			}
		} else {
			makeflow_failed_flag = 1;
		}
	} else {
		/* Mark source files that have been used by this node */
		list_first_item(task->input_files);
		while((bf = list_next_item(task->input_files))) {
			f = dag_file_lookup_or_create(d, bf->inner_name);
			f->reference_count -= 1;
			if(f->reference_count == 0 && f->state == DAG_FILE_STATE_EXISTS){
				makeflow_log_file_state_change(d, f, DAG_FILE_STATE_COMPLETE);
				makeflow_hook_file_complete(f);
			}
		}

		/* store node into archiving directory */
		if (d->should_write_to_archive) {
			printf("archiving node within archiving directory\n");
			makeflow_archive_populate(d, n, task->command, n->source_files, n->target_files, task->info);
		}

		/* node_success is after file_complete to allow
		for the final state of the files to be reflected
		in the structs.
Allows for cleanup or archiving.*/ makeflow_hook_node_success(n, task); makeflow_log_state_change(d, n, DAG_NODE_STATE_COMPLETE); } /* Clear TEMP files */ list_first_item(task->input_files); while((bf = list_next_item(task->input_files))) { f = dag_file_lookup_or_create(d, bf->inner_name); if(f->type == DAG_FILE_TYPE_TEMP){ makeflow_clean_file(d, makeflow_get_queue(n), f); } } } /* Check the dag for consistency, and emit errors if input dependencies, etc are missing. */ static int makeflow_check(struct dag *d) { struct stat buf; struct dag_node *n; struct dag_file *f; int error = 0; debug(D_MAKEFLOW_RUN, "checking rules for consistency...\n"); for(n = d->nodes; n; n = n->next) { list_first_item(n->source_files); while((f = list_next_item(n->source_files))) { if(f->created_by) { continue; } if(skip_file_check || batch_fs_stat(remote_queue, f->filename, &buf) >= 0) { continue; } if(f->source) { continue; } fprintf(stderr, "makeflow: %s does not exist, and is not created by any rule.\n", f->filename); error++; } } if(error) { fprintf(stderr, "makeflow: found %d errors during consistency check.\n", error); return 0; } else { return 1; } } /* Used to check that features used are supported by the batch system. This would be where we added checking of selected options to verify they are supported by the batch system, such as work_queue specific options. */ static int makeflow_check_batch_consistency(struct dag *d) { struct dag_node *n; struct dag_file *f; int error = 0; debug(D_MAKEFLOW_RUN, "checking for consistency of batch system support...\n"); for(n = d->nodes; n; n = n->next) { if(itable_size(n->remote_names) > 0 || (wrapper && wrapper->uses_remote_rename)){ if(n->local_job) { debug(D_ERROR, "Remote renaming is not supported with -Tlocal or LOCAL execution. Rule %d (line %d).\n", n->nodeid, n->linenum); error = 1; break; } else if (!batch_queue_supports_feature(remote_queue, "remote_rename")) { debug(D_ERROR, "Remote renaming is not supported on selected batch system. Rule %d (line %d).\n", n->nodeid, n->linenum); error = 1; break; } } if(!batch_queue_supports_feature(remote_queue, "absolute_path") && !n->local_job){ list_first_item(n->source_files); while((f = list_next_item(n->source_files)) && !error) { const char *remotename = dag_node_get_remote_name(n, f->filename); if (makeflow_file_on_sharedfs(f->filename)) { if (remotename) fatal("Remote renaming for %s is not supported on a shared filesystem", f->filename); continue; } if((remotename && *remotename == '/') || (*f->filename == '/' && !remotename)) { debug(D_ERROR, "Absolute paths are not supported on selected batch system. Rule %d (line %d).\n", n->nodeid, n->linenum); error = 1; break; } } list_first_item(n->target_files); while((f = list_next_item(n->target_files)) && !error) { const char *remotename = dag_node_get_remote_name(n, f->filename); if (makeflow_file_on_sharedfs(f->filename)) { if (remotename) fatal("Remote renaming for %s is not supported on a shared filesystem", f->filename); continue; } if((remotename && *remotename == '/') || (*f->filename == '/' && !remotename)) { debug(D_ERROR, "Absolute paths are not supported on selected batch system. Rule %d (line %d).\n", n->nodeid, n->linenum); error = 1; break; } } } } if(error) { return 0; } else { return 1; } } /* Main loop for running a makeflow: submit jobs, wait for completion, keep going until everything done. 
*/ static void makeflow_run( struct dag *d ) { struct dag_node *n; batch_job_id_t jobid; struct batch_job_info info; // Start Catalog at current time timestamp_t start = timestamp_get(); // Last Report is created stall for first reporting. timestamp_t last_time = start - (60 * 1000 * 1000); //reporting to catalog if(catalog_reporting_on){ makeflow_catalog_summary(d, project, batch_queue_type, start); } while(!makeflow_abort_flag) { did_find_archived_job = 0; makeflow_dispatch_ready_jobs(d); /* We continue the loop under 4 conditions: 1. We have local jobs running 2. We have remote jobs running 3. We have archival jobs to be found (See Note) 4. We have cleaned completed jobs to ensure allocated jobs can run Note: Due to the fact that archived jobs are never "run", no local or remote jobs are added to the remote or local job table if all ready jobs were found within the archive. Thus makeflow_dispatch_ready_jobs must run at least once more if an archived job was found. */ if(dag_local_jobs_running(d)==0 && dag_remote_jobs_running(d)==0 && (makeflow_hook_dag_loop(d) == MAKEFLOW_HOOK_END) && did_find_archived_job == 0) break; if(dag_remote_jobs_running(d)) { int tmp_timeout = 5; jobid = batch_job_wait_timeout(remote_queue, &info, time(0) + tmp_timeout); if(jobid > 0) { printf("job %"PRIbjid" completed\n",jobid); debug(D_MAKEFLOW_RUN, "Job %" PRIbjid " has returned.\n", jobid); n = itable_remove(d->remote_job_table, jobid); if(n){ // Stop gap until batch_job_wait returns task struct batch_task_set_info(n->task, &info); makeflow_node_complete(d, n, remote_queue, n->task); } } } if(dag_local_jobs_running(d)) { time_t stoptime; int tmp_timeout = 5; if(dag_remote_jobs_running(d)) { stoptime = time(0); } else { stoptime = time(0) + tmp_timeout; } jobid = batch_job_wait_timeout(local_queue, &info, stoptime); if(jobid > 0) { debug(D_MAKEFLOW_RUN, "Job %" PRIbjid " has returned.\n", jobid); n = itable_remove(d->local_job_table, jobid); if(n){ // Stop gap until batch_job_wait returns task struct batch_task_set_info(n->task, &info); makeflow_node_complete(d, n, local_queue, n->task); } } } /* Report to catalog */ timestamp_t now = timestamp_get(); /* If in reporting mode and 1 min has transpired */ if(catalog_reporting_on && ((now-last_time) > (60 * 1000 * 1000))){ makeflow_catalog_summary(d, project,batch_queue_type,start); last_time = now; } /* Rather than try to garbage collect after each time in this * wait loop, perform garbage collection after a proportional * amount of tasks have passed. */ makeflow_gc_barrier--; if(makeflow_gc_method != MAKEFLOW_GC_NONE && makeflow_gc_barrier == 0) { makeflow_gc(d, remote_queue, makeflow_gc_method, makeflow_gc_size, makeflow_gc_count); makeflow_gc_barrier = MAX(d->nodeid_counter * makeflow_gc_task_ratio, 1); } } /* Always make final report to catalog when workflow ends. */ if(catalog_reporting_on){ makeflow_catalog_summary(d, project,batch_queue_type,start); } if(makeflow_abort_flag) { makeflow_abort_all(d); } else if(!makeflow_failed_flag && makeflow_gc_method != MAKEFLOW_GC_NONE) { makeflow_gc(d,remote_queue,MAKEFLOW_GC_ALL,0,0); } } /* Signal handler to catch abort signals. Note that permissible actions in signal handlers are very limited, so we emit a message to the terminal and update a global variable noticed by makeflow_run. 
*/ static void handle_abort(int sig) { int fd = open("/dev/tty", O_WRONLY); if (fd >= 0) { char buf[256]; snprintf(buf, sizeof(buf), "received signal %d (%s), cleaning up remote jobs and files...\n",sig,strsignal(sig)); write(fd, buf, strlen(buf)); close(fd); } makeflow_abort_flag = 1; } static void set_archive_directory_string(char **archive_directory, char *option_arg) { if (*archive_directory != NULL) { // need to free archive directory to avoid memory leak since it has already been set once free(*archive_directory); } if (option_arg) { *archive_directory = xxstrdup(option_arg); } else { char *uid = xxmalloc(10); sprintf(uid, "%d", getuid()); *archive_directory = xxmalloc(sizeof(MAKEFLOW_ARCHIVE_DEFAULT_DIRECTORY) + 20 * sizeof(char)); sprintf(*archive_directory, "%s%s", MAKEFLOW_ARCHIVE_DEFAULT_DIRECTORY, uid); free(uid); } } static void show_help_run(const char *cmd) { /* Stars indicate 80-column limit. Try to keep things within 79 columns. */ /********************************************************************************/ printf("Use: ./makeflow [options] <dagfile>\n"); printf("Basic Options:\n"); printf(" -c,--clean=<type> Clean up logfile and all temporary files.\n"); printf(" -d,--debug=<subsystem> Enable debugging for this subsystem\n"); printf(" -o,--debug-file=<file> Send debugging to this file.\n"); printf(" --debug-rotate-max=<bytes> Rotate debug file once it reaches this size.\n"); printf(" -T,--batch-type=<type> Select batch system: %s\n",batch_queue_type_string()); printf(" --argv=<file> Include command line arguments from a JSON file.\n"); printf(" -v,--version Show version string\n"); printf(" -h,--help Show this help screen.\n"); /********************************************************************************/ printf("\nWorkflow Handling:\n"); printf(" -a,--advertise Advertise workflow status to the global catalog.\n"); printf(" -L,--batch-log=<logfile> Use this file for the batch system log.\n"); printf(" -m,--email=<email> Send summary of workflow to this email at end\n"); printf(" --json Use JSON format for the workflow specification.\n"); printf(" --jx Use JX format for the workflow specification.\n"); printf(" --jx-args=<file> Evaluate the JX input with keys and values in file defined as variables.\n"); printf(" --jx-context=<file> Deprecated. Equivalent to --jx-args.\n"); printf(" --jx-define=<VAR>=<EXPR> Set the JX variable VAR to the JX expression EXPR.\n"); printf(" --log-verbose Add node id symbol tags in the makeflow log.\n"); printf(" -j,--max-local=<#> Max number of local jobs to run at once.\n"); printf(" -J,--max-remote=<#> Max number of remote jobs to run at once.\n"); printf(" -l,--makeflow-log=<logfile> Use this file for the makeflow log.\n"); printf(" -R,--retry Retry failed batch jobs up to 5 times.\n"); printf(" -r,--retry-count=<n> Retry failed batch jobs up to n times.\n"); printf(" --send-environment Send all local environment variables in remote execution.\n"); printf(" -S,--submission-timeout=<#> Time to retry failed batch job submission.\n"); printf(" -f,--summary-log=<file> Write summary of workflow to this file at end.\n"); /********************************************************************************/ printf("\nData Handling:\n"); printf(" --archive=<dir> Archive job outputs in <dir> for future reuse.\n"); printf(" --archive-read=<dir> Same as --archive, but read-only.\n"); printf(" --archive-write=<dir> Same as --archive, but write-only.\n"); printf(" -A,--disable-afs-check Disable the check for AFS. 
(experts only.)\n"); printf(" --cache=<dir> Use this dir to cache downloaded mounted files.\n"); printf(" -X,--change-directory=<dir> Change to <dir> before executing the workflow.\n"); printf(" -g,--gc=<type> Enable garbage collection. (ref_cnt|on_demand|all)\n"); printf(" --gc-size=<int> Set disk size to trigger GC. (on_demand only)\n"); printf(" -G,--gc-count=<int> Set number of files to trigger GC. (ref_cnt only)\n"); printf(" --mounts=<mountfile> Use this file as a mountlist.\n"); printf(" --skip-file-check Do not check for file existence before running.\n"); printf(" --do-not-save-failed-output Disables moving output of failed nodes to directory.\n"); printf(" --shared-fs=<dir> Assume that <dir> is in a shared filesystem.\n"); printf(" --storage-limit=<int> Set storage limit for Makeflow (default is off)\n"); printf(" --storage-type=<type> Type of storage limit(0:MAX,1:MIN,2:OUTPUT,3:OFF\n"); printf(" --storage-print=<file> Print storage limit calculated by Makeflow\n"); printf(" --wait-for-files-upto=<n> Wait up to <n> seconds for files to be created.\n"); printf(" -z,--zero-length-error Consider zero-length files to be erroneous.\n"); /********************************************************************************/ printf("\nWork Queue Options:\n"); printf(" -C,--catalog-server=<hst:port> Select alternate catalog server.\n"); printf(" --password Password file for authenticating workers.\n"); printf(" -p,--port=<port> Port number to use with Work Queue.\n"); printf(" -Z,--port-file=<file> Select port at random and write it to this file.\n"); printf(" -P,--priority=<integer> Priority. Higher the value, higher the priority.\n"); printf(" -N,--project-name=<project> Set the Work Queue project name.\n"); printf(" -F,--wq-fast-abort=<#> Set the Work Queue fast abort multiplier.\n"); printf(" -t,--wq-keepalive-timeout=<#> Work Queue keepalive timeout. (default: 30s)\n"); printf(" -u,--wq-keepalive-interval=<#> Work Queue keepalive interval. (default: 120s)\n"); printf(" -W,--wq-schedule=<mode> Work Queue scheduling algor. 
(time|files|fcfs)\n"); printf(" --work-queue-preferred-connection Preferred connection: by_ip | by_hostname\n"); /********************************************************************************/ printf("\nBatch System Options:\n"); printf(" --amazon-config Amazon EC2 config file from makeflow_ec2_setup.\n"); printf(" --lambda-config Amazon Lambda config file from makeflow_lambda_setup.\n"); printf(" -B,--batch-options=<options> Add these options to all batch submit files.\n"); printf(" --disable-cache Disable batch system caching.\n"); printf(" --local-cores=# Max number of local cores to use.\n"); printf(" --local-memory=# Max amount of local memory (MB) to use.\n"); printf(" --local-disk=# Max amount of local disk (MB) to use.\n"); printf(" --working-dir=<dir|url> Working directory for the batch system.\n"); /********************************************************************************/ printf("\nContainers and Wrappers:\n"); printf(" --docker=<image> Run each task using the named Docker image.\n"); printf(" --docker-tar=<tar file> Load docker image from this tar file.\n"); printf(" --docker-opt=<string> Pass docker command line options.\n"); printf(" --singularity=<image> Run each task using Singularity exec with image.\n"); printf(" --singularity-opt=<string> Pass singularity command line options.\n"); printf(" --umbrella-spec=<file> Run each task using this Umbrella spec.\n"); printf(" --umbrella-binary=<file> Path to Umbrella binary.\n"); printf(" --umbrella-log-prefix=<string> Umbrella log file prefix\n"); printf(" --umbrella-mode=<mode> Umbrella execution mode. (default is local)\n"); printf(" --wrapper=<cmd> Wrap all commands with this prefix.\n"); printf(" --wrapper-input=<cmd> Wrapper command requires this input file.\n"); printf(" --wrapper-output=<cmd> Wrapper command produces this output file.\n"); printf(" --enforcement Enforce access to only named inputs/outputs.\n"); printf(" --parrot-path=<path> Path to parrot_run for --enforcement.\n"); printf(" --mesos-master=<hostname:port> Mesos master address and port\n"); printf(" --mesos-path=<path> Path to mesos python2 site-packages.\n"); printf(" --mesos-preload=<path> Path to libraries needed by Mesos.\n"); printf(" --k8s-image=<path> Container image used by kubernetes.\n"); /********************************************************************************/ printf("\nResource Monitoring Options:\n"); printf(" --monitor=<dir> Enable resource monitor, write logs to <dir>\n"); printf(" --monitor-interval=<#> Set monitor interval, in seconds. (default: 1s)\n"); printf(" --monitor-with-time-series Enable monitor time series.\n"); printf(" --monitor-with-opened-files Enable monitoring of opened files.\n"); printf(" --monitor-log-fmt=<fmt> Format for monitor logs. 
(def: resource-rule-%%)\n"); } int main(int argc, char *argv[]) { int c; char *dagfile = NULL; char *change_dir = NULL; char *batchlogfilename = NULL; const char *batch_submit_options = getenv("BATCH_OPTIONS"); makeflow_clean_depth clean_mode = MAKEFLOW_CLEAN_NONE; char *email_summary_to = NULL; int explicit_remote_jobs_max = 0; int explicit_local_jobs_max = 0; int explicit_local_cores = 0; int explicit_local_memory = 0; int explicit_local_disk = 0; char *logfilename = NULL; int port_set = 0; timestamp_t runtime = 0; int disable_afs_check = 0; int should_read_archive = 0; int should_write_to_archive = 0; timestamp_t time_completed = 0; const char *work_queue_keepalive_interval = NULL; const char *work_queue_keepalive_timeout = NULL; const char *work_queue_master_mode = "standalone"; const char *work_queue_port_file = NULL; double wq_option_fast_abort_multiplier = -1.0; const char *amazon_config = NULL; const char *lambda_config = NULL; const char *priority = NULL; char *work_queue_password = NULL; char *wq_wait_queue_size = 0; int did_explicit_auth = 0; char *chirp_tickets = NULL; char *working_dir = NULL; char *work_queue_preferred_connection = NULL; char *write_summary_to = NULL; char *s; char *archive_directory = NULL; category_mode_t allocation_mode = CATEGORY_ALLOCATION_MODE_FIXED; shared_fs_list = list_create(); char *mesos_master = "127.0.0.1:5050/"; char *mesos_path = NULL; char *mesos_preload = NULL; dag_syntax_type dag_syntax = DAG_SYNTAX_MAKE; struct jx *jx_args = jx_object(NULL); struct jx *hook_args = jx_object(NULL); char *k8s_image = NULL; extern struct makeflow_hook makeflow_hook_docker; extern struct makeflow_hook makeflow_hook_example; extern struct makeflow_hook makeflow_hook_fail_dir; /* Using fail directories is on by default */ int save_failure = 1; extern struct makeflow_hook makeflow_hook_resource_monitor; extern struct makeflow_hook makeflow_hook_sandbox; extern struct makeflow_hook makeflow_hook_singularity; extern struct makeflow_hook makeflow_hook_storage_allocation; random_init(); debug_config(argv[0]); s = getenv("MAKEFLOW_BATCH_QUEUE_TYPE"); if(s) { batch_queue_type = batch_queue_type_from_string(s); if(batch_queue_type == BATCH_QUEUE_TYPE_UNKNOWN) { fprintf(stderr, "makeflow: unknown batch queue type: %s (from $MAKEFLOW_BATCH_QUEUE_TYPE)\n", s); return 1; } } s = getenv("WORK_QUEUE_MASTER_MODE"); if(s) { work_queue_master_mode = s; } s = getenv("WORK_QUEUE_NAME"); if(s) { project = xxstrdup(s); } s = getenv("WORK_QUEUE_FAST_ABORT_MULTIPLIER"); if(s) { wq_option_fast_abort_multiplier = atof(s); } enum { LONG_OPT_AUTH = UCHAR_MAX+1, LONG_OPT_ARGV, LONG_OPT_CACHE, LONG_OPT_DEBUG_ROTATE_MAX, LONG_OPT_DISABLE_BATCH_CACHE, LONG_OPT_DOT_CONDENSE, LONG_OPT_HOOK_EXAMPLE, LONG_OPT_FILE_CREATION_PATIENCE_WAIT_TIME, LONG_OPT_FAIL_DIR, LONG_OPT_GC_SIZE, LONG_OPT_LOCAL_CORES, LONG_OPT_LOCAL_MEMORY, LONG_OPT_LOCAL_DISK, LONG_OPT_MONITOR, LONG_OPT_MONITOR_INTERVAL, LONG_OPT_MONITOR_LOG_NAME, LONG_OPT_MONITOR_OPENED_FILES, LONG_OPT_MONITOR_TIME_SERIES, LONG_OPT_MOUNTS, LONG_OPT_SANDBOX, LONG_OPT_STORAGE_TYPE, LONG_OPT_STORAGE_LIMIT, LONG_OPT_STORAGE_PRINT, LONG_OPT_PASSWORD, LONG_OPT_TICKETS, LONG_OPT_VERBOSE_PARSING, LONG_OPT_LOG_VERBOSE_MODE, LONG_OPT_WORKING_DIR, LONG_OPT_PREFERRED_CONNECTION, LONG_OPT_WQ_WAIT_FOR_WORKERS, LONG_OPT_WRAPPER, LONG_OPT_WRAPPER_INPUT, LONG_OPT_WRAPPER_OUTPUT, LONG_OPT_DOCKER, LONG_OPT_DOCKER_OPT, LONG_OPT_DOCKER_TAR, LONG_OPT_AMAZON_CONFIG, LONG_OPT_LAMBDA_CONFIG, LONG_OPT_JSON, LONG_OPT_JX, LONG_OPT_JX_ARGS, LONG_OPT_JX_DEFINE, 
LONG_OPT_SKIP_FILE_CHECK, LONG_OPT_UMBRELLA_BINARY, LONG_OPT_UMBRELLA_LOG_PREFIX, LONG_OPT_UMBRELLA_MODE, LONG_OPT_UMBRELLA_SPEC, LONG_OPT_ALLOCATION_MODE, LONG_OPT_ENFORCEMENT, LONG_OPT_PARROT_PATH, LONG_OPT_SINGULARITY, LONG_OPT_SINGULARITY_OPT, LONG_OPT_SHARED_FS, LONG_OPT_ARCHIVE, LONG_OPT_ARCHIVE_READ_ONLY, LONG_OPT_ARCHIVE_WRITE_ONLY, LONG_OPT_MESOS_MASTER, LONG_OPT_MESOS_PATH, LONG_OPT_MESOS_PRELOAD, LONG_OPT_SEND_ENVIRONMENT, LONG_OPT_K8S_IMG, }; static const struct option long_options_run[] = { {"advertise", no_argument, 0, 'a'}, {"allocation", required_argument, 0, LONG_OPT_ALLOCATION_MODE}, {"argv", required_argument, 0, LONG_OPT_ARGV}, {"auth", required_argument, 0, LONG_OPT_AUTH}, {"batch-log", required_argument, 0, 'L'}, {"batch-options", required_argument, 0, 'B'}, {"batch-type", required_argument, 0, 'T'}, {"cache", required_argument, 0, LONG_OPT_CACHE}, {"catalog-server", required_argument, 0, 'C'}, {"clean", optional_argument, 0, 'c'}, {"debug", required_argument, 0, 'd'}, {"debug-file", required_argument, 0, 'o'}, {"debug-rotate-max", required_argument, 0, LONG_OPT_DEBUG_ROTATE_MAX}, {"disable-afs-check", no_argument, 0, 'A'}, {"disable-cache", no_argument, 0, LONG_OPT_DISABLE_BATCH_CACHE}, {"email", required_argument, 0, 'm'}, {"enable_hook_example", no_argument, 0, LONG_OPT_HOOK_EXAMPLE}, {"wait-for-files-upto", required_argument, 0, LONG_OPT_FILE_CREATION_PATIENCE_WAIT_TIME}, {"gc", required_argument, 0, 'g'}, {"gc-size", required_argument, 0, LONG_OPT_GC_SIZE}, {"gc-count", required_argument, 0, 'G'}, {"help", no_argument, 0, 'h'}, {"local-cores", required_argument, 0, LONG_OPT_LOCAL_CORES}, {"local-memory", required_argument, 0, LONG_OPT_LOCAL_MEMORY}, {"local-disk", required_argument, 0, LONG_OPT_LOCAL_DISK}, {"makeflow-log", required_argument, 0, 'l'}, {"max-local", required_argument, 0, 'j'}, {"max-remote", required_argument, 0, 'J'}, {"monitor", required_argument, 0, LONG_OPT_MONITOR}, {"monitor-interval", required_argument, 0, LONG_OPT_MONITOR_INTERVAL}, {"monitor-log-name", required_argument, 0, LONG_OPT_MONITOR_LOG_NAME}, {"monitor-with-opened-files", no_argument, 0, LONG_OPT_MONITOR_OPENED_FILES}, {"monitor-with-time-series", no_argument, 0, LONG_OPT_MONITOR_TIME_SERIES}, {"mounts", required_argument, 0, LONG_OPT_MOUNTS}, {"password", required_argument, 0, LONG_OPT_PASSWORD}, {"port", required_argument, 0, 'p'}, {"port-file", required_argument, 0, 'Z'}, {"priority", required_argument, 0, 'P'}, {"project-name", required_argument, 0, 'N'}, {"retry", no_argument, 0, 'R'}, {"retry-count", required_argument, 0, 'r'}, {"do-not-save-failed-output", no_argument, 0, LONG_OPT_FAIL_DIR}, {"sandbox", no_argument, 0, LONG_OPT_SANDBOX}, {"send-environment", no_argument, 0, LONG_OPT_SEND_ENVIRONMENT}, {"shared-fs", required_argument, 0, LONG_OPT_SHARED_FS}, {"show-output", no_argument, 0, 'O'}, {"storage-type", required_argument, 0, LONG_OPT_STORAGE_TYPE}, {"storage-limit", required_argument, 0, LONG_OPT_STORAGE_LIMIT}, {"storage-print", required_argument, 0, LONG_OPT_STORAGE_PRINT}, {"submission-timeout", required_argument, 0, 'S'}, {"summary-log", required_argument, 0, 'f'}, {"tickets", required_argument, 0, LONG_OPT_TICKETS}, {"version", no_argument, 0, 'v'}, {"log-verbose", no_argument, 0, LONG_OPT_LOG_VERBOSE_MODE}, {"working-dir", required_argument, 0, LONG_OPT_WORKING_DIR}, {"skip-file-check", no_argument, 0, LONG_OPT_SKIP_FILE_CHECK}, {"umbrella-binary", required_argument, 0, LONG_OPT_UMBRELLA_BINARY}, {"umbrella-log-prefix", required_argument, 0, 
LONG_OPT_UMBRELLA_LOG_PREFIX}, {"umbrella-mode", required_argument, 0, LONG_OPT_UMBRELLA_MODE}, {"umbrella-spec", required_argument, 0, LONG_OPT_UMBRELLA_SPEC}, {"work-queue-preferred-connection", required_argument, 0, LONG_OPT_PREFERRED_CONNECTION}, {"wq-estimate-capacity", no_argument, 0, 'E'}, {"wq-fast-abort", required_argument, 0, 'F'}, {"wq-keepalive-interval", required_argument, 0, 'u'}, {"wq-keepalive-timeout", required_argument, 0, 't'}, {"wq-schedule", required_argument, 0, 'W'}, {"wq-wait-queue-size", required_argument, 0, LONG_OPT_WQ_WAIT_FOR_WORKERS}, {"wrapper", required_argument, 0, LONG_OPT_WRAPPER}, {"wrapper-input", required_argument, 0, LONG_OPT_WRAPPER_INPUT}, {"wrapper-output", required_argument, 0, LONG_OPT_WRAPPER_OUTPUT}, {"zero-length-error", no_argument, 0, 'z'}, {"change-directory", required_argument, 0, 'X'}, {"docker", required_argument, 0, LONG_OPT_DOCKER}, {"docker-tar", required_argument, 0, LONG_OPT_DOCKER_TAR}, {"docker-opt", required_argument, 0, LONG_OPT_DOCKER_OPT}, {"amazon-config", required_argument, 0, LONG_OPT_AMAZON_CONFIG}, {"lambda-config", required_argument, 0, LONG_OPT_LAMBDA_CONFIG}, {"json", no_argument, 0, LONG_OPT_JSON}, {"jx", no_argument, 0, LONG_OPT_JX}, {"jx-context", required_argument, 0, LONG_OPT_JX_ARGS}, {"jx-args", required_argument, 0, LONG_OPT_JX_ARGS}, {"jx-define", required_argument, 0, LONG_OPT_JX_DEFINE}, {"enforcement", no_argument, 0, LONG_OPT_ENFORCEMENT}, {"parrot-path", required_argument, 0, LONG_OPT_PARROT_PATH}, {"singularity", required_argument, 0, LONG_OPT_SINGULARITY}, {"singularity-opt", required_argument, 0, LONG_OPT_SINGULARITY_OPT}, {"archive", optional_argument, 0, LONG_OPT_ARCHIVE}, {"archive-read", optional_argument, 0, LONG_OPT_ARCHIVE_READ_ONLY}, {"archive-write", optional_argument, 0, LONG_OPT_ARCHIVE_WRITE_ONLY}, {"mesos-master", required_argument, 0, LONG_OPT_MESOS_MASTER}, {"mesos-path", required_argument, 0, LONG_OPT_MESOS_PATH}, {"mesos-preload", required_argument, 0, LONG_OPT_MESOS_PRELOAD}, {"k8s-image", required_argument, 0, LONG_OPT_K8S_IMG}, {0, 0, 0, 0} }; static const char option_string_run[] = "aAB:c::C:d:Ef:F:g:G:hj:J:l:L:m:M:N:o:Op:P:r:RS:t:T:u:vW:X:zZ:"; while((c = jx_getopt(argc, argv, option_string_run, long_options_run, NULL)) >= 0) { switch (c) { case 'a': work_queue_master_mode = "catalog"; break; case 'A': disable_afs_check = 1; break; case 'B': batch_submit_options = optarg; break; case 'c': clean_mode = MAKEFLOW_CLEAN_ALL; if(optarg){ if(strcasecmp(optarg, "intermediates") == 0){ clean_mode = MAKEFLOW_CLEAN_INTERMEDIATES; } else if(strcasecmp(optarg, "outputs") == 0){ clean_mode = MAKEFLOW_CLEAN_OUTPUTS; } else if(strcasecmp(optarg, "cache") == 0){ clean_mode = MAKEFLOW_CLEAN_CACHE; } else if(strcasecmp(optarg, "all") != 0){ fprintf(stderr, "makeflow: unknown clean option %s", optarg); exit(1); } } break; case 'C': setenv("CATALOG_HOST", optarg, 1); break; case 'd': debug_flags_set(optarg); break; case 'E': // This option is deprecated. Capacity estimation is now on by default. 
break; case LONG_OPT_AUTH: if (!auth_register_byname(optarg)) fatal("could not register authentication method `%s': %s", optarg, strerror(errno)); did_explicit_auth = 1; break; case LONG_OPT_TICKETS: chirp_tickets = strdup(optarg); break; case 'f': write_summary_to = xxstrdup(optarg); break; case 'F': wq_option_fast_abort_multiplier = atof(optarg); break; case 'g': if(strcasecmp(optarg, "none") == 0) { makeflow_gc_method = MAKEFLOW_GC_NONE; } else if(strcasecmp(optarg, "ref_cnt") == 0) { makeflow_gc_method = MAKEFLOW_GC_COUNT; if(makeflow_gc_count < 0) makeflow_gc_count = 16; /* Try to collect at most 16 files. */ } else if(strcasecmp(optarg, "on_demand") == 0) { makeflow_gc_method = MAKEFLOW_GC_ON_DEMAND; if(makeflow_gc_count < 0) makeflow_gc_count = 16; /* Try to collect at most 16 files. */ } else if(strcasecmp(optarg, "all") == 0) { makeflow_gc_method = MAKEFLOW_GC_ALL; if(makeflow_gc_count < 0) makeflow_gc_count = 1 << 14; /* Inode threshold of 2^14. */ } else { fprintf(stderr, "makeflow: invalid garbage collection method: %s\n", optarg); exit(1); } break; case LONG_OPT_GC_SIZE: makeflow_gc_size = string_metric_parse(optarg); break; case 'G': makeflow_gc_count = atoi(optarg); break; case LONG_OPT_FILE_CREATION_PATIENCE_WAIT_TIME: file_creation_patience_wait_time = MAX(0,atoi(optarg)); break; case 'h': show_help_run(argv[0]); return 0; case 'j': explicit_local_jobs_max = atoi(optarg); break; case 'J': explicit_remote_jobs_max = atoi(optarg); break; case 'l': logfilename = xxstrdup(optarg); break; case 'L': batchlogfilename = xxstrdup(optarg); break; case 'm': email_summary_to = xxstrdup(optarg); break; case LONG_OPT_LOCAL_CORES: explicit_local_cores = atoi(optarg); break; case LONG_OPT_LOCAL_MEMORY: explicit_local_memory = atoi(optarg); break; case LONG_OPT_LOCAL_DISK: explicit_local_disk = atoi(optarg); break; case LONG_OPT_MONITOR: makeflow_hook_register(&makeflow_hook_resource_monitor); jx_insert(hook_args, jx_string("resource_monitor_log_dir"), jx_string(optarg)); break; case LONG_OPT_MONITOR_INTERVAL: makeflow_hook_register(&makeflow_hook_resource_monitor); jx_insert(hook_args, jx_string("resource_monitor_interval"), jx_integer(atoi(optarg))); break; case LONG_OPT_MONITOR_TIME_SERIES: makeflow_hook_register(&makeflow_hook_resource_monitor); jx_insert(hook_args, jx_string("resource_monitor_enable_time_series"), jx_integer(1)); break; case LONG_OPT_MONITOR_OPENED_FILES: makeflow_hook_register(&makeflow_hook_resource_monitor); jx_insert(hook_args, jx_string("resource_monitor_enable_list_files"), jx_integer(1)); break; case LONG_OPT_MONITOR_LOG_NAME: makeflow_hook_register(&makeflow_hook_resource_monitor); jx_insert(hook_args, jx_string("resource_monitor_log_format"), jx_string(optarg)); break; case LONG_OPT_CACHE: mount_cache = xxstrdup(optarg); break; case LONG_OPT_MOUNTS: mountfile = xxstrdup(optarg); break; case LONG_OPT_AMAZON_CONFIG: amazon_config = xxstrdup(optarg); break; case LONG_OPT_LAMBDA_CONFIG: lambda_config = xxstrdup(optarg); break; case 'M': case 'N': free(project); project = xxstrdup(optarg); work_queue_master_mode = "catalog"; catalog_reporting_on = 1; //set to true break; case 'o': debug_config_file(optarg); break; case 'p': port_set = 1; port = atoi(optarg); break; case 'P': priority = optarg; break; case 'r': makeflow_retry_flag = 1; makeflow_retry_max = atoi(optarg); break; case 'R': makeflow_retry_flag = 1; break; case 'S': makeflow_submit_timeout = atoi(optarg); break; case 't': work_queue_keepalive_timeout = optarg; break; case 'T': batch_queue_type = 
batch_queue_type_from_string(optarg); if(batch_queue_type == BATCH_QUEUE_TYPE_UNKNOWN) { fprintf(stderr, "makeflow: unknown batch queue type: %s\n", optarg); return 1; } break; case 'u': work_queue_keepalive_interval = optarg; break; case 'v': cctools_version_print(stdout, argv[0]); return 0; case 'W': if(!strcmp(optarg, "files")) { wq_option_scheduler = WORK_QUEUE_SCHEDULE_FILES; } else if(!strcmp(optarg, "time")) { wq_option_scheduler = WORK_QUEUE_SCHEDULE_TIME; } else if(!strcmp(optarg, "fcfs")) { wq_option_scheduler = WORK_QUEUE_SCHEDULE_FCFS; } else { fprintf(stderr, "makeflow: unknown scheduling mode %s\n", optarg); return 1; } break; case 'X': change_dir = optarg; break; case 'z': output_len_check = 1; break; case 'Z': work_queue_port_file = optarg; port = 0; port_set = 1; //WQ is going to set the port, so we continue as if already set. break; case LONG_OPT_PASSWORD: if(copy_file_to_buffer(optarg, &work_queue_password, NULL) < 0) { fprintf(stderr, "makeflow: couldn't open %s: %s\n", optarg, strerror(errno)); return 1; } break; case LONG_OPT_DISABLE_BATCH_CACHE: cache_mode = 0; break; case LONG_OPT_HOOK_EXAMPLE: makeflow_hook_register(&makeflow_hook_example); break; case LONG_OPT_WQ_WAIT_FOR_WORKERS: wq_wait_queue_size = optarg; break; case LONG_OPT_WORKING_DIR: free(working_dir); working_dir = xxstrdup(optarg); break; case LONG_OPT_PREFERRED_CONNECTION: free(work_queue_preferred_connection); work_queue_preferred_connection = xxstrdup(optarg); break; case LONG_OPT_DEBUG_ROTATE_MAX: debug_config_file_size(string_metric_parse(optarg)); break; case LONG_OPT_LOG_VERBOSE_MODE: log_verbose_mode = 1; break; case LONG_OPT_WRAPPER: if(!wrapper) wrapper = makeflow_wrapper_create(); makeflow_wrapper_add_command(wrapper, optarg); break; case LONG_OPT_WRAPPER_INPUT: if(!wrapper) wrapper = makeflow_wrapper_create(); makeflow_wrapper_add_input_file(wrapper, optarg); break; case LONG_OPT_WRAPPER_OUTPUT: if(!wrapper) wrapper = makeflow_wrapper_create(); makeflow_wrapper_add_output_file(wrapper, optarg); break; case LONG_OPT_SHARED_FS: assert(shared_fs_list); if (optarg[0] != '/') fatal("Shared fs must be specified as an absolute path"); list_push_head(shared_fs_list, xxstrdup(optarg)); break; case LONG_OPT_STORAGE_TYPE: makeflow_hook_register(&makeflow_hook_storage_allocation); jx_insert(hook_args, jx_string("storage_allocation_type"), jx_integer(atoi(optarg))); break; case LONG_OPT_STORAGE_LIMIT: makeflow_hook_register(&makeflow_hook_storage_allocation); jx_insert(hook_args, jx_string("storage_allocation_limit"), jx_integer(string_metric_parse(optarg))); break; case LONG_OPT_STORAGE_PRINT: makeflow_hook_register(&makeflow_hook_storage_allocation); jx_insert(hook_args, jx_string("storage_allocation_print"), jx_string(optarg)); break; case LONG_OPT_DOCKER: makeflow_hook_register(&makeflow_hook_docker); jx_insert(hook_args, jx_string("docker_container_image"), jx_string(optarg)); break; case LONG_OPT_SKIP_FILE_CHECK: skip_file_check = 1; break; case LONG_OPT_DOCKER_TAR: makeflow_hook_register(&makeflow_hook_docker); jx_insert(hook_args, jx_string("docker_container_tar"), jx_string(optarg)); break; case LONG_OPT_DOCKER_OPT: makeflow_hook_register(&makeflow_hook_docker); jx_insert(hook_args, jx_string("docker_container_opt"), jx_string(optarg)); break; case LONG_OPT_SINGULARITY: makeflow_hook_register(&makeflow_hook_singularity); jx_insert(hook_args, jx_string("singularity_container_image"), jx_string(optarg)); break; case LONG_OPT_SINGULARITY_OPT: jx_insert(hook_args, 
jx_string("singularity_container_options"), jx_string(optarg)); break; case LONG_OPT_ALLOCATION_MODE: if(!strcmp(optarg, "throughput")) { allocation_mode = CATEGORY_ALLOCATION_MODE_MAX_THROUGHPUT; } else if(!strcmp(optarg, "waste")) { allocation_mode = CATEGORY_ALLOCATION_MODE_MIN_WASTE; } else if(!strcmp(optarg, "fixed")) { allocation_mode = CATEGORY_ALLOCATION_MODE_FIXED; } else { fatal("Allocation mode '%s' is not valid. Use one of: throughput waste fixed"); } case LONG_OPT_JSON: dag_syntax = DAG_SYNTAX_JSON; break; case LONG_OPT_JX: dag_syntax = DAG_SYNTAX_JX; break; case LONG_OPT_JX_ARGS: dag_syntax = DAG_SYNTAX_JX; jx_args = jx_parse_cmd_args(jx_args, optarg); if (!jx_args) { fatal("Failed to parse in JX Args File.\n"); } break; case LONG_OPT_JX_DEFINE: dag_syntax = DAG_SYNTAX_JX; if (!jx_parse_cmd_define(jx_args, optarg)) { fatal("Failed to parse in JX Define.\n"); } break; case LONG_OPT_UMBRELLA_BINARY: if(!umbrella) umbrella = makeflow_wrapper_umbrella_create(); makeflow_wrapper_umbrella_set_binary(umbrella, (const char *)xxstrdup(optarg)); break; case LONG_OPT_UMBRELLA_LOG_PREFIX: if(!umbrella) umbrella = makeflow_wrapper_umbrella_create(); makeflow_wrapper_umbrella_set_log_prefix(umbrella, (const char *)xxstrdup(optarg)); break; case LONG_OPT_UMBRELLA_MODE: if(!umbrella) umbrella = makeflow_wrapper_umbrella_create(); makeflow_wrapper_umbrella_set_mode(umbrella, (const char *)xxstrdup(optarg)); break; case LONG_OPT_UMBRELLA_SPEC: if(!umbrella) umbrella = makeflow_wrapper_umbrella_create(); makeflow_wrapper_umbrella_set_spec(umbrella, (const char *)xxstrdup(optarg)); case LONG_OPT_MESOS_MASTER: mesos_master = xxstrdup(optarg); break; case LONG_OPT_MESOS_PATH: mesos_path = xxstrdup(optarg); break; case LONG_OPT_MESOS_PRELOAD: mesos_preload = xxstrdup(optarg); break; case LONG_OPT_K8S_IMG: k8s_image = xxstrdup(optarg); break; case LONG_OPT_ARCHIVE: should_read_archive = 1; should_write_to_archive = 1; set_archive_directory_string(&archive_directory, optarg); break; case LONG_OPT_ARCHIVE_READ_ONLY: should_read_archive = 1; set_archive_directory_string(&archive_directory, optarg); break; case LONG_OPT_ARCHIVE_WRITE_ONLY: should_write_to_archive = 1; set_archive_directory_string(&archive_directory, optarg); break; case LONG_OPT_SEND_ENVIRONMENT: should_send_all_local_environment = 1; break; case LONG_OPT_ENFORCEMENT: if(!enforcer) enforcer = makeflow_wrapper_create(); break; case LONG_OPT_PARROT_PATH: parrot_path = xxstrdup(optarg); break; case LONG_OPT_FAIL_DIR: save_failure = 0; break; case LONG_OPT_SANDBOX: makeflow_hook_register(&makeflow_hook_sandbox); break; case LONG_OPT_ARGV: { debug(D_MAKEFLOW, "loading argv from %s", optarg); struct jx *j = jx_parse_file(optarg); if (!j) { fatal("failed to parse JSON argv %s", optarg); } if (!jx_istype(j, JX_OBJECT)) { fatal("argv must be a JX object"); } struct jx *k = jx_string("MAKEFLOW"); struct jx *v = jx_remove(j, k); jx_delete(k); if (v && dagfile) { fatal("only one dagfile can be specified"); } if (v && !jx_match_string(v, &dagfile)) { fatal("dagfile must be a string filename"); } jx_delete(v); jx_getopt_push(j); jx_delete(j); break; } default: show_help_run(argv[0]); return 1; } } cctools_version_debug(D_MAKEFLOW_RUN, argv[0]); if(!did_explicit_auth) auth_register_all(); if(chirp_tickets) { auth_ticket_load(chirp_tickets); free(chirp_tickets); } else { auth_ticket_load(NULL); } // REGISTER HOOKS HERE if (enforcer && umbrella) { fatal("enforcement and Umbrella are mutually exclusive\n"); } if(save_failure){ 
makeflow_hook_register(&makeflow_hook_fail_dir); } makeflow_hook_create(hook_args); if((argc - optind) == 1) { if (dagfile) { fatal("only one dagfile can be specified"); } dagfile = xxstrdup(argv[optind]); } else if (!dagfile) { int rv = access("./Makeflow", R_OK); if(rv < 0) { fprintf(stderr, "makeflow: No makeflow specified and file \"./Makeflow\" could not be found.\n"); fprintf(stderr, "makeflow: Run \"%s -h\" for help with options.\n", argv[0]); return 1; } dagfile = xxstrdup("./Makeflow"); } if(batch_queue_type == BATCH_QUEUE_TYPE_WORK_QUEUE) { if(strcmp(work_queue_master_mode, "catalog") == 0 && project == NULL) { fprintf(stderr, "makeflow: Makeflow running in catalog mode. Please use '-N' option to specify the name of this project.\n"); fprintf(stderr, "makeflow: Run \"makeflow -h\" for help with options.\n"); return 1; } // Use Work Queue default port in standalone mode when port is not // specified with -p option. In Work Queue catalog mode, Work Queue // would choose an arbitrary port when port is not explicitly specified. if(!port_set && strcmp(work_queue_master_mode, "standalone") == 0) { port_set = 1; port = WORK_QUEUE_DEFAULT_PORT; } if(port_set) { char *value; value = string_format("%d", port); setenv("WORK_QUEUE_PORT", value, 1); free(value); } } if(!logfilename) logfilename = string_format("%s.makeflowlog", dagfile); printf("parsing %s...\n",dagfile); struct dag *d = dag_from_file(dagfile, dag_syntax, jx_args); if(!d) { fatal("makeflow: couldn't load %s: %s\n", dagfile, strerror(errno)); } d->allocation_mode = allocation_mode; /* Measure resources available for local job execution. */ local_resources = rmsummary_create(-1); makeflow_local_resources_measure(local_resources); if(explicit_local_cores) local_resources->cores = explicit_local_cores; if(explicit_local_memory) local_resources->memory = explicit_local_memory; if(explicit_local_disk) local_resources->disk = explicit_local_disk; makeflow_local_resources_print(local_resources); /* Environment variables override explicit settings for maximum jobs. */ s = getenv("MAKEFLOW_MAX_REMOTE_JOBS"); if(s) { explicit_remote_jobs_max = MIN(explicit_remote_jobs_max, atoi(s)); } s = getenv("MAKEFLOW_MAX_LOCAL_JOBS"); if(s) { explicit_local_jobs_max = MIN(explicit_local_jobs_max, atoi(s)); } /* Handle the confusing case of specifying local/remote max jobs when the job type is LOCAL. Take either option to mean both, use the minimum if both are set, and the number of cores if neither is set. */ if(batch_queue_type == BATCH_QUEUE_TYPE_LOCAL) { int j; if(explicit_remote_jobs_max && !explicit_local_jobs_max) { j = explicit_remote_jobs_max; } else if(explicit_local_jobs_max && !explicit_remote_jobs_max) { j = explicit_local_jobs_max; } else if(explicit_local_jobs_max && explicit_remote_jobs_max) { j = MIN(explicit_local_jobs_max,explicit_remote_jobs_max); } else { j = local_resources->cores; } local_jobs_max = remote_jobs_max = j; } else { /* We are using a separate local and remote queue, so set them separately. 
*/ if(explicit_local_jobs_max) { local_jobs_max = explicit_local_jobs_max; } else { local_jobs_max = local_resources->cores; } if(explicit_remote_jobs_max) { remote_jobs_max = explicit_remote_jobs_max; } else { if(batch_queue_type == BATCH_QUEUE_TYPE_WORK_QUEUE) { remote_jobs_max = 10 * MAX_REMOTE_JOBS_DEFAULT; } else { remote_jobs_max = MAX_REMOTE_JOBS_DEFAULT; } } printf("max running remote jobs: %d\n",remote_jobs_max); } printf("max running local jobs: %d\n",local_jobs_max); remote_queue = batch_queue_create(batch_queue_type); if(!remote_queue) { fprintf(stderr, "makeflow: couldn't create batch queue.\n"); if(port != 0) fprintf(stderr, "makeflow: perhaps port %d is already in use?\n", port); goto EXIT_WITH_FAILURE; } if(!batchlogfilename) { if(batch_queue_supports_feature(remote_queue, "batch_log_name")){ batchlogfilename = string_format(batch_queue_supports_feature(remote_queue, "batch_log_name"), dagfile); } else { batchlogfilename = string_format("%s.batchlog", dagfile); } } if(batch_queue_type == BATCH_QUEUE_TYPE_MESOS) { batch_queue_set_option(remote_queue, "mesos-path", mesos_path); batch_queue_set_option(remote_queue, "mesos-master", mesos_master); batch_queue_set_option(remote_queue, "mesos-preload", mesos_preload); } if(batch_queue_type == BATCH_QUEUE_TYPE_K8S) { batch_queue_set_option(remote_queue, "k8s-image", k8s_image); } if(batch_queue_type == BATCH_QUEUE_TYPE_DRYRUN) { FILE *file = fopen(batchlogfilename,"w"); if(!file) fatal("unable to open log file %s: %s\n", batchlogfilename, strerror(errno)); fprintf(file, "#!/bin/sh\n"); fprintf(file, "set -x\n"); fprintf(file, "set -e\n"); fprintf(file, "\n# %s version %s (released %s)\n\n", argv[0], CCTOOLS_VERSION, CCTOOLS_RELEASE_DATE); fclose(file); } batch_queue_set_logfile(remote_queue, batchlogfilename); batch_queue_set_option(remote_queue, "batch-options", batch_submit_options); batch_queue_set_option(remote_queue, "password", work_queue_password); batch_queue_set_option(remote_queue, "master-mode", work_queue_master_mode); batch_queue_set_option(remote_queue, "name", project); batch_queue_set_option(remote_queue, "priority", priority); batch_queue_set_option(remote_queue, "keepalive-interval", work_queue_keepalive_interval); batch_queue_set_option(remote_queue, "keepalive-timeout", work_queue_keepalive_timeout); batch_queue_set_option(remote_queue, "caching", cache_mode ? "yes" : "no"); batch_queue_set_option(remote_queue, "wait-queue-size", wq_wait_queue_size); batch_queue_set_option(remote_queue, "amazon-config", amazon_config); batch_queue_set_option(remote_queue, "lambda-config", lambda_config); batch_queue_set_option(remote_queue, "working-dir", working_dir); batch_queue_set_option(remote_queue, "master-preferred-connection", work_queue_preferred_connection); char *fa_multiplier = string_format("%f", wq_option_fast_abort_multiplier); batch_queue_set_option(remote_queue, "fast-abort", fa_multiplier); free(fa_multiplier); /* Do not create a local queue for systems where local and remote are the same. */ if(!batch_queue_supports_feature(remote_queue, "local_job_queue")) { local_queue = 0; } else { local_queue = batch_queue_create(BATCH_QUEUE_TYPE_LOCAL); if(!local_queue) { fatal("couldn't create local job queue."); } } /* Remote storage modes do not (yet) support measuring storage for garbage collection. 
*/ if(makeflow_gc_method == MAKEFLOW_GC_SIZE && !batch_queue_supports_feature(remote_queue, "gc_size")) { makeflow_gc_method = MAKEFLOW_GC_ALL; } /* Set dag_node->umbrella_spec */ if(!clean_mode) { struct dag_node *cur; cur = d->nodes; while(cur) { struct dag_variable_lookup_set s = {d, cur->category, cur, NULL}; char *spec = NULL; spec = dag_variable_lookup_string("SPEC", &s); if(spec) { debug(D_MAKEFLOW_RUN, "setting dag_node->umbrella_spec (rule %d) from the makefile ...\n", cur->nodeid); dag_node_set_umbrella_spec(cur, xxstrdup(spec)); } else if(umbrella && umbrella->spec) { debug(D_MAKEFLOW_RUN, "setting dag_node->umbrella_spec (rule %d) from the --umbrella_spec option ...\n", cur->nodeid); dag_node_set_umbrella_spec(cur, umbrella->spec); } free(spec); cur = cur->next; } debug(D_MAKEFLOW_RUN, "makeflow_wrapper_umbrella_preparation...\n"); // When the user specifies umbrella specs in a makefile, but does not use any `--umbrella...` option, // an umbrella wrapper was created to hold the default values for umbrella-related setttings such as // log_prefix and default umbrella execution engine. if(!umbrella) umbrella = makeflow_wrapper_umbrella_create(); makeflow_wrapper_umbrella_preparation(umbrella, d); } if(enforcer) { makeflow_wrapper_enforcer_init(enforcer, parrot_path); } makeflow_parse_input_outputs(d); makeflow_prepare_nested_jobs(d); if (change_dir) chdir(change_dir); if(!disable_afs_check && (batch_queue_type==BATCH_QUEUE_TYPE_CONDOR)) { char *cwd = path_getcwd(); if(!strncmp(cwd, "/afs", 4)) { fprintf(stderr,"error: The working directory is '%s'\n", cwd); fprintf(stderr,"This won't work because Condor is not able to write to files in AFS.\n"); fprintf(stderr,"Instead, run your workflow from a local disk like /tmp."); fprintf(stderr,"Or, use the Work Queue batch system with -T wq.\n"); free(cwd); goto EXIT_WITH_FAILURE; } free(cwd); } /* Prepare the input files specified in the mountfile. */ if(mountfile && !clean_mode) { /* check the validity of the mountfile and load the info from the mountfile into the dag */ printf("checking the consistency of the mountfile ...\n"); if(makeflow_mounts_parse_mountfile(mountfile, d)) { fprintf(stderr, "Failed to parse the mountfile: %s.\n", mountfile); free(mountfile); return -1; } free(mountfile); use_mountfile = 1; } printf("checking %s for consistency...\n",dagfile); if(!makeflow_check(d)) { goto EXIT_WITH_FAILURE; } if(!makeflow_check_batch_consistency(d) && clean_mode == MAKEFLOW_CLEAN_NONE) { goto EXIT_WITH_FAILURE; } int rc = makeflow_hook_dag_check(d); if(rc == MAKEFLOW_HOOK_FAILURE) { goto EXIT_WITH_FAILURE; } else if(rc == MAKEFLOW_HOOK_END) { goto EXIT_WITH_SUCCESS; } printf("%s has %d rules.\n",dagfile,d->nodeid_counter); setlinebuf(stdout); setlinebuf(stderr); if(mount_cache) d->cache_dir = mount_cache; /* In case when the user uses --cache option to specify the mount cache dir and the log file also has * a cache dir logged, these two dirs must be the same. Otherwise exit. */ if(makeflow_log_recover(d, logfilename, log_verbose_mode, remote_queue, clean_mode, skip_file_check )) { goto EXIT_WITH_FAILURE; } /* This check must happen after makeflow_log_recover which may load the cache_dir info into d->cache_dir. * This check must happen before makeflow_mount_install to guarantee that the program ends before any mount is copied if any target is invliad. 
*/ if(use_mountfile) { if(makeflow_mount_check_target(d)) { goto EXIT_WITH_FAILURE; } } if(use_mountfile && !clean_mode) { if(makeflow_mounts_install(d)) { fprintf(stderr, "Failed to install the dependencies specified in the mountfile!\n"); goto EXIT_WITH_FAILURE; } } struct dag_file *f = dag_file_lookup_or_create(d, batchlogfilename); makeflow_log_file_state_change(d, f, DAG_FILE_STATE_EXPECT); if(batch_queue_supports_feature(remote_queue, "batch_log_transactions")) { const char *transactions = batch_queue_get_option(remote_queue, "batch_log_transactions_name"); f = dag_file_lookup_or_create(d, transactions); makeflow_log_file_state_change(d, f, DAG_FILE_STATE_EXPECT); } if(clean_mode != MAKEFLOW_CLEAN_NONE) { makeflow_hook_dag_clean(d); printf("cleaning filesystem...\n"); if(makeflow_clean(d, remote_queue, clean_mode)) { debug(D_ERROR, "Failed to clean up makeflow!\n"); goto EXIT_WITH_FAILURE; } if(clean_mode == MAKEFLOW_CLEAN_ALL) { unlink(logfilename); } goto EXIT_WITH_SUCCESS; } printf("starting workflow....\n"); makeflow_hook_dag_start(d); port = batch_queue_port(remote_queue); if(work_queue_port_file) opts_write_port_file(work_queue_port_file, port); if(port > 0) printf("listening for workers on port %d.\n", port); signal(SIGINT, handle_abort); signal(SIGQUIT, handle_abort); signal(SIGTERM, handle_abort); makeflow_log_started_event(d); runtime = timestamp_get(); d->archive_directory = archive_directory; d->should_read_archive = should_read_archive; d->should_write_to_archive = should_write_to_archive; makeflow_run(d); if(makeflow_failed_flag == 0 && makeflow_nodes_local_waiting_count(d) > 0) { debug(D_ERROR, "There are local jobs that could not be run. Usually this means that makeflow did not have enough local resources to run them."); goto EXIT_WITH_FAILURE; } if(makeflow_hook_dag_end(d) != MAKEFLOW_HOOK_SUCCESS){ goto EXIT_WITH_FAILURE; } EXIT_WITH_SUCCESS: /* Makeflow fails by default if we goto EXIT_WITH_FAILURE. This indicates we have correctly initialized. */ makeflow_failed_flag = 0; EXIT_WITH_FAILURE: time_completed = timestamp_get(); runtime = time_completed - runtime; /* * Set the abort and failed flag for batch_job_mesos mode. * Since batch_queue_delete(struct batch_queue *q) will call * batch_queue_mesos_free(struct batch_queue *q), which is defined * in batch_job/src/batch_job_mesos.c. Then this function will check * the abort and failed status of the batch_queue and inform * the makeflow mesos scheduler. 
*/ if (batch_queue_type == BATCH_QUEUE_TYPE_MESOS) { batch_queue_set_int_option(remote_queue, "batch-queue-abort-flag", (int)makeflow_abort_flag); batch_queue_set_int_option(remote_queue, "batch-queue-failed-flag", (int)makeflow_failed_flag); } if(write_summary_to || email_summary_to) makeflow_summary_create(d, write_summary_to, email_summary_to, runtime, time_completed, argc, argv, dagfile, remote_queue, makeflow_abort_flag, makeflow_failed_flag ); if(wrapper){ makeflow_wrapper_delete(wrapper); } int exit_value; if(makeflow_abort_flag) { makeflow_hook_dag_abort(d); makeflow_log_aborted_event(d); fprintf(stderr, "workflow was aborted.\n"); exit_value = EXIT_FAILURE; } else if(makeflow_failed_flag) { makeflow_hook_dag_fail(d); makeflow_log_failed_event(d); fprintf(stderr, "workflow failed.\n"); exit_value = EXIT_FAILURE; } else { makeflow_hook_dag_success(d); makeflow_log_completed_event(d); printf("nothing left to do.\n"); exit_value = EXIT_SUCCESS; } makeflow_hook_destroy(d); /* Batch queues are removed after hooks are destroyed to allow for file clean up on related files. */ batch_queue_delete(remote_queue); if(local_queue) batch_queue_delete(local_queue); makeflow_log_close(d); free(archive_directory); exit(exit_value); return 0; } /* vim: set noexpandtab tabstop=4: */
1
13,805
Is there any need to pass this through `string_metric_parse`? I believe you can either create an off_t or just pass 0, with no need to add a string conversion into the mix.
cooperative-computing-lab-cctools
c
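For readers following the record above: the review comment questions routing a value through `string_metric_parse` when a plain off_t (or just 0) would do. Below is a minimal self-contained sketch of that point. The stand-in definitions are toy simplifications for illustration only; the real functions live in cctools, and their signatures here are assumptions inferred from the usage visible at LONG_OPT_DEBUG_ROTATE_MAX and LONG_OPT_GC_SIZE in the source above.

#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>

/* Toy stand-ins for the cctools calls under discussion. The real
 * signatures are assumed to be void debug_config_file_size(off_t)
 * and int64_t string_metric_parse(const char *). */
static off_t rotation_size = 0;

static void debug_config_file_size(off_t size) { rotation_size = size; }

static int64_t string_metric_parse(const char *str)
{
	/* toy version: plain digits plus an optional K/M/G suffix */
	char suffix = 0;
	long long n = 0;
	sscanf(str, "%lld%c", &n, &suffix);
	if(suffix == 'K') n <<= 10;
	if(suffix == 'M') n <<= 20;
	if(suffix == 'G') n <<= 30;
	return n;
}

int main(void)
{
	/* User-supplied metric string: parsing is justified here. */
	debug_config_file_size(string_metric_parse("100M"));
	printf("parsed: %lld\n", (long long) rotation_size);

	/* Fixed constant: pass the off_t (or 0) directly, as the
	 * reviewer suggests -- no string round-trip needed. */
	debug_config_file_size(0);
	printf("constant: %lld\n", (long long) rotation_size);
	return 0;
}

Either way, the parse is only warranted when the input really is a human-entered metric string such as "100M"; a compile-time default gains nothing from the conversion.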
@@ -888,6 +888,17 @@ class WebDriver(BaseWebDriver):
         """
         self.execute(Command.MINIMIZE_WINDOW)
 
+    def print_page(self, print_option_arg = None):
+        """
+        Takes PDF of the current page.
+        The driver makes a best effort to return a PDF based on the provided parameters.
+        """
+        options = {}
+        if print_option_arg:
+            options = print_option_arg.print_options
+
+        return self.execute(Command.PRINT_PAGE, options)['value']
+
     @property
     def switch_to(self):
         """
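A short usage sketch of the print_page() method this patch introduces. The PrintOptions class — its name, import path, and attributes — is an assumption inferred from the patch's `print_option_arg.print_options` attribute access and is not defined in the diff; the base64 decoding step follows the W3C Print command, which returns the PDF as a base64-encoded string.

# Hypothetical usage of the new print_page() API (sketch, not from the patch).
import base64

from selenium import webdriver
from selenium.webdriver.common.print_page_options import PrintOptions  # assumed module path

driver = webdriver.Remote(command_executor='http://127.0.0.1:4444',
                          desired_capabilities={'browserName': 'chrome'})
driver.get('https://example.com')

print_options = PrintOptions()
print_options.orientation = 'landscape'  # attribute name is an assumption

pdf_base64 = driver.print_page(print_options)  # calling with no argument is also valid
with open('page.pdf', 'wb') as fd:
    fd.write(base64.b64decode(pdf_base64))

driver.quit()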
1
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """The WebDriver implementation.""" from abc import ABCMeta import base64 import copy from contextlib import (contextmanager, asynccontextmanager) import importlib import pkgutil import warnings import sys from .command import Command from .errorhandler import ErrorHandler from .file_detector import FileDetector, LocalFileDetector from .mobile import Mobile from .remote_connection import RemoteConnection from .script_key import ScriptKey from .switch_to import SwitchTo from .webelement import WebElement from selenium.common.exceptions import (InvalidArgumentException, JavascriptException, WebDriverException, NoSuchCookieException, UnknownMethodException) from selenium.webdriver.common.by import By from selenium.webdriver.common.timeouts import Timeouts from selenium.webdriver.common.html5.application_cache import ApplicationCache from selenium.webdriver.support.relative_locator import RelativeBy from six import add_metaclass try: str = basestring except NameError: pass cdp = None def import_cdp(): global cdp if cdp is None: cdp = importlib.import_module("selenium.webdriver.common.bidi.cdp") _W3C_CAPABILITY_NAMES = frozenset([ 'acceptInsecureCerts', 'browserName', 'browserVersion', 'platformName', 'pageLoadStrategy', 'proxy', 'setWindowRect', 'timeouts', 'unhandledPromptBehavior', 'strictFileInteractability' ]) _OSS_W3C_CONVERSION = { 'acceptSslCerts': 'acceptInsecureCerts', 'version': 'browserVersion', 'platform': 'platformName' } devtools = None def _make_w3c_caps(caps): """Makes a W3C alwaysMatch capabilities object. Filters out capability names that are not in the W3C spec. Spec-compliant drivers will reject requests containing unknown capability names. Moves the Firefox profile, if present, from the old location to the new Firefox options object. :Args: - caps - A dictionary of capabilities requested by the caller. """ caps = copy.deepcopy(caps) profile = caps.get('firefox_profile') always_match = {} if caps.get('proxy') and caps['proxy'].get('proxyType'): caps['proxy']['proxyType'] = caps['proxy']['proxyType'].lower() for k, v in caps.items(): if v and k in _OSS_W3C_CONVERSION: always_match[_OSS_W3C_CONVERSION[k]] = v.lower() if k == 'platform' else v if k in _W3C_CAPABILITY_NAMES or ':' in k: always_match[k] = v if profile: moz_opts = always_match.get('moz:firefoxOptions', {}) # If it's already present, assume the caller did that intentionally. if 'profile' not in moz_opts: # Don't mutate the original capabilities. 
new_opts = copy.deepcopy(moz_opts) new_opts['profile'] = profile always_match['moz:firefoxOptions'] = new_opts return {"firstMatch": [{}], "alwaysMatch": always_match} def get_remote_connection(capabilities, command_executor, keep_alive, ignore_local_proxy=False): from selenium.webdriver.chromium.remote_connection import ChromiumRemoteConnection from selenium.webdriver.safari.remote_connection import SafariRemoteConnection from selenium.webdriver.firefox.remote_connection import FirefoxRemoteConnection candidates = [RemoteConnection] + [ChromiumRemoteConnection, SafariRemoteConnection, FirefoxRemoteConnection] handler = next( (c for c in candidates if c.browser_name == capabilities.get('browserName')), RemoteConnection ) return handler(command_executor, keep_alive=keep_alive, ignore_proxy=ignore_local_proxy) @add_metaclass(ABCMeta) class BaseWebDriver(object): """ Abstract Base Class for all Webdriver subtypes. ABC's allow custom implementations of Webdriver to be registered so that isinstance type checks will succeed. """ # TODO: After dropping Python 2, use ABC instead of ABCMeta and remove metaclass decorator. class WebDriver(BaseWebDriver): """ Controls a browser by sending commands to a remote server. This server is expected to be running the WebDriver wire protocol as defined at https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol :Attributes: - session_id - String ID of the browser session started and controlled by this WebDriver. - capabilities - Dictionary of effective capabilities of this browser session as returned by the remote server. See https://github.com/SeleniumHQ/selenium/wiki/DesiredCapabilities - command_executor - remote_connection.RemoteConnection object used to execute commands. - error_handler - errorhandler.ErrorHandler object used to handle errors. """ _web_element_cls = WebElement def __init__(self, command_executor='http://127.0.0.1:4444', desired_capabilities=None, browser_profile=None, proxy=None, keep_alive=True, file_detector=None, options=None): """ Create a new driver that will issue commands using the wire protocol. :Args: - command_executor - Either a string representing URL of the remote server or a custom remote_connection.RemoteConnection object. Defaults to 'http://127.0.0.1:4444/wd/hub'. - desired_capabilities - A dictionary of capabilities to request when starting the browser session. Required parameter. - browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object. Only used if Firefox is requested. Optional. - proxy - A selenium.webdriver.common.proxy.Proxy object. The browser session will be started with given proxy settings, if possible. Optional. - keep_alive - Whether to configure remote_connection.RemoteConnection to use HTTP keep-alive. Defaults to True. - file_detector - Pass custom file detector object during instantiation. If None, then default LocalFileDetector() will be used. 
- options - instance of a driver options.Options class """ capabilities = {} _ignore_local_proxy = False if options is not None: capabilities = options.to_capabilities() _ignore_local_proxy = options._ignore_local_proxy if desired_capabilities is not None: if not isinstance(desired_capabilities, dict): raise WebDriverException("Desired Capabilities must be a dictionary") else: capabilities.update(desired_capabilities) self.command_executor = command_executor if isinstance(self.command_executor, (str, bytes)): self.command_executor = get_remote_connection(capabilities, command_executor=command_executor, keep_alive=keep_alive, ignore_local_proxy=_ignore_local_proxy) self._is_remote = True self.session_id = None self.caps = {} self.pinned_scripts = {} self.error_handler = ErrorHandler() self.start_client() self.start_session(capabilities, browser_profile) self._switch_to = SwitchTo(self) self._mobile = Mobile(self) self.file_detector = file_detector or LocalFileDetector() def __repr__(self): return '<{0.__module__}.{0.__name__} (session="{1}")>'.format( type(self), self.session_id) def __enter__(self): return self def __exit__(self, *args): self.quit() @contextmanager def file_detector_context(self, file_detector_class, *args, **kwargs): """ Overrides the current file detector (if necessary) in limited context. Ensures the original file detector is set afterwards. Example: with webdriver.file_detector_context(UselessFileDetector): someinput.send_keys('/etc/hosts') :Args: - file_detector_class - Class of the desired file detector. If the class is different from the current file_detector, then the class is instantiated with args and kwargs and used as a file detector during the duration of the context manager. - args - Optional arguments that get passed to the file detector class during instantiation. - kwargs - Keyword arguments, passed the same way as args. """ last_detector = None if not isinstance(self.file_detector, file_detector_class): last_detector = self.file_detector self.file_detector = file_detector_class(*args, **kwargs) try: yield finally: if last_detector is not None: self.file_detector = last_detector @property def mobile(self): return self._mobile @property def name(self): """Returns the name of the underlying browser for this instance. :Usage: :: name = driver.name """ if 'browserName' in self.caps: return self.caps['browserName'] else: raise KeyError('browserName not specified in session capabilities') def start_client(self): """ Called before starting a new session. This method may be overridden to define custom startup behavior. """ pass def stop_client(self): """ Called after executing a quit command. This method may be overridden to define custom shutdown behavior. """ pass def start_session(self, capabilities, browser_profile=None): """ Creates a new session with the desired capabilities. :Args: - browser_name - The name of the browser to request. - version - Which browser version to request. - platform - Which platform to request the browser on. - javascript_enabled - Whether the new session should support JavaScript. - browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object. Only used if Firefox is requested. 
""" if not isinstance(capabilities, dict): raise InvalidArgumentException("Capabilities must be a dictionary") if browser_profile: if "moz:firefoxOptions" in capabilities: capabilities["moz:firefoxOptions"]["profile"] = browser_profile.encoded else: capabilities.update({'firefox_profile': browser_profile.encoded}) w3c_caps = _make_w3c_caps(capabilities) parameters = {"capabilities": w3c_caps, "desiredCapabilities": capabilities} response = self.execute(Command.NEW_SESSION, parameters) if 'sessionId' not in response: response = response['value'] self.session_id = response['sessionId'] self.caps = response.get('value') # if capabilities is none we are probably speaking to # a W3C endpoint if self.caps is None: self.caps = response.get('capabilities') # Double check to see if we have a W3C Compliant browser self.w3c = response.get('status') is None self.command_executor.w3c = self.w3c def _wrap_value(self, value): if isinstance(value, dict): converted = {} for key, val in value.items(): converted[key] = self._wrap_value(val) return converted elif isinstance(value, self._web_element_cls): return {'ELEMENT': value.id, 'element-6066-11e4-a52e-4f735466cecf': value.id} elif isinstance(value, list): return list(self._wrap_value(item) for item in value) else: return value def create_web_element(self, element_id): """Creates a web element with the specified `element_id`.""" return self._web_element_cls(self, element_id, w3c=self.w3c) def _unwrap_value(self, value): if isinstance(value, dict): if 'ELEMENT' in value or 'element-6066-11e4-a52e-4f735466cecf' in value: wrapped_id = value.get('ELEMENT', None) if wrapped_id: return self.create_web_element(value['ELEMENT']) else: return self.create_web_element(value['element-6066-11e4-a52e-4f735466cecf']) else: for key, val in value.items(): value[key] = self._unwrap_value(val) return value elif isinstance(value, list): return list(self._unwrap_value(item) for item in value) else: return value def execute(self, driver_command, params=None): """ Sends a command to be executed by a command.CommandExecutor. :Args: - driver_command: The name of the command to execute as a string. - params: A dictionary of named parameters to send with the command. :Returns: The command's JSON response loaded into a dictionary object. """ if self.session_id is not None: if not params: params = {'sessionId': self.session_id} elif 'sessionId' not in params: params['sessionId'] = self.session_id params = self._wrap_value(params) response = self.command_executor.execute(driver_command, params) if response: self.error_handler.check_response(response) response['value'] = self._unwrap_value( response.get('value', None)) return response # If the server doesn't send a response, assume the command was # a success return {'success': 0, 'value': None, 'sessionId': self.session_id} def get(self, url): """ Loads a web page in the current browser session. """ self.execute(Command.GET, {'url': url}) @property def title(self): """Returns the title of the current page. :Usage: :: title = driver.title """ resp = self.execute(Command.GET_TITLE) return resp['value'] if resp['value'] is not None else "" def find_element_by_id(self, id_): """Finds an element by id. :Args: - id\\_ - The id of the element to be found. :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: :: element = driver.find_element_by_id('foo') """ warnings.warn("find_element_by_* commands are deprecated. 
Please use find_element() instead") return self.find_element(by=By.ID, value=id_) def find_elements_by_id(self, id_): """ Finds multiple elements by id. :Args: - id\\_ - The id of the elements to be found. :Returns: - list of WebElement - a list with elements if any was found. An empty list if not :Usage: :: elements = driver.find_elements_by_id('foo') """ warnings.warn("find_elements_by_* commands are deprecated. Please use find_elements() instead") return self.find_elements(by=By.ID, value=id_) def find_element_by_xpath(self, xpath): """ Finds an element by xpath. :Args: - xpath - The xpath locator of the element to find. :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: :: element = driver.find_element_by_xpath('//div/td[1]') """ warnings.warn("find_element_by_* commands are deprecated. Please use find_element() instead") return self.find_element(by=By.XPATH, value=xpath) def find_elements_by_xpath(self, xpath): """ Finds multiple elements by xpath. :Args: - xpath - The xpath locator of the elements to be found. :Returns: - list of WebElement - a list with elements if any was found. An empty list if not :Usage: :: elements = driver.find_elements_by_xpath("//div[contains(@class, 'foo')]") """ warnings.warn("find_elements_by_* commands are deprecated. Please use find_elements() instead") return self.find_elements(by=By.XPATH, value=xpath) def find_element_by_link_text(self, link_text): """ Finds an element by link text. :Args: - link_text: The text of the element to be found. :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: :: element = driver.find_element_by_link_text('Sign In') """ warnings.warn("find_element_by_* commands are deprecated. Please use find_element() instead") return self.find_element(by=By.LINK_TEXT, value=link_text) def find_elements_by_link_text(self, text): """ Finds elements by link text. :Args: - link_text: The text of the elements to be found. :Returns: - list of webelement - a list with elements if any was found. an empty list if not :Usage: :: elements = driver.find_elements_by_link_text('Sign In') """ warnings.warn("find_elements_by_* commands are deprecated. Please use find_elements() instead") return self.find_elements(by=By.LINK_TEXT, value=text) def find_element_by_partial_link_text(self, link_text): """ Finds an element by a partial match of its link text. :Args: - link_text: The text of the element to partially match on. :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: :: element = driver.find_element_by_partial_link_text('Sign') """ warnings.warn("find_element_by_* commands are deprecated. Please use find_element() instead") return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text) def find_elements_by_partial_link_text(self, link_text): """ Finds elements by a partial match of their link text. :Args: - link_text: The text of the element to partial match on. :Returns: - list of webelement - a list with elements if any was found. an empty list if not :Usage: :: elements = driver.find_elements_by_partial_link_text('Sign') """ warnings.warn("find_elements_by_* commands are deprecated. Please use find_elements() instead") return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text) def find_element_by_name(self, name): """ Finds an element by name. :Args: - name: The name of the element to find. 
:Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: :: element = driver.find_element_by_name('foo') """ warnings.warn("find_element_by_* commands are deprecated. Please use find_element() instead") return self.find_element(by=By.NAME, value=name) def find_elements_by_name(self, name): """ Finds elements by name. :Args: - name: The name of the elements to find. :Returns: - list of webelement - a list with elements if any was found. an empty list if not :Usage: :: elements = driver.find_elements_by_name('foo') """ warnings.warn("find_elements_by_* commands are deprecated. Please use find_elements() instead") return self.find_elements(by=By.NAME, value=name) def find_element_by_tag_name(self, name): """ Finds an element by tag name. :Args: - name - name of html tag (eg: h1, a, span) :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: :: element = driver.find_element_by_tag_name('h1') """ warnings.warn("find_element_by_* commands are deprecated. Please use find_element() instead") return self.find_element(by=By.TAG_NAME, value=name) def find_elements_by_tag_name(self, name): """ Finds elements by tag name. :Args: - name - name of html tag (eg: h1, a, span) :Returns: - list of WebElement - a list with elements if any was found. An empty list if not :Usage: :: elements = driver.find_elements_by_tag_name('h1') """ warnings.warn("find_elements_by_* commands are deprecated. Please use find_elements() instead") return self.find_elements(by=By.TAG_NAME, value=name) def find_element_by_class_name(self, name): """ Finds an element by class name. :Args: - name: The class name of the element to find. :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: :: element = driver.find_element_by_class_name('foo') """ warnings.warn("find_element_by_* commands are deprecated. Please use find_element() instead") return self.find_element(by=By.CLASS_NAME, value=name) def find_elements_by_class_name(self, name): """ Finds elements by class name. :Args: - name: The class name of the elements to find. :Returns: - list of WebElement - a list with elements if any was found. An empty list if not :Usage: :: elements = driver.find_elements_by_class_name('foo') """ warnings.warn("find_elements_by_* commands are deprecated. Please use find_elements() instead") return self.find_elements(by=By.CLASS_NAME, value=name) def find_element_by_css_selector(self, css_selector): """ Finds an element by css selector. :Args: - css_selector - CSS selector string, ex: 'a.nav#home' :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: :: element = driver.find_element_by_css_selector('#foo') """ warnings.warn("find_element_by_* commands are deprecated. Please use find_element() instead") return self.find_element(by=By.CSS_SELECTOR, value=css_selector) def find_elements_by_css_selector(self, css_selector): """ Finds elements by css selector. :Args: - css_selector - CSS selector string, ex: 'a.nav#home' :Returns: - list of WebElement - a list with elements if any was found. An empty list if not :Usage: :: elements = driver.find_elements_by_css_selector('.foo') """ warnings.warn("find_elements_by_* commands are deprecated. 
Please use find_elements() instead") return self.find_elements(by=By.CSS_SELECTOR, value=css_selector) def pin_script(self, script): """ """ script_key = ScriptKey() self.pinned_scripts[script_key.id] = script return script_key def unpin(self, script_key): """ """ self.pinned_scripts.pop(script_key.id) def get_pinned_scripts(self): """ """ return list(self.pinned_scripts.keys()) def execute_script(self, script, *args): """ Synchronously Executes JavaScript in the current window/frame. :Args: - script: The JavaScript to execute. - \\*args: Any applicable arguments for your JavaScript. :Usage: :: driver.execute_script('return document.title;') """ if isinstance(script, ScriptKey): try: script = self.pinned_scripts[script.id] except KeyError: raise JavascriptException("Pinned script could not be found") converted_args = list(args) command = None if self.w3c: command = Command.W3C_EXECUTE_SCRIPT else: command = Command.EXECUTE_SCRIPT return self.execute(command, { 'script': script, 'args': converted_args})['value'] def execute_async_script(self, script, *args): """ Asynchronously Executes JavaScript in the current window/frame. :Args: - script: The JavaScript to execute. - \\*args: Any applicable arguments for your JavaScript. :Usage: :: script = "var callback = arguments[arguments.length - 1]; " \\ "window.setTimeout(function(){ callback('timeout') }, 3000);" driver.execute_async_script(script) """ converted_args = list(args) if self.w3c: command = Command.W3C_EXECUTE_SCRIPT_ASYNC else: command = Command.EXECUTE_ASYNC_SCRIPT return self.execute(command, { 'script': script, 'args': converted_args})['value'] @property def current_url(self): """ Gets the URL of the current page. :Usage: :: driver.current_url """ return self.execute(Command.GET_CURRENT_URL)['value'] @property def page_source(self): """ Gets the source of the current page. :Usage: :: driver.page_source """ return self.execute(Command.GET_PAGE_SOURCE)['value'] def close(self): """ Closes the current window. :Usage: :: driver.close() """ self.execute(Command.CLOSE) def quit(self): """ Quits the driver and closes every associated window. :Usage: :: driver.quit() """ try: self.execute(Command.QUIT) finally: self.stop_client() self.command_executor.close() @property def current_window_handle(self): """ Returns the handle of the current window. :Usage: :: driver.current_window_handle """ if self.w3c: return self.execute(Command.W3C_GET_CURRENT_WINDOW_HANDLE)['value'] else: return self.execute(Command.GET_CURRENT_WINDOW_HANDLE)['value'] @property def window_handles(self): """ Returns the handles of all windows within the current session. 
:Usage: :: driver.window_handles """ if self.w3c: return self.execute(Command.W3C_GET_WINDOW_HANDLES)['value'] else: return self.execute(Command.GET_WINDOW_HANDLES)['value'] def maximize_window(self): """ Maximizes the current window that webdriver is using """ params = None command = Command.W3C_MAXIMIZE_WINDOW if not self.w3c: command = Command.MAXIMIZE_WINDOW params = {'windowHandle': 'current'} self.execute(command, params) def fullscreen_window(self): """ Invokes the window manager-specific 'full screen' operation """ self.execute(Command.FULLSCREEN_WINDOW) def minimize_window(self): """ Invokes the window manager-specific 'minimize' operation """ self.execute(Command.MINIMIZE_WINDOW) @property def switch_to(self): """ :Returns: - SwitchTo: an object containing all options to switch focus into :Usage: :: element = driver.switch_to.active_element alert = driver.switch_to.alert driver.switch_to.default_content() driver.switch_to.frame('frame_name') driver.switch_to.frame(1) driver.switch_to.frame(driver.find_elements_by_tag_name("iframe")[0]) driver.switch_to.parent_frame() driver.switch_to.window('main') """ return self._switch_to # Navigation def back(self): """ Goes one step backward in the browser history. :Usage: :: driver.back() """ self.execute(Command.GO_BACK) def forward(self): """ Goes one step forward in the browser history. :Usage: :: driver.forward() """ self.execute(Command.GO_FORWARD) def refresh(self): """ Refreshes the current page. :Usage: :: driver.refresh() """ self.execute(Command.REFRESH) # Options def get_cookies(self): """ Returns a set of dictionaries, corresponding to cookies visible in the current session. :Usage: :: driver.get_cookies() """ return self.execute(Command.GET_ALL_COOKIES)['value'] def get_cookie(self, name): """ Get a single cookie by name. Returns the cookie if found, None if not. :Usage: :: driver.get_cookie('my_cookie') """ if self.w3c: try: return self.execute(Command.GET_COOKIE, {'name': name})['value'] except NoSuchCookieException: return None else: cookies = self.get_cookies() for cookie in cookies: if cookie['name'] == name: return cookie return None def delete_cookie(self, name): """ Deletes a single cookie with the given name. :Usage: :: driver.delete_cookie('my_cookie') """ self.execute(Command.DELETE_COOKIE, {'name': name}) def delete_all_cookies(self): """ Delete all cookies in the scope of the session. :Usage: :: driver.delete_all_cookies() """ self.execute(Command.DELETE_ALL_COOKIES) def add_cookie(self, cookie_dict): """ Adds a cookie to your current session. :Args: - cookie_dict: A dictionary object, with required keys - "name" and "value"; optional keys - "path", "domain", "secure", "expiry", "sameSite" Usage: driver.add_cookie({'name' : 'foo', 'value' : 'bar'}) driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/'}) driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/', 'secure':True}) driver.add_cookie({'name': 'foo', 'value': 'bar', 'sameSite': 'Strict'}) """ if 'sameSite' in cookie_dict: assert cookie_dict['sameSite'] in ['Strict', 'Lax'] self.execute(Command.ADD_COOKIE, {'cookie': cookie_dict}) else: self.execute(Command.ADD_COOKIE, {'cookie': cookie_dict}) # Timeouts def implicitly_wait(self, time_to_wait): """ Sets a sticky timeout to implicitly wait for an element to be found, or a command to complete. This method only needs to be called one time per session. To set the timeout for calls to execute_async_script, see set_script_timeout. 
:Args: - time_to_wait: Amount of time to wait (in seconds) :Usage: :: driver.implicitly_wait(30) """ if self.w3c: self.execute(Command.SET_TIMEOUTS, { 'implicit': int(float(time_to_wait) * 1000)}) else: self.execute(Command.IMPLICIT_WAIT, { 'ms': float(time_to_wait) * 1000}) def set_script_timeout(self, time_to_wait): """ Set the amount of time that the script should wait during an execute_async_script call before throwing an error. :Args: - time_to_wait: The amount of time to wait (in seconds) :Usage: :: driver.set_script_timeout(30) """ if self.w3c: self.execute(Command.SET_TIMEOUTS, { 'script': int(float(time_to_wait) * 1000)}) else: self.execute(Command.SET_SCRIPT_TIMEOUT, { 'ms': float(time_to_wait) * 1000}) def set_page_load_timeout(self, time_to_wait): """ Set the amount of time to wait for a page load to complete before throwing an error. :Args: - time_to_wait: The amount of time to wait :Usage: :: driver.set_page_load_timeout(30) """ try: self.execute(Command.SET_TIMEOUTS, { 'pageLoad': int(float(time_to_wait) * 1000)}) except WebDriverException: self.execute(Command.SET_TIMEOUTS, { 'ms': float(time_to_wait) * 1000, 'type': 'page load'}) @property def timeouts(self): """ Get all the timeouts that have been set on the current session :Usage: :: driver.timeouts :rtype: Timeout """ timeouts = self.execute(Command.GET_TIMEOUTS)['value'] timeouts["implicit_wait"] = timeouts.pop("implicit") / 1000 timeouts["page_load"] = timeouts.pop("pageLoad") / 1000 timeouts["script"] = timeouts.pop("script") / 1000 return Timeouts(**timeouts) @timeouts.setter def timeouts(self, timeouts): """ Set all timeouts for the session. This will override any previously set timeouts. :Usage: :: my_timeouts = Timeouts() my_timeouts.implicit_wait = 10 driver.timeouts = my_timeouts """ self.execute(Command.SET_TIMEOUTS, timeouts._to_json())['value'] def find_element(self, by=By.ID, value=None): """ Find an element given a By strategy and locator. :Usage: :: element = driver.find_element(By.ID, 'foo') :rtype: WebElement """ if self.w3c: if by == By.ID: by = By.CSS_SELECTOR value = '[id="%s"]' % value elif by == By.TAG_NAME: by = By.CSS_SELECTOR elif by == By.CLASS_NAME: by = By.CSS_SELECTOR value = ".%s" % value elif by == By.NAME: by = By.CSS_SELECTOR value = '[name="%s"]' % value return self.execute(Command.FIND_ELEMENT, { 'using': by, 'value': value})['value'] def find_elements(self, by=By.ID, value=None): """ Find elements given a By strategy and locator. :Usage: :: elements = driver.find_elements(By.CLASS_NAME, 'foo') :rtype: list of WebElement """ if isinstance(by, RelativeBy): _pkg = '.'.join(__name__.split('.')[:-1]) raw_function = pkgutil.get_data(_pkg, 'findElements.js').decode('utf8') find_element_js = "return ({}).apply(null, arguments);".format(raw_function) return self.execute_script(find_element_js, by.to_dict()) if self.w3c: if by == By.ID: by = By.CSS_SELECTOR value = '[id="%s"]' % value elif by == By.TAG_NAME: by = By.CSS_SELECTOR elif by == By.CLASS_NAME: by = By.CSS_SELECTOR value = ".%s" % value elif by == By.NAME: by = By.CSS_SELECTOR value = '[name="%s"]' % value # Return empty list if driver returns null # See https://github.com/SeleniumHQ/selenium/issues/4555 return self.execute(Command.FIND_ELEMENTS, { 'using': by, 'value': value})['value'] or [] @property def desired_capabilities(self): """ returns the drivers current desired capabilities being used """ warnings.warn("desired_capabilities is deprecated. 
Please call capabilities.", DeprecationWarning, stacklevel=2) return self.caps @property def capabilities(self): """ returns the drivers current capabilities being used. """ return self.caps def get_screenshot_as_file(self, filename): """ Saves a screenshot of the current window to a PNG image file. Returns False if there is any IOError, else returns True. Use full paths in your filename. :Args: - filename: The full path you wish to save your screenshot to. This should end with a `.png` extension. :Usage: :: driver.get_screenshot_as_file('/Screenshots/foo.png') """ if not filename.lower().endswith('.png'): warnings.warn("name used for saved screenshot does not match file " "type. It should end with a `.png` extension", UserWarning) png = self.get_screenshot_as_png() try: with open(filename, 'wb') as f: f.write(png) except IOError: return False finally: del png return True def save_screenshot(self, filename): """ Saves a screenshot of the current window to a PNG image file. Returns False if there is any IOError, else returns True. Use full paths in your filename. :Args: - filename: The full path you wish to save your screenshot to. This should end with a `.png` extension. :Usage: :: driver.save_screenshot('/Screenshots/foo.png') """ return self.get_screenshot_as_file(filename) def get_screenshot_as_png(self): """ Gets the screenshot of the current window as a binary data. :Usage: :: driver.get_screenshot_as_png() """ return base64.b64decode(self.get_screenshot_as_base64().encode('ascii')) def get_screenshot_as_base64(self): """ Gets the screenshot of the current window as a base64 encoded string which is useful in embedded images in HTML. :Usage: :: driver.get_screenshot_as_base64() """ return self.execute(Command.SCREENSHOT)['value'] def set_window_size(self, width, height, windowHandle='current'): """ Sets the width and height of the current window. (window.resizeTo) :Args: - width: the width in pixels to set the window to - height: the height in pixels to set the window to :Usage: :: driver.set_window_size(800,600) """ if self.w3c: if windowHandle != 'current': warnings.warn("Only 'current' window is supported for W3C compatibile browsers.") self.set_window_rect(width=int(width), height=int(height)) else: self.execute(Command.SET_WINDOW_SIZE, { 'width': int(width), 'height': int(height), 'windowHandle': windowHandle}) def get_window_size(self, windowHandle='current'): """ Gets the width and height of the current window. :Usage: :: driver.get_window_size() """ command = Command.GET_WINDOW_SIZE if self.w3c: if windowHandle != 'current': warnings.warn("Only 'current' window is supported for W3C compatibile browsers.") size = self.get_window_rect() else: size = self.execute(command, {'windowHandle': windowHandle}) if size.get('value', None) is not None: size = size['value'] return {k: size[k] for k in ('width', 'height')} def set_window_position(self, x, y, windowHandle='current'): """ Sets the x,y position of the current window. (window.moveTo) :Args: - x: the x-coordinate in pixels to set the window position - y: the y-coordinate in pixels to set the window position :Usage: :: driver.set_window_position(0,0) """ if self.w3c: if windowHandle != 'current': warnings.warn("Only 'current' window is supported for W3C compatibile browsers.") return self.set_window_rect(x=int(x), y=int(y)) else: self.execute(Command.SET_WINDOW_POSITION, { 'x': int(x), 'y': int(y), 'windowHandle': windowHandle }) def get_window_position(self, windowHandle='current'): """ Gets the x,y position of the current window. 
:Usage: :: driver.get_window_position() """ if self.w3c: if windowHandle != 'current': warnings.warn("Only 'current' window is supported for W3C compatibile browsers.") position = self.get_window_rect() else: position = self.execute(Command.GET_WINDOW_POSITION, {'windowHandle': windowHandle})['value'] return {k: position[k] for k in ('x', 'y')} def get_window_rect(self): """ Gets the x, y coordinates of the window as well as height and width of the current window. :Usage: :: driver.get_window_rect() """ return self.execute(Command.GET_WINDOW_RECT)['value'] def set_window_rect(self, x=None, y=None, width=None, height=None): """ Sets the x, y coordinates of the window as well as height and width of the current window. This method is only supported for W3C compatible browsers; other browsers should use `set_window_position` and `set_window_size`. :Usage: :: driver.set_window_rect(x=10, y=10) driver.set_window_rect(width=100, height=200) driver.set_window_rect(x=10, y=10, width=100, height=200) """ if not self.w3c: raise UnknownMethodException("set_window_rect is only supported for W3C compatible browsers") if (x is None and y is None) and (height is None and width is None): raise InvalidArgumentException("x and y or height and width need values") return self.execute(Command.SET_WINDOW_RECT, {"x": x, "y": y, "width": width, "height": height})['value'] @property def file_detector(self): return self._file_detector @file_detector.setter def file_detector(self, detector): """ Set the file detector to be used when sending keyboard input. By default, this is set to a file detector that does nothing. see FileDetector see LocalFileDetector see UselessFileDetector :Args: - detector: The detector to use. Must not be None. """ if detector is None: raise WebDriverException("You may not set a file detector that is null") if not isinstance(detector, FileDetector): raise WebDriverException("Detector has to be instance of FileDetector") self._file_detector = detector @property def orientation(self): """ Gets the current orientation of the device :Usage: :: orientation = driver.orientation """ return self.execute(Command.GET_SCREEN_ORIENTATION)['value'] @orientation.setter def orientation(self, value): """ Sets the current orientation of the device :Args: - value: orientation to set it to. :Usage: :: driver.orientation = 'landscape' """ allowed_values = ['LANDSCAPE', 'PORTRAIT'] if value.upper() in allowed_values: self.execute(Command.SET_SCREEN_ORIENTATION, {'orientation': value}) else: raise WebDriverException("You can only set the orientation to 'LANDSCAPE' and 'PORTRAIT'") @property def application_cache(self): """ Returns a ApplicationCache Object to interact with the browser app cache""" return ApplicationCache(self) @property def log_types(self): """ Gets a list of the available log types. This only works with w3c compliant browsers. 
:Usage: :: driver.log_types """ return self.execute(Command.GET_AVAILABLE_LOG_TYPES)['value'] if self.w3c else [] def get_log(self, log_type): """ Gets the log for a given log type :Args: - log_type: type of log that which will be returned :Usage: :: driver.get_log('browser') driver.get_log('driver') driver.get_log('client') driver.get_log('server') """ return self.execute(Command.GET_LOG, {'type': log_type})['value'] @asynccontextmanager async def add_js_error_listener(self): """ Listens for JS errors and when the contextmanager exits check if there were JS Errors :Usage: :: async with driver.add_js_error_listener() as error: driver.find_element(By.ID, "throwing-mouseover").click() assert error is not None assert error.exception_details.stack_trace.call_frames[0].function_name == "onmouseover" """ assert sys.version_info >= (3, 7) global cdp async with self._get_bidi_connection(): global devtools session = cdp.get_session_context('page.enable') await session.execute(devtools.page.enable()) session = cdp.get_session_context('runtime.enable') await session.execute(devtools.runtime.enable()) js_exception = devtools.runtime.ExceptionThrown(None, None) async with session.wait_for(devtools.runtime.ExceptionThrown) as exception: yield js_exception js_exception.timestamp = exception.value.timestamp js_exception.exception_details = exception.value.exception_details @asynccontextmanager async def add_listener(self, event_type): ''' Listens for certain events that are passed in. :Args: - event_type: The type of event that we want to look at. :Usage: :: async with driver.add_listener(Console.log) as messages: driver.execute_script("console.log('I like cheese')") assert messages["message"] == "I love cheese" ''' assert sys.version_info >= (3, 7) global cdp from selenium.webdriver.common.bidi.console import Console async with self._get_bidi_connection(): global devtools session = cdp.get_session_context('page.enable') await session.execute(devtools.page.enable()) session = cdp.get_session_context('runtime.enable') await session.execute(devtools.runtime.enable()) console = { "message": None, "level": None } async with session.wait_for(devtools.runtime.ConsoleAPICalled) as messages: yield console if event_type == Console.ERROR: console["message"] = messages.value.args[0].value console["level"] = messages.value.args[0].type_ if event_type == Console.ALL: console["message"] = messages.value.args[0].value console["level"] = messages.value.args[0].type_ @asynccontextmanager async def _get_bidi_connection(self): global cdp import_cdp() ws_url = None if self.caps.get("se:options"): ws_url = self.caps.get("se:options").get("cdp") else: version, ws_url = self._get_cdp_details() if ws_url is None: raise WebDriverException("Unable to find url to connect to from capabilities") cdp.import_devtools(version) global devtools devtools = importlib.import_module("selenium.webdriver.common.devtools.v{}".format(version)) async with cdp.open_cdp(ws_url) as conn: targets = await conn.execute(devtools.target.get_targets()) target_id = targets[0].target_id async with conn.open_session(target_id) as session: yield session def _get_cdp_details(self): import json import urllib3 http = urllib3.PoolManager() if self.caps.get("browserName") == "chrome": debugger_address = self.caps.get(f"{self.vendor_prefix}:{self.caps.get('browserName')}Options").get("debuggerAddress") else: debugger_address = self.caps.get("moz:debuggerAddress") res = http.request('GET', f"http://{debugger_address}/json/version") data = json.loads(res.data) 
browser_version = data.get("Browser") websocket_url = data.get("webSocketDebuggerUrl") import re version = re.search(r".*/(\d+)\.", browser_version).group(1) return version, websocket_url
1
18,340
Let's not use a form of Hungarian notation in naming our variables
SeleniumHQ-selenium
rb
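A hypothetical Python sketch of the naming style the review above discourages; the variable names are invented for illustration and do not come from the patch itself. Hungarian-style names encode the value's type in the identifier, which adds noise and goes stale when the type changes:

from selenium.webdriver.common.by import By

def find_content(driver):
    # Hungarian-style: the type is baked into the name and becomes misleading
    # if the type ever changes.
    str_css_selector = "div.content"
    lst_elements = driver.find_elements(by=By.CSS_SELECTOR, value=str_css_selector)

    # Preferred: the name describes the role, not the type.
    css_selector = "div.content"
    return driver.find_elements(by=By.CSS_SELECTOR, value=css_selector)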
@@ -527,8 +527,13 @@ static void handle_incoming_request(struct st_h2o_http1_conn_t *conn) send_bad_request(conn, "line folding of header fields is not supported"); return; } + + H2O_PROBE_CONN(RECEIVE_REQUEST_HEADERS, &conn->super, conn->_req_index, &conn->req.input.method, &conn->req.input.authority, + &conn->req.input.path, conn->req.version, conn->req.headers.entries, conn->req.headers.size); + H2O_PROBE(RECEIVE_REQUEST_HEADERS, &conn->super, conn->_req_index, &conn->req.input.method, &conn->req.input.authority, &conn->req.input.path, conn->req.version, conn->req.headers.entries, conn->req.headers.size); + if (entity_body_header_index != -1) { conn->req.timestamps.request_body_begin_at = h2o_gettimeofday(conn->super.ctx->loop); if (expect.base != NULL) {
1
/* * Copyright (c) 2014-2016 DeNA Co., Ltd., Kazuho Oku, Shota Fukumori, * Fastly, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <inttypes.h> #include <stddef.h> #include <stdio.h> #include <stdlib.h> #include "picohttpparser.h" #include "h2o.h" #include "h2o/http1.h" #include "h2o/http2.h" #include "h2o/probes.h" #define MAX_PULL_BUF_SZ 65536 struct st_h2o_http1_finalostream_t { h2o_ostream_t super; int sent_headers; char *chunked_buf; /* buffer used for chunked-encoding (NULL unless chunked encoding is used) */ struct { void *buf; h2o_ostream_pull_cb cb; } pull; struct { h2o_iovec_vector_t bufs; unsigned sending : 1; struct { h2o_iovec_t *inbufs; size_t inbufcnt; h2o_send_state_t send_state; } pending_final; } informational; }; struct st_h2o_http1_conn_t { h2o_conn_t super; h2o_socket_t *sock; /* internal structure */ h2o_linklist_t _conns; h2o_timer_t _timeout_entry; uint64_t _req_index; size_t _prevreqlen; size_t _unconsumed_request_size; struct st_h2o_http1_req_entity_reader *_req_entity_reader; struct st_h2o_http1_finalostream_t _ostr_final; struct { void *data; h2o_http1_upgrade_cb cb; } upgrade; /* the HTTP request / response (intentionally placed at the last, since it is a large structure and has it's own ctor) */ h2o_req_t req; }; struct st_h2o_http1_req_entity_reader { void (*handle_incoming_entity)(struct st_h2o_http1_conn_t *conn); }; struct st_h2o_http1_content_length_entity_reader { struct st_h2o_http1_req_entity_reader super; size_t content_length; }; struct st_h2o_http1_chunked_entity_reader { struct st_h2o_http1_req_entity_reader super; struct phr_chunked_decoder decoder; }; static void proceed_pull(struct st_h2o_http1_conn_t *conn, size_t nfilled); static void finalostream_start_pull(h2o_ostream_t *_self, h2o_ostream_pull_cb cb); static void finalostream_send(h2o_ostream_t *_self, h2o_req_t *req, h2o_iovec_t *inbufs, size_t inbufcnt, h2o_send_state_t state); static void finalostream_send_informational(h2o_ostream_t *_self, h2o_req_t *req); static void reqread_on_read(h2o_socket_t *sock, const char *err); static void reqread_on_timeout(h2o_timer_t *entry); static void reqread_start(struct st_h2o_http1_conn_t *conn); static int foreach_request(h2o_context_t *ctx, int (*cb)(h2o_req_t *req, void *cbdata), void *cbdata); const h2o_protocol_callbacks_t H2O_HTTP1_CALLBACKS = { NULL, /* graceful_shutdown (note: nothing special needs to be done for handling graceful shutdown) */ foreach_request}; static int is_msie(h2o_req_t *req) { ssize_t cursor = 
h2o_find_header(&req->headers, H2O_TOKEN_USER_AGENT, -1); if (cursor == -1) return 0; if (h2o_strstr(req->headers.entries[cursor].value.base, req->headers.entries[cursor].value.len, H2O_STRLIT("; MSIE ")) == SIZE_MAX) return 0; return 1; } static void init_request(struct st_h2o_http1_conn_t *conn) { if (conn->_req_index != 0) h2o_dispose_request(&conn->req); h2o_init_request(&conn->req, &conn->super, NULL); ++conn->_req_index; conn->req._ostr_top = &conn->_ostr_final.super; conn->_ostr_final = (struct st_h2o_http1_finalostream_t){{ NULL, /* next */ finalostream_send, /* do_send */ NULL, /* stop */ finalostream_start_pull, /* start_pull */ conn->super.ctx->globalconf->send_informational_mode == H2O_SEND_INFORMATIONAL_MODE_ALL ? finalostream_send_informational : NULL, /* send_informational */ }}; } static void close_connection(struct st_h2o_http1_conn_t *conn, int close_socket) { h2o_timer_unlink(&conn->_timeout_entry); h2o_dispose_request(&conn->req); if (conn->sock != NULL && close_socket) h2o_socket_close(conn->sock); h2o_linklist_unlink(&conn->_conns); free(conn); } /** * timer is activated if cb != NULL, disactivated otherwise */ static void set_timeout(struct st_h2o_http1_conn_t *conn, uint64_t timeout, h2o_timer_cb cb) { if (conn->_timeout_entry.cb != NULL) h2o_timer_unlink(&conn->_timeout_entry); conn->_timeout_entry.cb = cb; if (cb != NULL) h2o_timer_link(conn->super.ctx->loop, timeout, &conn->_timeout_entry); } static void process_request(struct st_h2o_http1_conn_t *conn) { if (conn->sock->ssl == NULL && conn->req.upgrade.base != NULL && conn->super.ctx->globalconf->http1.upgrade_to_http2 && conn->req.upgrade.len >= 3 && h2o_lcstris(conn->req.upgrade.base, 3, H2O_STRLIT("h2c")) && (conn->req.upgrade.len == 3 || (conn->req.upgrade.len == 6 && (memcmp(conn->req.upgrade.base + 3, H2O_STRLIT("-14")) == 0 || memcmp(conn->req.upgrade.base + 3, H2O_STRLIT("-16")) == 0)))) { if (h2o_http2_handle_upgrade(&conn->req, conn->super.connected_at) == 0) { return; } } h2o_process_request(&conn->req); } #define DECL_ENTITY_READ_SEND_ERROR_XXX(status_) \ static void entity_read_send_error_##status_(struct st_h2o_http1_conn_t *conn, const char *reason, const char *body) \ { \ if (conn->_ostr_final.sent_headers) \ return; \ conn->_req_entity_reader = NULL; \ set_timeout(conn, 0, NULL); \ h2o_socket_read_stop(conn->sock); \ conn->super.ctx->emitted_error_status[H2O_STATUS_ERROR_##status_]++; \ h2o_send_error_generic(&conn->req, status_, reason, body, H2O_SEND_ERROR_HTTP1_CLOSE_CONNECTION); \ } DECL_ENTITY_READ_SEND_ERROR_XXX(400) DECL_ENTITY_READ_SEND_ERROR_XXX(413) DECL_ENTITY_READ_SEND_ERROR_XXX(502) static void handle_one_body_fragment(struct st_h2o_http1_conn_t *conn, size_t fragment_size, int complete) { set_timeout(conn, 0, NULL); h2o_socket_read_stop(conn->sock); if (conn->req.write_req.cb(conn->req.write_req.ctx, h2o_iovec_init(conn->sock->input->bytes, fragment_size), complete) != 0) { entity_read_send_error_502(conn, "Bad Gateway", "Bad Gateway"); return; } h2o_buffer_consume(&conn->sock->input, fragment_size); conn->req._req_body.bytes_received += fragment_size; if (complete) { conn->req.proceed_req = NULL; conn->_req_entity_reader = NULL; } } static void handle_chunked_entity_read(struct st_h2o_http1_conn_t *conn) { struct st_h2o_http1_chunked_entity_reader *reader = (void *)conn->_req_entity_reader; size_t bufsz, consume; ssize_t ret; int complete = 1; /* decode the incoming data */ if ((consume = bufsz = conn->sock->input->size) == 0) return; ret = phr_decode_chunked(&reader->decoder, 
conn->sock->input->bytes, &bufsz); if (ret != -1 && bufsz + conn->req._req_body.bytes_received >= conn->super.ctx->globalconf->max_request_entity_size) { entity_read_send_error_413(conn, "Request Entity Too Large", "request entity is too large"); return; } if (ret < 0) { if (ret == -2) { /* incomplete */ complete = 0; goto Done; } /* error */ entity_read_send_error_400(conn, "Invalid Request", "broken chunked-encoding"); return; } /* complete */ consume -= ret; Done: handle_one_body_fragment(conn, bufsz, complete); h2o_buffer_consume(&conn->sock->input, consume - bufsz); } static int create_chunked_entity_reader(struct st_h2o_http1_conn_t *conn) { struct st_h2o_http1_chunked_entity_reader *reader = h2o_mem_alloc_pool(&conn->req.pool, *reader, 1); conn->_req_entity_reader = &reader->super; reader->super.handle_incoming_entity = handle_chunked_entity_read; memset(&reader->decoder, 0, sizeof(reader->decoder)); reader->decoder.consume_trailer = 1; return 0; } static void handle_content_length_entity_read(struct st_h2o_http1_conn_t *conn) { int complete = 0; struct st_h2o_http1_content_length_entity_reader *reader = (void *)conn->_req_entity_reader; size_t length = conn->sock->input->size; if (conn->req._req_body.bytes_received + conn->sock->input->size >= reader->content_length) { complete = 1; length = reader->content_length - conn->req._req_body.bytes_received; } if (!complete && length == 0) return; handle_one_body_fragment(conn, length, complete); } static int create_content_length_entity_reader(struct st_h2o_http1_conn_t *conn, size_t content_length) { struct st_h2o_http1_content_length_entity_reader *reader = h2o_mem_alloc_pool(&conn->req.pool, *reader, 1); conn->_req_entity_reader = &reader->super; reader->super.handle_incoming_entity = handle_content_length_entity_read; reader->content_length = content_length; return 0; } static int create_entity_reader(struct st_h2o_http1_conn_t *conn, const struct phr_header *entity_header) { /* strlen("content-length") is unequal to sizeof("transfer-encoding"), and thus checking the length only is sufficient */ if (entity_header->name_len == sizeof("transfer-encoding") - 1) { /* transfer-encoding */ if (!h2o_lcstris(entity_header->value, entity_header->value_len, H2O_STRLIT("chunked"))) { entity_read_send_error_400(conn, "Invalid Request", "unknown transfer-encoding"); return -1; } return create_chunked_entity_reader(conn); } else { /* content-length */ size_t content_length = h2o_strtosize(entity_header->value, entity_header->value_len); if (content_length == SIZE_MAX) { entity_read_send_error_400(conn, "Invalid Request", "broken content-length header"); return -1; } if (content_length > conn->super.ctx->globalconf->max_request_entity_size) { entity_read_send_error_413(conn, "Request Entity Too Large", "request entity is too large"); return -1; } conn->req.content_length = content_length; return create_content_length_entity_reader(conn, (size_t)content_length); } /* failed */ return -1; } static int init_headers(h2o_mem_pool_t *pool, h2o_headers_t *headers, const struct phr_header *src, size_t len, h2o_iovec_t *connection, h2o_iovec_t *host, h2o_iovec_t *upgrade, h2o_iovec_t *expect, ssize_t *entity_header_index) { *entity_header_index = -1; assert(headers->size == 0); /* setup */ if (len != 0) { size_t i; h2o_vector_reserve(pool, headers, len); for (i = 0; i != len; ++i) { const h2o_token_t *name_token; char orig_case[src[i].name_len]; /* reject multiline header */ if (src[i].name_len == 0) return -1; /* preserve the original case */ 
memcpy(orig_case, src[i].name, src[i].name_len); /* convert to lower-case in-place */ h2o_strtolower((char *)src[i].name, src[i].name_len); if ((name_token = h2o_lookup_token(src[i].name, src[i].name_len)) != NULL) { if (name_token->flags.is_init_header_special) { if (name_token == H2O_TOKEN_HOST) { host->base = (char *)src[i].value; host->len = src[i].value_len; } else if (name_token == H2O_TOKEN_CONTENT_LENGTH) { if (*entity_header_index == -1) *entity_header_index = i; } else if (name_token == H2O_TOKEN_TRANSFER_ENCODING) { *entity_header_index = i; } else if (name_token == H2O_TOKEN_EXPECT) { expect->base = (char *)src[i].value; expect->len = src[i].value_len; } else if (name_token == H2O_TOKEN_UPGRADE) { upgrade->base = (char *)src[i].value; upgrade->len = src[i].value_len; } else { assert(!"logic flaw"); } } else { h2o_add_header(pool, headers, name_token, orig_case, src[i].value, src[i].value_len); if (name_token == H2O_TOKEN_CONNECTION) *connection = headers->entries[headers->size - 1].value; } } else { h2o_add_header_by_str(pool, headers, src[i].name, src[i].name_len, 0, orig_case, src[i].value, src[i].value_len); } } } return 0; } static int fixup_request(struct st_h2o_http1_conn_t *conn, struct phr_header *headers, size_t num_headers, int minor_version, h2o_iovec_t *expect, ssize_t *entity_header_index) { h2o_iovec_t connection = {NULL, 0}, host = {NULL, 0}, upgrade = {NULL, 0}; expect->base = NULL; expect->len = 0; conn->req.input.scheme = conn->sock->ssl != NULL ? &H2O_URL_SCHEME_HTTPS : &H2O_URL_SCHEME_HTTP; conn->req.version = 0x100 | (minor_version != 0); /* RFC 7231 6.2: a server MUST NOT send a 1xx response to an HTTP/1.0 client */ if (conn->req.version < 0x101) conn->_ostr_final.super.send_informational = NULL; /* init headers */ if (init_headers(&conn->req.pool, &conn->req.headers, headers, num_headers, &connection, &host, &upgrade, expect, entity_header_index) != 0) return -1; /* copy the values to pool, since the buffer pointed by the headers may get realloced */ if (*entity_header_index != -1) { size_t i; conn->req.input.method = h2o_strdup(&conn->req.pool, conn->req.input.method.base, conn->req.input.method.len); conn->req.input.path = h2o_strdup(&conn->req.pool, conn->req.input.path.base, conn->req.input.path.len); for (i = 0; i != conn->req.headers.size; ++i) { h2o_header_t *header = conn->req.headers.entries + i; if (!h2o_iovec_is_token(header->name)) { *header->name = h2o_strdup(&conn->req.pool, header->name->base, header->name->len); } header->value = h2o_strdup(&conn->req.pool, header->value.base, header->value.len); } if (host.base != NULL) host = h2o_strdup(&conn->req.pool, host.base, host.len); if (upgrade.base != NULL) upgrade = h2o_strdup(&conn->req.pool, upgrade.base, upgrade.len); } /* path might contain absolute URL; if so, convert it */ if (conn->req.input.path.len != 0 && conn->req.input.path.base[0] != '/') { h2o_url_t url; if (h2o_url_parse(conn->req.input.path.base, conn->req.input.path.len, &url) == 0) { conn->req.input.path = url.path; host = conn->req.authority; } } /* move host header to req->authority */ if (host.base != NULL) conn->req.input.authority = host; /* setup persistent flag (and upgrade info) */ if (connection.base != NULL) { /* TODO contains_token function can be faster */ if (h2o_contains_token(connection.base, connection.len, H2O_STRLIT("keep-alive"), ',')) { conn->req.http1_is_persistent = 1; } if (upgrade.base != NULL && h2o_contains_token(connection.base, connection.len, H2O_STRLIT("upgrade"), ',')) { conn->req.upgrade = 
upgrade; } } else if (conn->req.version >= 0x101) { /* defaults to keep-alive if >= HTTP/1.1 */ conn->req.http1_is_persistent = 1; } /* disable keep-alive if shutdown is requested */ if (conn->req.http1_is_persistent && conn->super.ctx->shutdown_requested) conn->req.http1_is_persistent = 0; return 0; } static void on_continue_sent(h2o_socket_t *sock, const char *err) { struct st_h2o_http1_conn_t *conn = sock->data; if (err != NULL) { close_connection(conn, 1); return; } h2o_socket_read_start(sock, reqread_on_read); conn->_req_entity_reader->handle_incoming_entity(conn); } static int contains_crlf_only(const char *s, size_t len) { for (; len != 0; ++s, --len) if (!(*s == '\r' || *s == '\n')) return 0; return 1; } static void send_bad_request(struct st_h2o_http1_conn_t *conn, const char *body) { h2o_socket_read_stop(conn->sock); h2o_send_error_400(&conn->req, "Bad Request", body, H2O_SEND_ERROR_HTTP1_CLOSE_CONNECTION); } static void proceed_request(h2o_req_t *req, size_t written, int is_end_entity) { struct st_h2o_http1_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http1_conn_t, req, req); set_timeout(conn, conn->super.ctx->globalconf->http1.req_timeout, reqread_on_timeout); h2o_socket_read_start(conn->sock, reqread_on_read); return; } static int write_req_non_streaming(void *_req, h2o_iovec_t payload, int is_end_entity) { struct st_h2o_http1_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http1_conn_t, req, _req); if (h2o_buffer_append(&conn->req._req_body.body, payload.base, payload.len) == 0) return -1; conn->req.entity = h2o_iovec_init(conn->req._req_body.body->bytes, conn->req._req_body.body->size); if (is_end_entity) { conn->req.proceed_req = NULL; h2o_process_request(&conn->req); } else { proceed_request(&conn->req, payload.len, is_end_entity); } return 0; } static int write_req_streaming_pre_dispatch(void *_req, h2o_iovec_t payload, int is_end_stream) { struct st_h2o_http1_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http1_conn_t, req, _req); if (h2o_buffer_append(&conn->req._req_body.body, payload.base, payload.len) == 0) return -1; conn->req.entity = h2o_iovec_init(conn->req._req_body.body->bytes, conn->req._req_body.body->size); /* mark that we have seen eos */ if (is_end_stream) conn->req.proceed_req = NULL; return 0; } static void on_request_streaming_selected(h2o_req_t *req, int is_streaming) { struct st_h2o_http1_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http1_conn_t, req, req); if (is_streaming) { conn->req.write_req.cb = write_req_streaming_pre_dispatch; conn->req.proceed_req = proceed_request; h2o_process_request(&conn->req); return; } conn->req.write_req.cb = write_req_non_streaming; return; } static void handle_incoming_request(struct st_h2o_http1_conn_t *conn) { size_t inreqlen = conn->sock->input->size < H2O_MAX_REQLEN ? 
conn->sock->input->size : H2O_MAX_REQLEN; int reqlen, minor_version; struct phr_header headers[H2O_MAX_HEADERS]; size_t num_headers = H2O_MAX_HEADERS; ssize_t entity_body_header_index; h2o_iovec_t expect; /* need to set request_begin_at here for keep-alive connection */ if (conn->req.timestamps.request_begin_at.tv_sec == 0) conn->req.timestamps.request_begin_at = h2o_gettimeofday(conn->super.ctx->loop); reqlen = phr_parse_request(conn->sock->input->bytes, inreqlen, (const char **)&conn->req.input.method.base, &conn->req.input.method.len, (const char **)&conn->req.input.path.base, &conn->req.input.path.len, &minor_version, headers, &num_headers, conn->_prevreqlen); conn->_prevreqlen = inreqlen; switch (reqlen) { default: // parse complete conn->_unconsumed_request_size = reqlen; if (fixup_request(conn, headers, num_headers, minor_version, &expect, &entity_body_header_index) != 0) { set_timeout(conn, 0, NULL); send_bad_request(conn, "line folding of header fields is not supported"); return; } H2O_PROBE(RECEIVE_REQUEST_HEADERS, &conn->super, conn->_req_index, &conn->req.input.method, &conn->req.input.authority, &conn->req.input.path, conn->req.version, conn->req.headers.entries, conn->req.headers.size); if (entity_body_header_index != -1) { conn->req.timestamps.request_body_begin_at = h2o_gettimeofday(conn->super.ctx->loop); if (expect.base != NULL) { if (!h2o_lcstris(expect.base, expect.len, H2O_STRLIT("100-continue"))) { set_timeout(conn, 0, NULL); h2o_socket_read_stop(conn->sock); h2o_send_error_417(&conn->req, "Expectation Failed", "unknown expectation", H2O_SEND_ERROR_HTTP1_CLOSE_CONNECTION); return; } } if (create_entity_reader(conn, headers + entity_body_header_index) != 0) { return; } conn->req.write_req.cb = h2o_write_req_first; conn->req.write_req.on_streaming_selected = on_request_streaming_selected; conn->req.write_req.ctx = &conn->req; conn->_unconsumed_request_size = 0; h2o_buffer_consume(&conn->sock->input, reqlen); h2o_buffer_init(&conn->req._req_body.body, &h2o_socket_buffer_prototype); if (expect.base != NULL) { static const h2o_iovec_t res = {H2O_STRLIT("HTTP/1.1 100 Continue\r\n\r\n")}; h2o_socket_write(conn->sock, (void *)&res, 1, on_continue_sent); /* processing of the incoming entity is postponed until the 100 response is sent */ h2o_socket_read_stop(conn->sock); return; } conn->_req_entity_reader->handle_incoming_entity(conn); } else { set_timeout(conn, 0, NULL); h2o_socket_read_stop(conn->sock); process_request(conn); } return; case -2: // incomplete if (inreqlen == H2O_MAX_REQLEN) { send_bad_request(conn, "Bad Request"); } return; case -1: // error /* upgrade to HTTP/2 if the request starts with: PRI * HTTP/2 */ if (conn->super.ctx->globalconf->http1.upgrade_to_http2) { /* should check up to the first octet that phr_parse_request returns an error */ static const h2o_iovec_t HTTP2_SIG = {H2O_STRLIT("PRI * HTTP/2")}; if (conn->sock->input->size >= HTTP2_SIG.len && memcmp(conn->sock->input->bytes, HTTP2_SIG.base, HTTP2_SIG.len) == 0) { h2o_accept_ctx_t accept_ctx = {conn->super.ctx, conn->super.hosts}; h2o_socket_t *sock = conn->sock; struct timeval connected_at = conn->super.connected_at; /* destruct the connection after detatching the socket */ conn->sock = NULL; close_connection(conn, 1); /* and accept as http2 connection */ h2o_http2_accept(&accept_ctx, sock, connected_at); return; } } if (inreqlen <= 4 && contains_crlf_only(conn->sock->input->bytes, inreqlen)) { close_connection(conn, 1); } else { send_bad_request(conn, "Bad Request"); } return; } } void 
reqread_on_read(h2o_socket_t *sock, const char *err) { struct st_h2o_http1_conn_t *conn = sock->data; if (err != NULL) { close_connection(conn, 1); return; } if (conn->_req_entity_reader == NULL) handle_incoming_request(conn); else conn->_req_entity_reader->handle_incoming_entity(conn); } static void reqread_on_timeout(h2o_timer_t *entry) { struct st_h2o_http1_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http1_conn_t, _timeout_entry, entry); if (conn->_req_index == 1) { /* assign hostconf and bind conf so that the request can be logged */ h2o_hostconf_t *hostconf = h2o_req_setup(&conn->req); h2o_req_bind_conf(&conn->req, hostconf, &hostconf->fallback_path); /* set error status for logging */ conn->req.res.reason = "Request Timeout"; } conn->req.http1_is_persistent = 0; close_connection(conn, 1); } static inline void reqread_start(struct st_h2o_http1_conn_t *conn) { set_timeout(conn, conn->super.ctx->globalconf->http1.req_timeout, reqread_on_timeout); h2o_socket_read_start(conn->sock, reqread_on_read); if (conn->sock->input->size != 0) handle_incoming_request(conn); } static void on_send_next_push(h2o_socket_t *sock, const char *err) { struct st_h2o_http1_conn_t *conn = sock->data; if (err != NULL) close_connection(conn, 1); else h2o_proceed_response(&conn->req); } static void on_send_next_pull(h2o_socket_t *sock, const char *err) { struct st_h2o_http1_conn_t *conn = sock->data; if (err != NULL) close_connection(conn, 1); else proceed_pull(conn, 0); } static void cleanup_connection(struct st_h2o_http1_conn_t *conn) { if (!conn->req.http1_is_persistent) { /* TODO use lingering close */ close_connection(conn, 1); return; } assert(conn->req.proceed_req == NULL); assert(conn->_req_entity_reader == NULL); /* handle next request */ if (conn->_unconsumed_request_size) h2o_buffer_consume(&conn->sock->input, conn->_unconsumed_request_size); init_request(conn); conn->req._req_body.bytes_received = 0; conn->req.write_req.cb = NULL; conn->req.write_req.ctx = NULL; conn->req.proceed_req = NULL; conn->_prevreqlen = 0; conn->_unconsumed_request_size = 0; reqread_start(conn); } static void on_send_complete_post_trailers(h2o_socket_t *sock, const char *err) { struct st_h2o_http1_conn_t *conn = sock->data; if (err != NULL) conn->req.http1_is_persistent = 0; cleanup_connection(conn); } static void on_send_complete(h2o_socket_t *sock, const char *err) { struct st_h2o_http1_conn_t *conn = sock->data; assert(conn->req._ostr_top == &conn->_ostr_final.super); conn->req.timestamps.response_end_at = h2o_gettimeofday(conn->super.ctx->loop); if (err != NULL) conn->req.http1_is_persistent = 0; if (err == NULL && conn->req.send_server_timing && conn->_ostr_final.chunked_buf != NULL) { h2o_iovec_t trailer; if ((trailer = h2o_build_server_timing_trailer(&conn->req, H2O_STRLIT("server-timing: "), H2O_STRLIT("\r\n\r\n"))).len != 0) { h2o_socket_write(conn->sock, &trailer, 1, on_send_complete_post_trailers); return; } } cleanup_connection(conn); } static void on_upgrade_complete(h2o_socket_t *socket, const char *err) { struct st_h2o_http1_conn_t *conn = socket->data; h2o_http1_upgrade_cb cb = conn->upgrade.cb; void *data = conn->upgrade.data; h2o_socket_t *sock = NULL; size_t headers_size = 0; /* destruct the connection (after detaching the socket) */ if (err == 0) { sock = conn->sock; headers_size = conn->_unconsumed_request_size; close_connection(conn, 0); } else { close_connection(conn, 1); } cb(data, sock, headers_size); } static size_t flatten_headers_estimate_size(h2o_req_t *req, size_t 
server_name_and_connection_len) { size_t len = sizeof("HTTP/1.1 \r\nserver: \r\nconnection: \r\ncontent-length: \r\n\r\n") + 3 + strlen(req->res.reason) + server_name_and_connection_len + sizeof(H2O_UINT64_LONGEST_STR) - 1 + sizeof("cache-control: private") - 1; const h2o_header_t *header, *end; for (header = req->res.headers.entries, end = header + req->res.headers.size; header != end; ++header) len += header->name->len + header->value.len + 4; return len; } static size_t flatten_res_headers(char *buf, h2o_req_t *req, int replace_vary) { char *dst = buf; size_t i; for (i = 0; i != req->res.headers.size; ++i) { const h2o_header_t *header = req->res.headers.entries + i; if (header->name == &H2O_TOKEN_VARY->buf) { /* replace Vary with Cache-Control: private; see the following URLs to understand why this is necessary * - http://blogs.msdn.com/b/ieinternals/archive/2009/06/17/vary-header-prevents-caching-in-ie.aspx * - https://www.igvita.com/2013/05/01/deploying-webp-via-accept-content-negotiation/ */ if (replace_vary && is_msie(req)) { static h2o_header_t cache_control_private = {&H2O_TOKEN_CACHE_CONTROL->buf, NULL, {H2O_STRLIT("private")}}; header = &cache_control_private; } } memcpy(dst, header->orig_name ? header->orig_name : header->name->base, header->name->len); dst += header->name->len; *dst++ = ':'; *dst++ = ' '; memcpy(dst, header->value.base, header->value.len); dst += header->value.len; *dst++ = '\r'; *dst++ = '\n'; } return dst - buf; } static size_t flatten_headers(char *buf, h2o_req_t *req, const char *connection) { h2o_context_t *ctx = req->conn->ctx; char *dst = buf; assert(req->res.status <= 999); /* send essential headers with the first chars uppercased for max. interoperability (#72) */ if (req->res.content_length != SIZE_MAX) { dst += sprintf(dst, "HTTP/1.1 %d %s\r\nConnection: %s\r\nContent-Length: %zu\r\n", req->res.status, req->res.reason, connection, req->res.content_length); } else { dst += sprintf(dst, "HTTP/1.1 %d %s\r\nConnection: %s\r\n", req->res.status, req->res.reason, connection); } if (ctx->globalconf->server_name.len) { dst += sprintf(dst, "Server: %s\r\n", ctx->globalconf->server_name.base); } dst += flatten_res_headers(dst, req, 1); *dst++ = '\r'; *dst++ = '\n'; return dst - buf; } static int should_use_chunked_encoding(h2o_req_t *req) { if (req->version != 0x101) return 0; /* do nothing if content-length is known */ if (req->res.content_length != SIZE_MAX) return 0; /* RFC 2616 4.4 states that the following status codes (and response to a HEAD method) should not include message body */ if ((100 <= req->res.status && req->res.status <= 199) || req->res.status == 204 || req->res.status == 304) return 0; if (h2o_memis(req->input.method.base, req->input.method.len, H2O_STRLIT("HEAD"))) return 0; return 1; } static void setup_chunked(struct st_h2o_http1_finalostream_t *self, h2o_req_t *req) { if (should_use_chunked_encoding(req)) { h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_TRANSFER_ENCODING, NULL, H2O_STRLIT("chunked")); self->chunked_buf = h2o_mem_alloc_pool_aligned(&req->pool, 1, sizeof(size_t) * 2 + sizeof("\r\n")); } } static void encode_chunked(h2o_iovec_t *prefix, h2o_iovec_t *suffix, h2o_send_state_t state, size_t chunk_size, int send_trailers, char *buffer) { *prefix = h2o_iovec_init(NULL, 0); *suffix = h2o_iovec_init(NULL, 0); /* create chunk header and output data */ if (chunk_size != 0) { prefix->base = buffer; prefix->len = sprintf(buffer, "%zx\r\n", chunk_size); if (state != H2O_SEND_STATE_ERROR) { suffix->base = "\r\n0\r\n\r\n"; 
suffix->len = state == H2O_SEND_STATE_FINAL ? (send_trailers ? 5 : 7) : 2; } } else if (state == H2O_SEND_STATE_FINAL) { suffix->base = "0\r\n\r\n"; suffix->len = send_trailers ? 3 : 5; } /* if state is error, send a broken chunk to pass the error down to the browser */ if (state == H2O_SEND_STATE_ERROR) { suffix->base = "\r\n1\r\n"; suffix->len = 5; } } static void proceed_pull(struct st_h2o_http1_conn_t *conn, size_t nfilled) { h2o_iovec_t bufs[4]; size_t bufcnt = 0; h2o_send_state_t send_state; h2o_iovec_t prefix = h2o_iovec_init(NULL, 0), suffix = h2o_iovec_init(NULL, 0); if (nfilled != 0) bufs[bufcnt++] = h2o_iovec_init(conn->_ostr_final.pull.buf, nfilled); if (nfilled < MAX_PULL_BUF_SZ) { h2o_iovec_t cbuf = h2o_iovec_init((char *)conn->_ostr_final.pull.buf + nfilled, MAX_PULL_BUF_SZ - nfilled); send_state = h2o_pull(&conn->req, conn->_ostr_final.pull.cb, &cbuf); conn->req.bytes_sent += cbuf.len; if (conn->_ostr_final.chunked_buf != NULL) { encode_chunked(&prefix, &suffix, send_state, cbuf.len, conn->req.send_server_timing != 0, conn->_ostr_final.chunked_buf); if (prefix.len != 0) bufs[bufcnt++] = prefix; bufs[bufcnt++] = cbuf; if (suffix.len != 0) bufs[bufcnt++] = suffix; } else if (nfilled != 0) { bufs[bufcnt - 1].len += cbuf.len; } else { bufs[bufcnt++] = cbuf; } if (send_state == H2O_SEND_STATE_ERROR) { conn->req.http1_is_persistent = 0; conn->req.send_server_timing = 0; /* suppress sending trailers */ } } else { send_state = H2O_SEND_STATE_IN_PROGRESS; } /* write */ h2o_socket_write(conn->sock, bufs, bufcnt, h2o_send_state_is_in_progress(send_state) ? on_send_next_pull : on_send_complete); } static void finalostream_start_pull(h2o_ostream_t *_self, h2o_ostream_pull_cb cb) { struct st_h2o_http1_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http1_conn_t, _ostr_final.super, _self); const char *connection = conn->req.http1_is_persistent ? 
"keep-alive" : "close"; size_t bufsz, headers_len; assert(conn->req._ostr_top == &conn->_ostr_final.super); assert(!conn->_ostr_final.sent_headers); conn->req.timestamps.response_start_at = h2o_gettimeofday(conn->super.ctx->loop); setup_chunked(&conn->_ostr_final, &conn->req); if (conn->req.send_server_timing) h2o_add_server_timing_header(&conn->req, conn->_ostr_final.chunked_buf != NULL); /* register the pull callback */ conn->_ostr_final.pull.cb = cb; /* setup the buffer */ bufsz = flatten_headers_estimate_size(&conn->req, conn->super.ctx->globalconf->server_name.len + strlen(connection)); if (bufsz < MAX_PULL_BUF_SZ) { if (MAX_PULL_BUF_SZ - bufsz < conn->req.res.content_length) { bufsz = MAX_PULL_BUF_SZ; } else { bufsz += conn->req.res.content_length; } } conn->_ostr_final.pull.buf = h2o_mem_alloc_pool(&conn->req.pool, char, bufsz); /* fill-in the header */ headers_len = flatten_headers(conn->_ostr_final.pull.buf, &conn->req, connection); conn->_ostr_final.sent_headers = 1; proceed_pull(conn, headers_len); } static void on_delayed_send_complete(h2o_timer_t *entry) { struct st_h2o_http1_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http1_conn_t, _timeout_entry, entry); on_send_complete(conn->sock, 0); } void finalostream_send(h2o_ostream_t *_self, h2o_req_t *req, h2o_iovec_t *inbufs, size_t inbufcnt, h2o_send_state_t send_state) { struct st_h2o_http1_finalostream_t *self = (void *)_self; struct st_h2o_http1_conn_t *conn = (struct st_h2o_http1_conn_t *)req->conn; h2o_iovec_t *bufs = alloca(sizeof(h2o_iovec_t) * (inbufcnt + 1 + 2)) /* 1 for header, 2 for chunked encoding */, chunked_suffix; int i; int bufcnt = 0; assert(self == &conn->_ostr_final); if (self->informational.sending) { self->informational.pending_final.inbufs = h2o_mem_alloc_pool(&req->pool, h2o_iovec_t, inbufcnt); memcpy(self->informational.pending_final.inbufs, inbufs, sizeof(h2o_iovec_t) * inbufcnt); self->informational.pending_final.inbufcnt = inbufcnt; self->informational.pending_final.send_state = send_state; return; } /* count bytes_sent if other ostreams haven't counted */ size_t bytes_to_be_sent = 0; for (i = 0; i != inbufcnt; ++i) { bytes_to_be_sent += inbufs[i].len; } req->bytes_sent += bytes_to_be_sent; if (send_state == H2O_SEND_STATE_ERROR) { conn->req.http1_is_persistent = 0; conn->req.send_server_timing = 0; if (req->upstream_refused) { /* to let the client retry, immediately close the connection without sending any data */ on_send_complete(conn->sock, NULL); return; } } if (!self->sent_headers) { conn->req.timestamps.response_start_at = h2o_gettimeofday(conn->super.ctx->loop); setup_chunked(self, req); if (conn->req.send_server_timing) h2o_add_server_timing_header(&conn->req, conn->_ostr_final.chunked_buf != NULL); /* build headers and send */ const char *connection = req->http1_is_persistent ? 
"keep-alive" : "close"; bufs[bufcnt].base = h2o_mem_alloc_pool( &req->pool, char, flatten_headers_estimate_size(req, conn->super.ctx->globalconf->server_name.len + strlen(connection))); bufs[bufcnt].len = flatten_headers(bufs[bufcnt].base, req, connection); ++bufcnt; self->sent_headers = 1; } if (self->chunked_buf != NULL) { encode_chunked(bufs + bufcnt, &chunked_suffix, send_state, bytes_to_be_sent, req->send_server_timing != 0, self->chunked_buf); if (bufs[bufcnt].len != 0) ++bufcnt; } h2o_memcpy(bufs + bufcnt, inbufs, sizeof(h2o_iovec_t) * inbufcnt); bufcnt += inbufcnt; if (self->chunked_buf != NULL && chunked_suffix.len != 0) bufs[bufcnt++] = chunked_suffix; if (bufcnt != 0) { h2o_socket_write(conn->sock, bufs, bufcnt, h2o_send_state_is_in_progress(send_state) ? on_send_next_push : on_send_complete); } else { set_timeout(conn, 0, on_delayed_send_complete); } } static void on_send_informational(h2o_socket_t *sock, const char *err); static void do_send_informational(struct st_h2o_http1_finalostream_t *self, h2o_socket_t *sock) { if (self->informational.sending || self->informational.bufs.size == 0) return; self->informational.sending = 1; h2o_socket_write(sock, self->informational.bufs.entries, self->informational.bufs.size, on_send_informational); self->informational.bufs.size = 0; } static void on_send_informational(h2o_socket_t *sock, const char *err) { struct st_h2o_http1_conn_t *conn = sock->data; struct st_h2o_http1_finalostream_t *self = (struct st_h2o_http1_finalostream_t *)conn->req._ostr_top; if (err != NULL) { close_connection(conn, 1); return; } self->informational.sending = 0; if (self->informational.pending_final.inbufs != NULL) { finalostream_send(&self->super, &conn->req, self->informational.pending_final.inbufs, self->informational.pending_final.inbufcnt, self->informational.pending_final.send_state); return; } do_send_informational(self, sock); } static void finalostream_send_informational(h2o_ostream_t *_self, h2o_req_t *req) { struct st_h2o_http1_finalostream_t *self = (void *)_self; struct st_h2o_http1_conn_t *conn = (struct st_h2o_http1_conn_t *)req->conn; size_t len = sizeof("HTTP/1.1 \r\n\r\n") + 3 + strlen(req->res.reason) - 1; h2o_iovec_t buf = h2o_iovec_init(NULL, len); int i; for (i = 0; i != req->res.headers.size; ++i) buf.len += req->res.headers.entries[i].name->len + req->res.headers.entries[i].value.len + 4; buf.base = h2o_mem_alloc_pool(&req->pool, char, buf.len); char *dst = buf.base; dst += sprintf(dst, "HTTP/1.1 %d %s\r\n", req->res.status, req->res.reason); dst += flatten_res_headers(dst, req, 0); *dst++ = '\r'; *dst++ = '\n'; h2o_vector_reserve(&req->pool, &self->informational.bufs, self->informational.bufs.size + 1); self->informational.bufs.entries[self->informational.bufs.size++] = buf; do_send_informational(self, conn->sock); } static socklen_t get_sockname(h2o_conn_t *_conn, struct sockaddr *sa) { struct st_h2o_http1_conn_t *conn = (void *)_conn; return h2o_socket_getsockname(conn->sock, sa); } static socklen_t get_peername(h2o_conn_t *_conn, struct sockaddr *sa) { struct st_h2o_http1_conn_t *conn = (void *)_conn; return h2o_socket_getpeername(conn->sock, sa); } static h2o_socket_t *get_socket(h2o_conn_t *_conn) { struct st_h2o_http1_conn_t *conn = (void *)_conn; return conn->sock; } #define DEFINE_TLS_LOGGER(name) \ static h2o_iovec_t log_##name(h2o_req_t *req) \ { \ struct st_h2o_http1_conn_t *conn = (void *)req->conn; \ return h2o_socket_log_ssl_##name(conn->sock, &req->pool); \ } DEFINE_TLS_LOGGER(protocol_version) 
DEFINE_TLS_LOGGER(session_reused) DEFINE_TLS_LOGGER(cipher) DEFINE_TLS_LOGGER(cipher_bits) DEFINE_TLS_LOGGER(session_id) #undef DEFINE_TLS_LOGGER static h2o_iovec_t log_request_index(h2o_req_t *req) { struct st_h2o_http1_conn_t *conn = (void *)req->conn; char *s = h2o_mem_alloc_pool(&req->pool, char, sizeof(H2O_UINT64_LONGEST_STR)); size_t len = sprintf(s, "%" PRIu64, conn->_req_index); return h2o_iovec_init(s, len); } static int foreach_request(h2o_context_t *ctx, int (*cb)(h2o_req_t *req, void *cbdata), void *cbdata) { h2o_linklist_t *node; for (node = ctx->http1._conns.next; node != &ctx->http1._conns; node = node->next) { struct st_h2o_http1_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http1_conn_t, _conns, node); int ret = cb(&conn->req, cbdata); if (ret != 0) return ret; } return 0; } static const h2o_conn_callbacks_t h1_callbacks = { get_sockname, /* stringify address */ get_peername, /* ditto */ NULL, /* push */ get_socket, /* get underlying socket */ NULL, /* get debug state */ {{ {log_protocol_version, log_session_reused, log_cipher, log_cipher_bits, log_session_id}, /* ssl */ {log_request_index}, /* http1 */ {NULL} /* http2 */ }}}; static int conn_is_h1(h2o_conn_t *conn) { return conn->callbacks == &h1_callbacks; } void h2o_http1_accept(h2o_accept_ctx_t *ctx, h2o_socket_t *sock, struct timeval connected_at) { struct st_h2o_http1_conn_t *conn = (void *)h2o_create_connection(sizeof(*conn), ctx->ctx, ctx->hosts, connected_at, &h1_callbacks); /* zero-fill all properties expect req */ memset((char *)conn + sizeof(conn->super), 0, offsetof(struct st_h2o_http1_conn_t, req) - sizeof(conn->super)); /* init properties that need to be non-zero */ conn->sock = sock; sock->data = conn; h2o_linklist_insert(&ctx->ctx->http1._conns, &conn->_conns); init_request(conn); reqread_start(conn); } void h2o_http1_upgrade(h2o_req_t *req, h2o_iovec_t *inbufs, size_t inbufcnt, h2o_http1_upgrade_cb on_complete, void *user_data) { assert(conn_is_h1(req->conn)); struct st_h2o_http1_conn_t *conn = (void *)req->conn; h2o_iovec_t *bufs = alloca(sizeof(h2o_iovec_t) * (inbufcnt + 1)); conn->upgrade.data = user_data; conn->upgrade.cb = on_complete; bufs[0].base = h2o_mem_alloc_pool( &conn->req.pool, char, flatten_headers_estimate_size(&conn->req, conn->super.ctx->globalconf->server_name.len + sizeof("upgrade") - 1)); bufs[0].len = flatten_headers(bufs[0].base, &conn->req, "upgrade"); h2o_memcpy(bufs + 1, inbufs, sizeof(h2o_iovec_t) * inbufcnt); h2o_socket_write(conn->sock, bufs, inbufcnt + 1, on_upgrade_complete); }
1
13,757
Seems like we are calling the probe twice?
h2o-h2o
c
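A language-neutral sketch, written in Python although the h2o code above is C, of the concern the review raises: if both a connection-scoped probe macro and a global probe macro fire for the same request-headers event, every request is traced twice unless the calls are deduplicated. All names below are invented for illustration and stand in for the USDT probe macros:

trace_sink = []

def emit_probe(event, payload):
    # Stand-in for a tracing probe macro; records one event per call.
    trace_sink.append((event, payload))

def on_receive_request_headers(headers):
    # Emitting the same event through two macros, as in the patch above,
    # would append two records here; routing through one call keeps it single.
    emit_probe("receive_request_headers", headers)

on_receive_request_headers({"host": "example.com"})
assert len(trace_sink) == 1  # one trace record per request, not two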
@@ -13,7 +13,7 @@ import ( // LocalDevExecCmd allows users to execute arbitrary bash commands within a container. var LocalDevExecCmd = &cobra.Command{ - Use: "exec [app_name] [environment_name] '[cmd]'", + Use: "exec '[cmd]'", Short: "run a command in an app container.", Long: `Execs into container and runs bash commands.`, Run: func(cmd *cobra.Command, args []string) {
1
package cmd import ( "fmt" "log" "path" "strings" "github.com/drud/ddev/pkg/plugins/platform" "github.com/drud/drud-go/utils/dockerutil" "github.com/spf13/cobra" ) // LocalDevExecCmd allows users to execute arbitrary bash commands within a container. var LocalDevExecCmd = &cobra.Command{ Use: "exec [app_name] [environment_name] '[cmd]'", Short: "run a command in an app container.", Long: `Execs into container and runs bash commands.`, Run: func(cmd *cobra.Command, args []string) { // The command string will be the first argument if using a stored // appConfig, or the third if passing in app/deploy names. cmdString := args[0] if len(args) > 2 { cmdString = args[2] } app := platform.PluginMap[strings.ToLower(plugin)] opts := platform.AppOptions{ Name: activeApp, Environment: activeDeploy, } app.SetOpts(opts) nameContainer := fmt.Sprintf("%s-%s", app.ContainerName(), serviceType) if !dockerutil.IsRunning(nameContainer) { Failed("App not running locally. Try `ddev add`.") } if !platform.ComposeFileExists(app) { Failed("No docker-compose yaml for this site. Try `ddev add`.") } cmdArgs := []string{ "-f", path.Join(app.AbsPath(), "docker-compose.yaml"), "exec", "-T", nameContainer, } if strings.Contains(cmdString, "drush dl") { // do we want to add a -y here? cmdString = strings.Replace(cmdString, "drush dl", "drush --root=/src/docroot dl", 1) } cmdSplit := strings.Split(cmdString, " ") cmdArgs = append(cmdArgs, cmdSplit...) err := dockerutil.DockerCompose(cmdArgs...) if err != nil { log.Println(err) Failed("Could not execute command.") } }, PreRun: func(cmd *cobra.Command, args []string) { if len(args) == 1 { return } if len(args) == 3 { return } Failed("Invalid arguments detected. Please use a command in the form of: exec [app_name] [environment_name] '[cmd]'") }, } func init() { LocalDevExecCmd.Flags().StringVarP(&serviceType, "service", "s", "web", "Which service to send the command to. [web, db]") RootCmd.AddCommand(LocalDevExecCmd) }
1
10,695
I think we'll want @rickmanelius (or somebody) to go through all the help and make it more accessible. Probably later in the cycle. But "Run a command in an app container" doesn't do it for me :)
drud-ddev
php
@@ -118,7 +118,7 @@ func CreateOrUpdateService(ctx context.Context, sclient clientv1.ServiceInterfac } } else { svc.ResourceVersion = service.ResourceVersion - svc.Spec.IPFamily = service.Spec.IPFamily + svc.Spec.IPFamilies = service.Spec.IPFamilies svc.SetOwnerReferences(mergeOwnerReferences(service.GetOwnerReferences(), svc.GetOwnerReferences())) _, err := sclient.Update(ctx, svc, metav1.UpdateOptions{}) if err != nil && !apierrors.IsNotFound(err) {
1
// Copyright 2016 The prometheus-operator Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package k8sutil import ( "context" "fmt" "net/http" "net/url" "os" "regexp" "strings" "github.com/hashicorp/go-version" "github.com/pkg/errors" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/client-go/discovery" clientv1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) // KubeConfigEnv (optionally) specify the location of kubeconfig file const KubeConfigEnv = "KUBECONFIG" var invalidDNS1123Characters = regexp.MustCompile("[^-a-z0-9]+") // PodRunningAndReady returns whether a pod is running and each container has // passed it's ready state. func PodRunningAndReady(pod v1.Pod) (bool, error) { switch pod.Status.Phase { case v1.PodFailed, v1.PodSucceeded: return false, fmt.Errorf("pod completed") case v1.PodRunning: for _, cond := range pod.Status.Conditions { if cond.Type != v1.PodReady { continue } return cond.Status == v1.ConditionTrue, nil } return false, fmt.Errorf("pod ready condition not found") } return false, nil } func NewClusterConfig(host string, tlsInsecure bool, tlsConfig *rest.TLSClientConfig) (*rest.Config, error) { var cfg *rest.Config var err error kubeconfigFile := os.Getenv(KubeConfigEnv) if kubeconfigFile != "" { cfg, err = clientcmd.BuildConfigFromFlags("", kubeconfigFile) if err != nil { return nil, fmt.Errorf("Error creating config from specified file: %s %v\n", kubeconfigFile, err) } } else { if len(host) == 0 { if cfg, err = rest.InClusterConfig(); err != nil { return nil, err } } else { cfg = &rest.Config{ Host: host, } hostURL, err := url.Parse(host) if err != nil { return nil, fmt.Errorf("error parsing host url %s : %v", host, err) } if hostURL.Scheme == "https" { cfg.TLSClientConfig = *tlsConfig cfg.Insecure = tlsInsecure } } } cfg.QPS = 100 cfg.Burst = 100 return cfg, nil } func IsResourceNotFoundError(err error) bool { se, ok := err.(*apierrors.StatusError) if !ok { return false } if se.Status().Code == http.StatusNotFound && se.Status().Reason == metav1.StatusReasonNotFound { return true } return false } func CreateOrUpdateService(ctx context.Context, sclient clientv1.ServiceInterface, svc *v1.Service) error { service, err := sclient.Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return errors.Wrap(err, "retrieving service object failed") } if apierrors.IsNotFound(err) { _, err = sclient.Create(ctx, svc, metav1.CreateOptions{}) if err != nil { return errors.Wrap(err, "creating service object failed") } } else { svc.ResourceVersion = service.ResourceVersion svc.Spec.IPFamily = service.Spec.IPFamily svc.SetOwnerReferences(mergeOwnerReferences(service.GetOwnerReferences(), svc.GetOwnerReferences())) _, err := sclient.Update(ctx, svc, metav1.UpdateOptions{}) if err != nil && !apierrors.IsNotFound(err) { return errors.Wrap(err, "updating service object failed") } 
} return nil } func CreateOrUpdateEndpoints(ctx context.Context, eclient clientv1.EndpointsInterface, eps *v1.Endpoints) error { endpoints, err := eclient.Get(ctx, eps.Name, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return errors.Wrap(err, "retrieving existing kubelet endpoints object failed") } if apierrors.IsNotFound(err) { _, err = eclient.Create(ctx, eps, metav1.CreateOptions{}) if err != nil { return errors.Wrap(err, "creating kubelet endpoints object failed") } } else { eps.ResourceVersion = endpoints.ResourceVersion _, err = eclient.Update(ctx, eps, metav1.UpdateOptions{}) if err != nil { return errors.Wrap(err, "updating kubelet endpoints object failed") } } return nil } // GetMinorVersion returns the minor version as an integer func GetMinorVersion(dclient discovery.DiscoveryInterface) (int, error) { v, err := dclient.ServerVersion() if err != nil { return 0, err } ver, err := version.NewVersion(v.String()) if err != nil { return 0, err } return ver.Segments()[1], nil } // SanitizeVolumeName ensures that the given volume name is a valid DNS-1123 label // accepted by Kubernetes. func SanitizeVolumeName(name string) string { name = strings.ToLower(name) name = invalidDNS1123Characters.ReplaceAllString(name, "-") if len(name) > validation.DNS1123LabelMaxLength { name = name[0:validation.DNS1123LabelMaxLength] } return strings.Trim(name, "-") } func mergeOwnerReferences(old []metav1.OwnerReference, new []metav1.OwnerReference) []metav1.OwnerReference { existing := make(map[metav1.OwnerReference]bool) for _, ownerRef := range old { existing[ownerRef] = true } for _, ownerRef := range new { if _, ok := existing[ownerRef]; !ok { old = append(old, ownerRef) } } return old }
1
15,455
Have not tested this yet, let's see if the tests complain, but I suspect it should be as easy as this
prometheus-operator-prometheus-operator
go
@@ -42,6 +42,10 @@ SPARK_INDEX_NAME_FORMAT = "__index_level_{}__".format # A pattern to check if the name of a Spark column is a Koalas index name or not. SPARK_INDEX_NAME_PATTERN = re.compile(r"__index_level_[0-9]+__") +NATURAL_ORDER_COLUMN_NAME = '__natural_order__' + +HIDDEN_COLUMNS = set([NATURAL_ORDER_COLUMN_NAME]) + IndexMap = Tuple[str, Optional[Tuple[str, ...]]]
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ An internal immutable DataFrame with some metadata to manage indexes. """ import re from typing import Dict, List, Optional, Tuple, Union from itertools import accumulate import numpy as np import pandas as pd from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype, is_list_like from pyspark import sql as spark from pyspark._globals import _NoValue, _NoValueType from pyspark.sql import functions as F, Window from pyspark.sql.functions import PandasUDFType, pandas_udf from pyspark.sql.types import DataType, StructField, StructType, to_arrow_type, LongType from databricks import koalas as ks # For running doctests and reference resolution in PyCharm. from databricks.koalas.config import get_option from databricks.koalas.typedef import infer_pd_series_spark_type, spark_type_to_pandas_dtype from databricks.koalas.utils import (column_index_level, default_session, lazy_property, name_like_string, scol_for) # A function to turn given numbers to Spark columns that represent Koalas index. SPARK_INDEX_NAME_FORMAT = "__index_level_{}__".format # A pattern to check if the name of a Spark column is a Koalas index name or not. SPARK_INDEX_NAME_PATTERN = re.compile(r"__index_level_[0-9]+__") IndexMap = Tuple[str, Optional[Tuple[str, ...]]] class _InternalFrame(object): """ The internal immutable DataFrame which manages Spark DataFrame and column names and index information. :ivar _sdf: Spark DataFrame :ivar _index_map: list of pair holding the Spark field names for indexes, and the index name to be seen in Koalas DataFrame. :ivar _scol: Spark Column :ivar _data_columns: list of the Spark field names to be seen as columns in Koalas DataFrame. .. note:: this is an internal class. It is not supposed to be exposed to users and users should not directly access to it. The internal immutable DataFrame represents the index information for a DataFrame it belongs to. For instance, if we have a Koalas DataFrame as below, Pandas DataFrame does not store the index as columns. >>> kdf = ks.DataFrame({ ... 'A': [1, 2, 3, 4], ... 'B': [5, 6, 7, 8], ... 'C': [9, 10, 11, 12], ... 'D': [13, 14, 15, 16], ... 'E': [17, 18, 19, 20]}, columns = ['A', 'B', 'C', 'D', 'E']) >>> kdf # doctest: +NORMALIZE_WHITESPACE A B C D E 0 1 5 9 13 17 1 2 6 10 14 18 2 3 7 11 15 19 3 4 8 12 16 20 However, all columns including index column are also stored in Spark DataFrame internally as below. >>> kdf._internal.spark_internal_df.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ In order to fill this gap, the current metadata is used by mapping Spark's internal column to Koalas' index. 
See the method below: * `sdf` represents the internal Spark DataFrame * `data_columns` represents non-indexing columns * `index_columns` represents internal index columns * `columns` represents all columns * `index_names` represents the external index name * `index_map` is zipped pairs of `index_columns` and `index_names` * `spark_df` represents Spark DataFrame derived by the metadata * `pandas_df` represents pandas DataFrame derived by the metadata >>> internal = kdf._internal >>> internal.sdf.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ >>> internal.data_columns ['A', 'B', 'C', 'D', 'E'] >>> internal.index_columns ['__index_level_0__'] >>> internal.columns ['__index_level_0__', 'A', 'B', 'C', 'D', 'E'] >>> internal.index_names [None] >>> internal.index_map [('__index_level_0__', None)] >>> internal.spark_internal_df.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ >>> internal.spark_df.show() # doctest: +NORMALIZE_WHITESPACE +---+---+---+---+---+ | A| B| C| D| E| +---+---+---+---+---+ | 1| 5| 9| 13| 17| | 2| 6| 10| 14| 18| | 3| 7| 11| 15| 19| | 4| 8| 12| 16| 20| +---+---+---+---+---+ >>> internal.pandas_df A B C D E 0 1 5 9 13 17 1 2 6 10 14 18 2 3 7 11 15 19 3 4 8 12 16 20 In case that index is set to one of the existing column as below: >>> kdf1 = kdf.set_index("A") >>> kdf1 # doctest: +NORMALIZE_WHITESPACE B C D E A 1 5 9 13 17 2 6 10 14 18 3 7 11 15 19 4 8 12 16 20 >>> kdf1._internal.spark_internal_df.show() # doctest: +NORMALIZE_WHITESPACE +---+---+---+---+---+ | A| B| C| D| E| +---+---+---+---+---+ | 1| 5| 9| 13| 17| | 2| 6| 10| 14| 18| | 3| 7| 11| 15| 19| | 4| 8| 12| 16| 20| +---+---+---+---+---+ >>> internal = kdf1._internal >>> internal.sdf.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ >>> internal.data_columns ['B', 'C', 'D', 'E'] >>> internal.index_columns ['A'] >>> internal.columns ['A', 'B', 'C', 'D', 'E'] >>> internal.index_names [('A',)] >>> internal.index_map [('A', ('A',))] >>> internal.spark_internal_df.show() # doctest: +NORMALIZE_WHITESPACE +---+---+---+---+---+ | A| B| C| D| E| +---+---+---+---+---+ | 1| 5| 9| 13| 17| | 2| 6| 10| 14| 18| | 3| 7| 11| 15| 19| | 4| 8| 12| 16| 20| +---+---+---+---+---+ >>> internal.pandas_df # doctest: +NORMALIZE_WHITESPACE B C D E A 1 5 9 13 17 2 6 10 14 18 3 7 11 15 19 4 8 12 16 20 The `spark_df` will drop the index columns: >>> internal.spark_df.show() # doctest: +NORMALIZE_WHITESPACE +---+---+---+---+ | B| C| D| E| +---+---+---+---+ | 5| 9| 13| 17| | 6| 10| 14| 18| | 7| 11| 15| 19| | 8| 12| 16| 20| +---+---+---+---+ but if `drop=False`, the columns will still remain in `spark_df`: >>> kdf.set_index("A", drop=False)._internal.spark_df.show() # doctest: +NORMALIZE_WHITESPACE +---+---+---+---+---+ | A| B| C| D| E| +---+---+---+---+---+ | 1| 5| 9| 13| 17| | 2| 6| 10| 14| 18| | 3| 7| 11| 15| 19| | 4| 8| 12| 16| 20| 
+---+---+---+---+---+ In case that index becomes a multi index as below: >>> kdf2 = kdf.set_index("A", append=True) >>> kdf2 # doctest: +NORMALIZE_WHITESPACE B C D E A 0 1 5 9 13 17 1 2 6 10 14 18 2 3 7 11 15 19 3 4 8 12 16 20 >>> kdf2._internal.spark_internal_df.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ >>> internal = kdf2._internal >>> internal.sdf.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ >>> internal.data_columns ['B', 'C', 'D', 'E'] >>> internal.index_columns ['__index_level_0__', 'A'] >>> internal.columns ['__index_level_0__', 'A', 'B', 'C', 'D', 'E'] >>> internal.index_names [None, ('A',)] >>> internal.index_map [('__index_level_0__', None), ('A', ('A',))] >>> internal.spark_internal_df.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ >>> internal.pandas_df # doctest: +NORMALIZE_WHITESPACE B C D E A 0 1 5 9 13 17 1 2 6 10 14 18 2 3 7 11 15 19 3 4 8 12 16 20 For multi-level columns, it also holds column_index >>> columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ... ('Y', 'C'), ('Y', 'D')]) >>> kdf3 = ks.DataFrame([ ... [1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16], ... [17, 18, 19, 20]], columns = columns) >>> kdf3 # doctest: +NORMALIZE_WHITESPACE X Y A B C D 0 1 2 3 4 1 5 6 7 8 2 9 10 11 12 3 13 14 15 16 4 17 18 19 20 >>> internal = kdf3._internal >>> internal.sdf.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+------+------+------+------+ |__index_level_0__|(X, A)|(X, B)|(Y, C)|(Y, D)| +-----------------+------+------+------+------+ | 0| 1| 2| 3| 4| | 1| 5| 6| 7| 8| | 2| 9| 10| 11| 12| | 3| 13| 14| 15| 16| | 4| 17| 18| 19| 20| +-----------------+------+------+------+------+ >>> internal.data_columns ['(X, A)', '(X, B)', '(Y, C)', '(Y, D)'] >>> internal.column_index [('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')] For series, it also holds scol to represent the column. 
>>> kseries = kdf1.B >>> kseries A 1 5 2 6 3 7 4 8 Name: B, dtype: int64 >>> internal = kseries._internal >>> internal.sdf.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ >>> internal.scol Column<b'B'> >>> internal.data_columns ['B'] >>> internal.index_columns ['A'] >>> internal.columns ['A', 'B'] >>> internal.index_names [('A',)] >>> internal.index_map [('A', ('A',))] >>> internal.spark_internal_df.show() # doctest: +NORMALIZE_WHITESPACE +---+---+ | A| B| +---+---+ | 1| 5| | 2| 6| | 3| 7| | 4| 8| +---+---+ >>> internal.pandas_df # doctest: +NORMALIZE_WHITESPACE B A 1 5 2 6 3 7 4 8 """ def __init__(self, sdf: spark.DataFrame, index_map: Optional[List[IndexMap]] = None, column_index: Optional[List[Tuple[str, ...]]] = None, column_scols: Optional[List[spark.Column]] = None, column_index_names: Optional[List[str]] = None, scol: Optional[spark.Column] = None) -> None: """ Create a new internal immutable DataFrame to manage Spark DataFrame, column fields and index fields and names. :param sdf: Spark DataFrame to be managed. :param index_map: list of string pair Each pair holds the index field name which exists in Spark fields, and the index name. :param column_index: list of tuples with the same length The multi-level values in the tuples. :param column_scols: list of Spark Column Spark Columns to appear as columns. If scol is not None, this argument is ignored, otherwise if this is None, calculated from sdf. :param column_index_names: Names for each of the index levels. :param scol: Spark Column to be managed. """ assert isinstance(sdf, spark.DataFrame) if index_map is None: # Here is when Koalas DataFrame is created directly from Spark DataFrame. assert not any(SPARK_INDEX_NAME_PATTERN.match(name) for name in sdf.schema.names), \ "Index columns should not appear in columns of the Spark DataFrame. Avoid " \ "index colum names [%s]." % SPARK_INDEX_NAME_PATTERN # Create default index. 
index_map = [(SPARK_INDEX_NAME_FORMAT(0), None)] sdf = _InternalFrame.attach_default_index(sdf) assert index_map is not None assert all(isinstance(index_field, str) and (index_name is None or (isinstance(index_name, tuple) and all(isinstance(name, str) for name in index_name))) for index_field, index_name in index_map), index_map assert scol is None or isinstance(scol, spark.Column) assert column_scols is None or all(isinstance(scol, spark.Column) for scol in column_scols) self._sdf = sdf # type: spark.DataFrame self._index_map = index_map # type: List[IndexMap] self._scol = scol # type: Optional[spark.Column] if scol is not None: self._column_scols = [scol] elif column_scols is None: index_columns = set(index_column for index_column, _ in self._index_map) self._column_scols = [scol_for(sdf, col) for col in sdf.columns if col not in index_columns] else: self._column_scols = column_scols if scol is not None: assert column_index is not None and len(column_index) == 1, column_index assert all(idx is None or (isinstance(idx, tuple) and len(idx) > 0) for idx in column_index), column_index self._column_index = column_index elif column_index is None: self._column_index = [(sdf.select(scol).columns[0],) for scol in self._column_scols] else: assert len(column_index) == len(self._column_scols), \ (len(column_index), len(self._column_scols)) assert all(isinstance(i, tuple) for i in column_index), column_index assert len(set(len(i) for i in column_index)) <= 1, column_index self._column_index = column_index if column_index_names is not None and not is_list_like(column_index_names): raise ValueError('Column_index_names should be list-like or None for a MultiIndex') if isinstance(column_index_names, list): if all(name is None for name in column_index_names): self._column_index_names = None else: self._column_index_names = column_index_names else: self._column_index_names = column_index_names @staticmethod def attach_default_index(sdf): """ This method attaches a default index to Spark DataFrame. Spark does not have the index notion so corresponding column should be generated. There are several types of default index can be configured by `compute.default_index_type`. """ default_index_type = get_option("compute.default_index_type") if default_index_type == "sequence": sequential_index = F.row_number().over( Window.orderBy(F.monotonically_increasing_id().asc())) - 1 scols = [scol_for(sdf, column) for column in sdf.columns] return sdf.select(sequential_index.alias(SPARK_INDEX_NAME_FORMAT(0)), *scols) elif default_index_type == "distributed-sequence": # 1. Calculates counts per each partition ID. `counts` here is, for instance, # { # 1: 83, # 6: 83, # 3: 83, # ... # } counts = map(lambda x: (x["key"], x["count"]), sdf.groupby(F.spark_partition_id().alias("key")).count().collect()) # 2. Calculates cumulative sum in an order of partition id. # Note that it does not matter if partition id guarantees its order or not. # We just need a one-by-one sequential id. # sort by partition key. sorted_counts = sorted(counts, key=lambda x: x[0]) # get cumulative sum in an order of partition key. cumulative_counts = accumulate(map(lambda count: count[1], sorted_counts)) # zip it with partition key. sums = dict(zip(map(lambda count: count[0], sorted_counts), cumulative_counts)) # 3. Group by partition id and assign each range. 
def default_index(pdf): current_partition_max = sums[pdf["__spark_partition_id"].iloc[0]] offset = len(pdf) pdf[SPARK_INDEX_NAME_FORMAT(0)] = list(range( current_partition_max - offset, current_partition_max)) return pdf.drop(columns=["__spark_partition_id"]) return_schema = StructType( [StructField(SPARK_INDEX_NAME_FORMAT(0), LongType())] + list(sdf.schema)) grouped_map_func = pandas_udf(return_schema, PandasUDFType.GROUPED_MAP)(default_index) sdf = sdf.withColumn("__spark_partition_id", F.spark_partition_id()) return sdf.groupBy("__spark_partition_id").apply(grouped_map_func) elif default_index_type == "distributed": scols = [scol_for(sdf, column) for column in sdf.columns] return sdf.select( F.monotonically_increasing_id().alias(SPARK_INDEX_NAME_FORMAT(0)), *scols) else: raise ValueError("'compute.default_index_type' should be one of 'sequence'," " 'distributed-sequence' and 'distributed'") @lazy_property def _column_index_to_name(self) -> Dict[Tuple[str, ...], str]: return dict(zip(self.column_index, self.data_columns)) def column_name_for(self, column_name_or_index: Union[str, Tuple[str, ...]]) -> str: """ Return the actual Spark column name for the given column name or index. """ if column_name_or_index in self._column_index_to_name: return self._column_index_to_name[column_name_or_index] else: if not isinstance(column_name_or_index, str): raise KeyError(name_like_string(column_name_or_index)) return column_name_or_index @lazy_property def _column_index_to_scol(self) -> Dict[Tuple[str, ...], spark.Column]: return dict(zip(self.column_index, self.column_scols)) def scol_for(self, column_name_or_index: Union[str, Tuple[str, ...]]): """ Return Spark Column for the given column name or index. """ if column_name_or_index in self._column_index_to_scol: return self._column_index_to_scol[column_name_or_index] else: return scol_for(self._sdf, self.column_name_for(column_name_or_index)) def spark_type_for(self, column_name_or_index: Union[str, Tuple[str, ...]]) -> DataType: """ Return DataType for the given column name or index. """ return self._sdf.select(self.scol_for(column_name_or_index)).schema[0].dataType @property def sdf(self) -> spark.DataFrame: """ Return the managed Spark DataFrame. """ return self._sdf @lazy_property def data_columns(self) -> List[str]: """ Return the managed column field names. """ return self.sdf.select(self.column_scols).columns @property def column_scols(self) -> List[spark.Column]: """ Return Spark Columns for the managed data columns. """ return self._column_scols @lazy_property def index_columns(self) -> List[str]: """ Return the managed index field names. """ return [index_column for index_column, _ in self._index_map] @lazy_property def index_scols(self) -> List[spark.Column]: """ Return Spark Columns for the managed index columns. """ return [self.scol_for(column) for column in self.index_columns] @lazy_property def columns(self) -> List[str]: """ Return all the field names including index field names. """ index_columns = set(self.index_columns) return self.index_columns + [column for column in self.data_columns if column not in index_columns] @lazy_property def scols(self) -> List[spark.Column]: """ Return Spark Columns for the managed columns including index columns. """ return [self.scol_for(column) for column in self.columns] @property def index_map(self) -> List[IndexMap]: """ Return the managed index information. 
""" assert len(self._index_map) > 0 return self._index_map @lazy_property def index_names(self) -> List[Optional[Tuple[str, ...]]]: """ Return the managed index names. """ return [index_name for _, index_name in self.index_map] @property def scol(self) -> Optional[spark.Column]: """ Return the managed Spark Column. """ return self._scol @property def column_index(self) -> List[Tuple[str, ...]]: """ Return the managed column index. """ return self._column_index @lazy_property def column_index_level(self) -> int: """ Return the level of the column index. """ return column_index_level(self._column_index) @property def column_index_names(self) -> Optional[List[str]]: """ Return names of the index levels. """ return self._column_index_names @lazy_property def spark_internal_df(self) -> spark.DataFrame: """ Return as Spark DataFrame. This contains index columns as well and should be only used for internal purposes. """ index_columns = set(self.index_columns) data_columns = [] for i, (column, idx) in enumerate(zip(self.data_columns, self.column_index)): if column not in index_columns: scol = self.scol_for(idx) name = str(i) if idx is None else name_like_string(idx) if column != name: scol = scol.alias(name) data_columns.append(scol) return self._sdf.select(self.index_scols + data_columns) @lazy_property def spark_df(self) -> spark.DataFrame: """ Return as Spark DataFrame. """ data_columns = [] for i, (column, idx) in enumerate(zip(self.data_columns, self.column_index)): scol = self.scol_for(idx) name = str(i) if idx is None else name_like_string(idx) if column != name: scol = scol.alias(name) data_columns.append(scol) return self._sdf.select(data_columns) @lazy_property def pandas_df(self): """ Return as pandas DataFrame. """ sdf = self.spark_internal_df pdf = sdf.toPandas() if len(pdf) == 0 and len(sdf.schema) > 0: pdf = pdf.astype({field.name: spark_type_to_pandas_dtype(field.dataType) for field in sdf.schema}) index_columns = self.index_columns if len(index_columns) > 0: append = False for index_field in index_columns: drop = index_field not in self.data_columns pdf = pdf.set_index(index_field, drop=drop, append=append) append = True pdf = pdf[[col if col in index_columns else str(i) if idx is None else name_like_string(idx) for i, (col, idx) in enumerate(zip(self.data_columns, self.column_index))]] if self.column_index_level > 1: pdf.columns = pd.MultiIndex.from_tuples(self._column_index) else: pdf.columns = [None if idx is None else idx[0] for idx in self._column_index] if self._column_index_names is not None: pdf.columns.names = self._column_index_names index_names = self.index_names if len(index_names) > 0: pdf.index.names = [name if name is None or len(name) > 1 else name[0] for name in index_names] return pdf def copy(self, sdf: Union[spark.DataFrame, _NoValueType] = _NoValue, index_map: Union[List[IndexMap], _NoValueType] = _NoValue, column_index: Union[List[Tuple[str, ...]], _NoValueType] = _NoValue, column_scols: Union[List[spark.Column], _NoValueType] = _NoValue, column_index_names: Optional[Union[List[str], _NoValueType]] = _NoValue, scol: Union[spark.Column, _NoValueType] = _NoValue) -> '_InternalFrame': """ Copy the immutable DataFrame. :param sdf: the new Spark DataFrame. If None, then the original one is used. :param index_map: the new index information. If None, then the original one is used. :param column_index: the new column index. :param column_scols: the new Spark Columns. If None, then the original ones are used. 
:param column_index_names: the new names of the index levels. :param scol: the new Spark Column. If None, then the original one is used. :return: the copied immutable DataFrame. """ if sdf is _NoValue: sdf = self._sdf if index_map is _NoValue: index_map = self._index_map if column_index is _NoValue: column_index = self._column_index if column_scols is _NoValue: column_scols = self._column_scols if column_index_names is _NoValue: column_index_names = self._column_index_names if scol is _NoValue: scol = self._scol return _InternalFrame(sdf, index_map=index_map, column_index=column_index, column_scols=column_scols, column_index_names=column_index_names, scol=scol) @staticmethod def from_pandas(pdf: pd.DataFrame) -> '_InternalFrame': """ Create an immutable DataFrame from pandas DataFrame. :param pdf: :class:`pd.DataFrame` :return: the created immutable DataFrame """ columns = pdf.columns data_columns = [name_like_string(col) for col in columns] if isinstance(columns, pd.MultiIndex): column_index = columns.tolist() else: column_index = None column_index_names = columns.names index = pdf.index index_map = [] # type: List[IndexMap] if isinstance(index, pd.MultiIndex): if index.names is None: index_map = [(SPARK_INDEX_NAME_FORMAT(i), None) for i in range(len(index.levels))] else: index_map = [(SPARK_INDEX_NAME_FORMAT(i) if name is None else name, name if name is None or isinstance(name, tuple) else (name,)) for i, name in enumerate(index.names)] else: name = index.name index_map = [(name_like_string(name) if name is not None else SPARK_INDEX_NAME_FORMAT(0), name if name is None or isinstance(name, tuple) else (name,))] index_columns = [index_column for index_column, _ in index_map] reset_index = pdf.reset_index() reset_index.columns = index_columns + data_columns schema = StructType([StructField(name_like_string(name), infer_pd_series_spark_type(col), nullable=bool(col.isnull().any())) for name, col in reset_index.iteritems()]) for name, col in reset_index.iteritems(): dt = col.dtype if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt): continue reset_index[name] = col.replace({np.nan: None}) sdf = default_session().createDataFrame(reset_index, schema=schema) return _InternalFrame(sdf=sdf, index_map=index_map, column_index=column_index, column_scols=[scol_for(sdf, col) for col in data_columns], column_index_names=column_index_names)
1
13,596
no big deal, but why don't we just use a list to keep the order? I don't think it's likely to have duplicated columns, if that was the concern.
databricks-koalas
py
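A minimal sketch (not from the koalas patch) of the list-vs-set trade-off the reviewer raises: a set gives O(1) membership checks but no defined order, while a list keeps insertion order; the helper visible_columns is hypothetical and only illustrates that filtering behaves the same either way.

# As in the diff above: fast membership tests, unspecified iteration order.
NATURAL_ORDER_COLUMN_NAME = '__natural_order__'
HIDDEN_COLUMNS_SET = {NATURAL_ORDER_COLUMN_NAME}

# The reviewer's alternative: a list preserves order, and with only a
# handful of hidden columns the O(n) membership check is negligible.
HIDDEN_COLUMNS_LIST = [NATURAL_ORDER_COLUMN_NAME]

def visible_columns(columns, hidden=HIDDEN_COLUMNS_LIST):
    # Filtering preserves the order of `columns` with either container;
    # the choice only matters for lookup cost and for iterating `hidden`.
    return [c for c in columns if c not in hidden]

print(visible_columns(['a', '__natural_order__', 'b']))  # ['a', 'b']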
@@ -1,5 +1,5 @@ /** - * core/modules data store + * Modules data store * * Site Kit by Google, Copyright 2020 Google LLC *
1
/** * core/modules data store * * Site Kit by Google, Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Internal dependencies */ import Data from 'googlesitekit-data'; import modules from './modules'; import { STORE_NAME } from './constants'; export { STORE_NAME }; const store = Data.combineStores( Data.commonStore, modules, ); export const INITIAL_STATE = store.INITIAL_STATE; export const actions = store.actions; export const controls = store.controls; export const reducer = store.reducer; export const resolvers = store.resolvers; export const selectors = store.selectors; // Register this store on the global registry. Data.registerStore( STORE_NAME, store ); export default store;
1
32,211
See above, same for all similar cases below.
google-site-kit-wp
js
@@ -34,6 +34,7 @@ const ContainerdConfigTemplate = ` {{- if .NodeConfig.AgentConfig.Snapshotter }} [plugins.cri.containerd] + disable_snapshot_annotations = true snapshotter = "{{ .NodeConfig.AgentConfig.Snapshotter }}" {{end}}
1
package templates import ( "bytes" "text/template" "github.com/rancher/k3s/pkg/daemons/config" ) type ContainerdConfig struct { NodeConfig *config.Node IsRunningInUserNS bool PrivateRegistryConfig *Registry } const ContainerdConfigTemplate = ` [plugins.opt] path = "{{ .NodeConfig.Containerd.Opt }}" [plugins.cri] stream_server_address = "127.0.0.1" stream_server_port = "10010" enable_selinux = {{ .NodeConfig.SELinux }} {{- if .IsRunningInUserNS }} disable_cgroup = true disable_apparmor = true restrict_oom_score_adj = true {{end}} {{- if .NodeConfig.AgentConfig.PauseImage }} sandbox_image = "{{ .NodeConfig.AgentConfig.PauseImage }}" {{end}} {{- if .NodeConfig.AgentConfig.Snapshotter }} [plugins.cri.containerd] snapshotter = "{{ .NodeConfig.AgentConfig.Snapshotter }}" {{end}} {{- if not .NodeConfig.NoFlannel }} [plugins.cri.cni] bin_dir = "{{ .NodeConfig.AgentConfig.CNIBinDir }}" conf_dir = "{{ .NodeConfig.AgentConfig.CNIConfDir }}" {{end}} [plugins.cri.containerd.runtimes.runc] runtime_type = "io.containerd.runc.v2" {{ if .PrivateRegistryConfig }} {{ if .PrivateRegistryConfig.Mirrors }} [plugins.cri.registry.mirrors]{{end}} {{range $k, $v := .PrivateRegistryConfig.Mirrors }} [plugins.cri.registry.mirrors."{{$k}}"] endpoint = [{{range $i, $j := $v.Endpoints}}{{if $i}}, {{end}}{{printf "%q" .}}{{end}}] {{end}} {{range $k, $v := .PrivateRegistryConfig.Configs }} {{ if $v.Auth }} [plugins.cri.registry.configs."{{$k}}".auth] {{ if $v.Auth.Username }}username = {{ printf "%q" $v.Auth.Username }}{{end}} {{ if $v.Auth.Password }}password = {{ printf "%q" $v.Auth.Password }}{{end}} {{ if $v.Auth.Auth }}auth = {{ printf "%q" $v.Auth.Auth }}{{end}} {{ if $v.Auth.IdentityToken }}identitytoken = {{ printf "%q" $v.Auth.IdentityToken }}{{end}} {{end}} {{ if $v.TLS }} [plugins.cri.registry.configs."{{$k}}".tls] {{ if $v.TLS.CAFile }}ca_file = "{{ $v.TLS.CAFile }}"{{end}} {{ if $v.TLS.CertFile }}cert_file = "{{ $v.TLS.CertFile }}"{{end}} {{ if $v.TLS.KeyFile }}key_file = "{{ $v.TLS.KeyFile }}"{{end}} {{ if $v.TLS.InsecureSkipVerify }}insecure_skip_verify = true{{end}} {{end}} {{end}} {{end}} ` func ParseTemplateFromConfig(templateBuffer string, config interface{}) (string, error) { out := new(bytes.Buffer) t := template.Must(template.New("compiled_template").Parse(templateBuffer)) if err := t.Execute(out, config); err != nil { return "", err } return out.String(), nil }
1
8,733
where is this coming from?
k3s-io-k3s
go
@@ -0,0 +1,11 @@ +class MoveScreencastsIntoProducts < ActiveRecord::Migration + def up + say_with_time "Converting screencasts into video_tutorials" do + update "UPDATE products SET type = 'VideoTutorial' WHERE type = 'Screencast'" + end + end + + def down + raise ActiveRecord::IrreversibleMigration + end +end
1
1
11,605
We can also drop `plans.includes_screencasts`, right?
thoughtbot-upcase
rb
@@ -374,6 +374,11 @@ class ECPrivkey(ECPubkey): sigdecode = get_r_and_s_from_sig_string private_key = _MySigningKey.from_secret_exponent(self.secret_scalar, curve=SECP256k1) sig = private_key.sign_digest_deterministic(data, hashfunc=hashlib.sha256, sigencode=sigencode) + counter = 0 + while ord(sig[0]) > 127: # grind for low R value https://github.com/bitcoin/bitcoin/pull/13666 + counter += 1 + extra_entropy = bfh("%064x" % counter)[::-1] + sig = private_key.sign_digest_deterministic(data, hashfunc=hashlib.sha256, sigencode=sigencode, extra_entropy=extra_entropy) public_key = private_key.get_verifying_key() if not public_key.verify_digest(sig, data, sigdecode=sigdecode): raise Exception('Sanity check verifying our own signature failed.')
1
# -*- coding: utf-8 -*- # # Electrum - lightweight Bitcoin client # Copyright (C) 2018 The Electrum developers # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import base64 import hmac import hashlib from typing import Union import ecdsa from ecdsa.ecdsa import curve_secp256k1, generator_secp256k1 from ecdsa.curves import SECP256k1 from ecdsa.ellipticcurve import Point from ecdsa.util import string_to_number, number_to_string from .util import bfh, bh2u, assert_bytes, print_error, to_bytes, InvalidPassword, profiler from .crypto import (Hash, aes_encrypt_with_iv, aes_decrypt_with_iv, hmac_oneshot) from .ecc_fast import do_monkey_patching_of_python_ecdsa_internals_with_libsecp256k1 do_monkey_patching_of_python_ecdsa_internals_with_libsecp256k1() CURVE_ORDER = SECP256k1.order def generator(): return ECPubkey.from_point(generator_secp256k1) def point_at_infinity(): return ECPubkey(None) def sig_string_from_der_sig(der_sig, order=CURVE_ORDER): r, s = ecdsa.util.sigdecode_der(der_sig, order) return ecdsa.util.sigencode_string(r, s, order) def der_sig_from_sig_string(sig_string, order=CURVE_ORDER): r, s = ecdsa.util.sigdecode_string(sig_string, order) return ecdsa.util.sigencode_der_canonize(r, s, order) def der_sig_from_r_and_s(r, s, order=CURVE_ORDER): return ecdsa.util.sigencode_der_canonize(r, s, order) def get_r_and_s_from_der_sig(der_sig, order=CURVE_ORDER): r, s = ecdsa.util.sigdecode_der(der_sig, order) return r, s def get_r_and_s_from_sig_string(sig_string, order=CURVE_ORDER): r, s = ecdsa.util.sigdecode_string(sig_string, order) return r, s def sig_string_from_r_and_s(r, s, order=CURVE_ORDER): return ecdsa.util.sigencode_string_canonize(r, s, order) def point_to_ser(P, compressed=True) -> bytes: if isinstance(P, tuple): assert len(P) == 2, 'unexpected point: %s' % P x, y = P else: x, y = P.x(), P.y() if x is None or y is None: # infinity return None if compressed: return bfh(('%02x' % (2+(y&1))) + ('%064x' % x)) return bfh('04'+('%064x' % x)+('%064x' % y)) def get_y_coord_from_x(x, odd=True): curve = curve_secp256k1 _p = curve.p() _a = curve.a() _b = curve.b() for offset in range(128): Mx = x + offset My2 = pow(Mx, 3, _p) + _a * pow(Mx, 2, _p) + _b % _p My = pow(My2, (_p + 1) // 4, _p) if curve.contains_point(Mx, My): if odd == bool(My & 1): return My return _p - My raise Exception('ECC_YfromX: No Y found') def ser_to_point(ser: bytes) -> (int, int): if ser[0] not in (0x02, 0x03, 0x04): raise ValueError('Unexpected first byte: {}'.format(ser[0])) if ser[0] == 0x04: return 
string_to_number(ser[1:33]), string_to_number(ser[33:]) x = string_to_number(ser[1:]) return x, get_y_coord_from_x(x, ser[0] == 0x03) def _ser_to_python_ecdsa_point(ser: bytes) -> ecdsa.ellipticcurve.Point: x, y = ser_to_point(ser) try: return Point(curve_secp256k1, x, y, CURVE_ORDER) except: raise InvalidECPointException() class InvalidECPointException(Exception): """e.g. not on curve, or infinity""" class _MyVerifyingKey(ecdsa.VerifyingKey): @classmethod def from_signature(klass, sig, recid, h, curve): # TODO use libsecp?? """ See http://www.secg.org/download/aid-780/sec1-v2.pdf, chapter 4.1.6 """ from ecdsa import util, numbertheory from . import msqr curveFp = curve.curve G = curve.generator order = G.order() # extract r,s from signature r, s = util.sigdecode_string(sig, order) # 1.1 x = r + (recid//2) * order # 1.3 alpha = ( x * x * x + curveFp.a() * x + curveFp.b() ) % curveFp.p() beta = msqr.modular_sqrt(alpha, curveFp.p()) y = beta if (beta - recid) % 2 == 0 else curveFp.p() - beta # 1.4 the constructor checks that nR is at infinity try: R = Point(curveFp, x, y, order) except: raise InvalidECPointException() # 1.5 compute e from message: e = string_to_number(h) minus_e = -e % order # 1.6 compute Q = r^-1 (sR - eG) inv_r = numbertheory.inverse_mod(r,order) try: Q = inv_r * ( s * R + minus_e * G ) except: raise InvalidECPointException() return klass.from_public_point( Q, curve ) class _MySigningKey(ecdsa.SigningKey): """Enforce low S values in signatures""" def sign_number(self, number, entropy=None, k=None): r, s = ecdsa.SigningKey.sign_number(self, number, entropy, k) if s > CURVE_ORDER//2: s = CURVE_ORDER - s return r, s class _PubkeyForPointAtInfinity: point = ecdsa.ellipticcurve.INFINITY class ECPubkey(object): def __init__(self, b: bytes): if b is not None: assert_bytes(b) point = _ser_to_python_ecdsa_point(b) self._pubkey = ecdsa.ecdsa.Public_key(generator_secp256k1, point) else: self._pubkey = _PubkeyForPointAtInfinity() @classmethod def from_sig_string(cls, sig_string: bytes, recid: int, msg_hash: bytes): assert_bytes(sig_string) if len(sig_string) != 64: raise Exception('Wrong encoding') if recid < 0 or recid > 3: raise ValueError('recid is {}, but should be 0 <= recid <= 3'.format(recid)) ecdsa_verifying_key = _MyVerifyingKey.from_signature(sig_string, recid, msg_hash, curve=SECP256k1) ecdsa_point = ecdsa_verifying_key.pubkey.point return ECPubkey.from_point(ecdsa_point) @classmethod def from_signature65(cls, sig: bytes, msg_hash: bytes): if len(sig) != 65: raise Exception("Wrong encoding") nV = sig[0] if nV < 27 or nV >= 35: raise Exception("Bad encoding") if nV >= 31: compressed = True nV -= 4 else: compressed = False recid = nV - 27 return cls.from_sig_string(sig[1:], recid, msg_hash), compressed @classmethod def from_point(cls, point): _bytes = point_to_ser(point, compressed=False) # faster than compressed return ECPubkey(_bytes) def get_public_key_bytes(self, compressed=True): if self.is_at_infinity(): raise Exception('point is at infinity') return point_to_ser(self.point(), compressed) def get_public_key_hex(self, compressed=True): return bh2u(self.get_public_key_bytes(compressed)) def point(self) -> (int, int): return self._pubkey.point.x(), self._pubkey.point.y() def __mul__(self, other: int): if not isinstance(other, int): raise TypeError('multiplication not defined for ECPubkey and {}'.format(type(other))) ecdsa_point = self._pubkey.point * other return self.from_point(ecdsa_point) def __rmul__(self, other: int): return self * other def __add__(self, other): if 
not isinstance(other, ECPubkey): raise TypeError('addition not defined for ECPubkey and {}'.format(type(other))) ecdsa_point = self._pubkey.point + other._pubkey.point return self.from_point(ecdsa_point) def __eq__(self, other): return self._pubkey.point.x() == other._pubkey.point.x() \ and self._pubkey.point.y() == other._pubkey.point.y() def __ne__(self, other): return not (self == other) def verify_message_for_address(self, sig65: bytes, message: bytes) -> None: assert_bytes(message) h = Hash(msg_magic(message)) public_key, compressed = self.from_signature65(sig65, h) # check public key if public_key != self: raise Exception("Bad signature") # check message self.verify_message_hash(sig65[1:], h) def verify_message_hash(self, sig_string: bytes, msg_hash: bytes) -> None: assert_bytes(sig_string) if len(sig_string) != 64: raise Exception('Wrong encoding') ecdsa_point = self._pubkey.point verifying_key = _MyVerifyingKey.from_public_point(ecdsa_point, curve=SECP256k1) verifying_key.verify_digest(sig_string, msg_hash, sigdecode=ecdsa.util.sigdecode_string) def encrypt_message(self, message: bytes, magic: bytes = b'BIE1'): """ ECIES encryption/decryption methods; AES-128-CBC with PKCS7 is used as the cipher; hmac-sha256 is used as the mac """ assert_bytes(message) randint = ecdsa.util.randrange(CURVE_ORDER) ephemeral_exponent = number_to_string(randint, CURVE_ORDER) ephemeral = ECPrivkey(ephemeral_exponent) ecdh_key = (self * ephemeral.secret_scalar).get_public_key_bytes(compressed=True) key = hashlib.sha512(ecdh_key).digest() iv, key_e, key_m = key[0:16], key[16:32], key[32:] ciphertext = aes_encrypt_with_iv(key_e, iv, message) ephemeral_pubkey = ephemeral.get_public_key_bytes(compressed=True) encrypted = magic + ephemeral_pubkey + ciphertext mac = hmac_oneshot(key_m, encrypted, hashlib.sha256) return base64.b64encode(encrypted + mac) @classmethod def order(cls): return CURVE_ORDER def is_at_infinity(self): return self == point_at_infinity() def msg_magic(message: bytes) -> bytes: from .bitcoin import var_int length = bfh(var_int(len(message))) return b"\x18Bitcoin Signed Message:\n" + length + message def verify_message_with_address(address: str, sig65: bytes, message: bytes): from .bitcoin import pubkey_to_address assert_bytes(sig65, message) try: h = Hash(msg_magic(message)) public_key, compressed = ECPubkey.from_signature65(sig65, h) # check public key using the address pubkey_hex = public_key.get_public_key_hex(compressed) for txin_type in ['p2pkh','p2wpkh','p2wpkh-p2sh']: addr = pubkey_to_address(txin_type, pubkey_hex) if address == addr: break else: raise Exception("Bad signature") # check message public_key.verify_message_hash(sig65[1:], h) return True except Exception as e: print_error("Verification error: {0}".format(e)) return False def is_secret_within_curve_range(secret: Union[int, bytes]) -> bool: if isinstance(secret, bytes): secret = string_to_number(secret) return 0 < secret < CURVE_ORDER class ECPrivkey(ECPubkey): def __init__(self, privkey_bytes: bytes): assert_bytes(privkey_bytes) if len(privkey_bytes) != 32: raise Exception('unexpected size for secret. 
should be 32 bytes, not {}'.format(len(privkey_bytes))) secret = string_to_number(privkey_bytes) if not is_secret_within_curve_range(secret): raise InvalidECPointException('Invalid secret scalar (not within curve order)') self.secret_scalar = secret point = generator_secp256k1 * secret super().__init__(point_to_ser(point)) self._privkey = ecdsa.ecdsa.Private_key(self._pubkey, secret) @classmethod def from_secret_scalar(cls, secret_scalar: int): secret_bytes = number_to_string(secret_scalar, CURVE_ORDER) return ECPrivkey(secret_bytes) @classmethod def from_arbitrary_size_secret(cls, privkey_bytes: bytes): """This method is only for legacy reasons. Do not introduce new code that uses it. Unlike the default constructor, this method does not require len(privkey_bytes) == 32, and the secret does not need to be within the curve order either. """ return ECPrivkey(cls.normalize_secret_bytes(privkey_bytes)) @classmethod def normalize_secret_bytes(cls, privkey_bytes: bytes) -> bytes: scalar = string_to_number(privkey_bytes) % CURVE_ORDER if scalar == 0: raise Exception('invalid EC private key scalar: zero') privkey_32bytes = number_to_string(scalar, CURVE_ORDER) return privkey_32bytes def sign(self, data: bytes, sigencode=None, sigdecode=None) -> bytes: if sigencode is None: sigencode = sig_string_from_r_and_s if sigdecode is None: sigdecode = get_r_and_s_from_sig_string private_key = _MySigningKey.from_secret_exponent(self.secret_scalar, curve=SECP256k1) sig = private_key.sign_digest_deterministic(data, hashfunc=hashlib.sha256, sigencode=sigencode) public_key = private_key.get_verifying_key() if not public_key.verify_digest(sig, data, sigdecode=sigdecode): raise Exception('Sanity check verifying our own signature failed.') return sig def sign_transaction(self, hashed_preimage: bytes) -> bytes: return self.sign(hashed_preimage, sigencode=der_sig_from_r_and_s, sigdecode=get_r_and_s_from_der_sig) def sign_message(self, message: bytes, is_compressed: bool) -> bytes: def bruteforce_recid(sig_string): for recid in range(4): sig65 = construct_sig65(sig_string, recid, is_compressed) try: self.verify_message_for_address(sig65, message) return sig65, recid except Exception as e: continue else: raise Exception("error: cannot sign message. 
no recid fits..") message = to_bytes(message, 'utf8') msg_hash = Hash(msg_magic(message)) sig_string = self.sign(msg_hash, sigencode=sig_string_from_r_and_s, sigdecode=get_r_and_s_from_sig_string) sig65, recid = bruteforce_recid(sig_string) return sig65 def decrypt_message(self, encrypted, magic=b'BIE1'): encrypted = base64.b64decode(encrypted) if len(encrypted) < 85: raise Exception('invalid ciphertext: length') magic_found = encrypted[:4] ephemeral_pubkey_bytes = encrypted[4:37] ciphertext = encrypted[37:-32] mac = encrypted[-32:] if magic_found != magic: raise Exception('invalid ciphertext: invalid magic bytes') try: ecdsa_point = _ser_to_python_ecdsa_point(ephemeral_pubkey_bytes) except AssertionError as e: raise Exception('invalid ciphertext: invalid ephemeral pubkey') from e if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, ecdsa_point.x(), ecdsa_point.y()): raise Exception('invalid ciphertext: invalid ephemeral pubkey') ephemeral_pubkey = ECPubkey.from_point(ecdsa_point) ecdh_key = (ephemeral_pubkey * self.secret_scalar).get_public_key_bytes(compressed=True) key = hashlib.sha512(ecdh_key).digest() iv, key_e, key_m = key[0:16], key[16:32], key[32:] if mac != hmac_oneshot(key_m, encrypted[:-32], hashlib.sha256): raise InvalidPassword() return aes_decrypt_with_iv(key_e, iv, ciphertext) def construct_sig65(sig_string, recid, is_compressed): comp = 4 if is_compressed else 0 return bytes([27 + recid + comp]) + sig_string
1
12,796
wouldn't `int.to_bytes(counter, 32, 'little')` be equivalent, clearer and faster?
spesmilo-electrum
py
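A quick standalone sanity check, written for this note rather than taken from Electrum, supporting the reviewer's claim that int.to_bytes is equivalent to the hex-format-then-reverse construction in the patch; bfh is re-implemented locally as bytes.fromhex, which matches its use in the file above.

def bfh(x: str) -> bytes:
    # Stand-in for Electrum's bfh helper (hex string -> bytes).
    return bytes.fromhex(x)

for counter in (0, 1, 2, 255, 256, 2**31 - 1):
    # "%064x" produces 32 big-endian bytes of hex; [::-1] flips to little-endian.
    via_hex = bfh("%064x" % counter)[::-1]
    via_to_bytes = counter.to_bytes(32, 'little')
    assert via_hex == via_to_bytes, counter

print("equivalent for all tested counters")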
@@ -60,6 +60,11 @@ func (a *API) CreatePayments(ctx context.Context, config CreatePaymentsParams) ( return CreatePayments(ctx, a, config) } +// ValidateStoragePaymentCondition validates that the given condition is a payment condition and has the right values +func (a *API) ValidateStoragePaymentCondition(ctx context.Context, condition *types.Predicate, minerAddr address.Address, commP types.CommP, pieceSize *types.BytesAmount) error { + return ValidateStoragePaymentCondition(ctx, condition, minerAddr, commP, pieceSize) +} + // DealGet returns a single deal matching a given cid or an error func (a *API) DealGet(ctx context.Context, proposalCid cid.Cid) (*storagedeal.Deal, error) { return DealGet(ctx, a, proposalCid)
1
package porcelain import ( "context" "math/big" "time" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" minerActor "github.com/filecoin-project/go-filecoin/actor/builtin/miner" "github.com/filecoin-project/go-filecoin/actor/builtin/paymentbroker" "github.com/filecoin-project/go-filecoin/address" "github.com/filecoin-project/go-filecoin/plumbing" "github.com/filecoin-project/go-filecoin/proofs" "github.com/filecoin-project/go-filecoin/protocol/storage/storagedeal" "github.com/filecoin-project/go-filecoin/types" ) // API is the porcelain implementation, a set of convenience calls written on the // plumbing api, to be used to build user facing features and protocols. // // The porcelain.API provides porcelain calls **as well as the plumbing calls**. // This is because most consumers depend on a combination of porcelain and plumbing // calls. Flattening both apis into a single implementation enables consumers to take // a single dependency and not have to know which api a call comes from. The mechanism // is embedding: the plumbing implementation is embedded in the porcelain implementation, making // all the embedded type (plumbing) calls available on the embedder type (porcelain). // Providing a single implementation on which to depend also enables consumers to choose // at what level to mock out their dependencies: low (plumbing) or high (porcelain). // We ensure that porcelain calls only depend on the narrow subset of the plumbing api // on which they depend by implementing them in free functions that take their specific // subset of the plumbing.api. The porcelain.API delegates porcelain calls to these // free functions. // // If you are implementing a user facing feature or a protocol this is probably the implementation // you should depend on. Define the subset of it that you use in an interface in your package // take this implementation as a dependency. type API struct { *plumbing.API } // New returns a new porcelain.API. 
func New(plumbing *plumbing.API) *API { return &API{plumbing} } // ChainBlockHeight determines the current block height func (a *API) ChainBlockHeight() (*types.BlockHeight, error) { return ChainBlockHeight(a) } // ChainGetFullBlock returns the full block given the header cid func (a *API) ChainGetFullBlock(ctx context.Context, id cid.Cid) (*types.FullBlock, error) { return GetFullBlock(ctx, a, id) } // CreatePayments establishes a payment channel and create multiple payments against it func (a *API) CreatePayments(ctx context.Context, config CreatePaymentsParams) (*CreatePaymentsReturn, error) { return CreatePayments(ctx, a, config) } // DealGet returns a single deal matching a given cid or an error func (a *API) DealGet(ctx context.Context, proposalCid cid.Cid) (*storagedeal.Deal, error) { return DealGet(ctx, a, proposalCid) } // DealRedeem redeems a voucher for the deal with the given cid and returns // either the cid of the created redeem message or an error func (a *API) DealRedeem(ctx context.Context, fromAddr address.Address, dealCid cid.Cid, gasPrice types.AttoFIL, gasLimit types.GasUnits) (cid.Cid, error) { return DealRedeem(ctx, a, fromAddr, dealCid, gasPrice, gasLimit) } // DealRedeemPreview previews the redeem method for a deal and returns the // expected gas used func (a *API) DealRedeemPreview(ctx context.Context, fromAddr address.Address, dealCid cid.Cid) (types.GasUnits, error) { return DealRedeemPreview(ctx, a, fromAddr, dealCid) } // DealsLs returns a channel with all deals func (a *API) DealsLs(ctx context.Context) (<-chan *StorageDealLsResult, error) { return DealsLs(ctx, a) } // MessagePoolWait waits for the message pool to have at least messageCount unmined messages. // It's useful for integration testing. func (a *API) MessagePoolWait(ctx context.Context, messageCount uint) ([]*types.SignedMessage, error) { return MessagePoolWait(ctx, a, messageCount) } // MinerCreate creates a miner func (a *API) MinerCreate( ctx context.Context, accountAddr address.Address, gasPrice types.AttoFIL, gasLimit types.GasUnits, sectorSize *types.BytesAmount, pid peer.ID, collateral types.AttoFIL, ) (_ *address.Address, err error) { return MinerCreate(ctx, a, accountAddr, gasPrice, gasLimit, sectorSize, pid, collateral) } // MinerPreviewCreate previews the Gas cost of creating a miner func (a *API) MinerPreviewCreate( ctx context.Context, fromAddr address.Address, sectorSize *types.BytesAmount, pid peer.ID, ) (usedGas types.GasUnits, err error) { return MinerPreviewCreate(ctx, a, fromAddr, sectorSize, pid) } // MinerGetAsk queries for an ask of the given miner func (a *API) MinerGetAsk(ctx context.Context, minerAddr address.Address, askID uint64) (minerActor.Ask, error) { return MinerGetAsk(ctx, a, minerAddr, askID) } // MinerGetOwnerAddress queries for the owner address of the given miner func (a *API) MinerGetOwnerAddress(ctx context.Context, minerAddr address.Address) (address.Address, error) { return MinerGetOwnerAddress(ctx, a, minerAddr) } // MinerGetSectorSize queries for the sector size of the given miner. func (a *API) MinerGetSectorSize(ctx context.Context, minerAddr address.Address) (*types.BytesAmount, error) { return MinerGetSectorSize(ctx, a, minerAddr) } // MinerCalculateLateFee queries for the fee required for a PoSt submitted at some height. 
func (a *API) MinerCalculateLateFee(ctx context.Context, minerAddr address.Address, height *types.BlockHeight) (types.AttoFIL, error) { return MinerCalculateLateFee(ctx, a, minerAddr, height) } // MinerGetLastCommittedSectorID queries for the sector size of the given miner. func (a *API) MinerGetLastCommittedSectorID(ctx context.Context, minerAddr address.Address) (uint64, error) { return MinerGetLastCommittedSectorID(ctx, a, minerAddr) } // MinerGetWorker queries for the public key of the given miner func (a *API) MinerGetWorker(ctx context.Context, minerAddr address.Address) (address.Address, error) { return MinerGetWorker(ctx, a, minerAddr) } // MinerGetPeerID queries for the peer id of the given miner func (a *API) MinerGetPeerID(ctx context.Context, minerAddr address.Address) (peer.ID, error) { return MinerGetPeerID(ctx, a, minerAddr) } // MinerSetPrice configures the price of storage. See implementation for details. func (a *API) MinerSetPrice(ctx context.Context, from address.Address, miner address.Address, gasPrice types.AttoFIL, gasLimit types.GasUnits, price types.AttoFIL, expiry *big.Int) (MinerSetPriceResponse, error) { return MinerSetPrice(ctx, a, from, miner, gasPrice, gasLimit, price, expiry) } // MinerGetPower queries for the power of the given miner func (a *API) MinerGetPower(ctx context.Context, minerAddr address.Address) (MinerPower, error) { return MinerGetPower(ctx, a, minerAddr) } // MinerGetProvingPeriod queries for the proving period of the given miner func (a *API) MinerGetProvingPeriod(ctx context.Context, minerAddr address.Address) (MinerProvingPeriod, error) { return MinerGetProvingPeriod(ctx, a, minerAddr) } // MinerGetCollateral queries for the proving period of the given miner func (a *API) MinerGetCollateral(ctx context.Context, minerAddr address.Address) (types.AttoFIL, error) { return MinerGetCollateral(ctx, a, minerAddr) } // MinerPreviewSetPrice calculates the amount of Gas needed for a call to MinerSetPrice. // This method accepts all the same arguments as MinerSetPrice. func (a *API) MinerPreviewSetPrice( ctx context.Context, from address.Address, miner address.Address, price types.AttoFIL, expiry *big.Int, ) (types.GasUnits, error) { return MinerPreviewSetPrice(ctx, a, from, miner, price, expiry) } // ProtocolParameters fetches the current protocol configuration parameters. func (a *API) ProtocolParameters(ctx context.Context) (*ProtocolParams, error) { return ProtocolParameters(ctx, a) } // WalletBalance returns the current balance of the given wallet address. func (a *API) WalletBalance(ctx context.Context, address address.Address) (types.AttoFIL, error) { return WalletBalance(ctx, a, address) } // WalletDefaultAddress returns a default wallet address from the config. // If none is set it picks the first address in the wallet and sets it as the default in the config. 
func (a *API) WalletDefaultAddress() (address.Address, error) { return WalletDefaultAddress(a) } // PaymentChannelLs lists payment channels for a given payer func (a *API) PaymentChannelLs( ctx context.Context, fromAddr address.Address, payerAddr address.Address, ) (map[string]*paymentbroker.PaymentChannel, error) { return PaymentChannelLs(ctx, a, fromAddr, payerAddr) } // PaymentChannelVoucher returns a signed payment channel voucher func (a *API) PaymentChannelVoucher( ctx context.Context, fromAddr address.Address, channel *types.ChannelID, amount types.AttoFIL, validAt *types.BlockHeight, condition *types.Predicate, ) (voucher *types.PaymentVoucher, err error) { return PaymentChannelVoucher(ctx, a, fromAddr, channel, amount, validAt, condition) } // ClientListAsks returns a channel with asks from the latest chain state func (a *API) ClientListAsks(ctx context.Context) <-chan Ask { return ClientListAsks(ctx, a) } // CalculatePoSt invokes the sector builder to calculate a proof-of-spacetime. func (a *API) CalculatePoSt(ctx context.Context, sortedCommRs proofs.SortedCommRs, seed types.PoStChallengeSeed) ([]types.PoStProof, []uint64, error) { return CalculatePoSt(ctx, a, sortedCommRs, seed) } // PingMinerWithTimeout pings a storage or retrieval miner, waiting the given // timeout and returning desciptive errors. func (a *API) PingMinerWithTimeout( ctx context.Context, minerPID peer.ID, timeout time.Duration, ) error { return PingMinerWithTimeout(ctx, minerPID, timeout, a) }
1
20,755
I don't see why this is in porcelain since it's just a free function, with no dependency on plumbing or the `a` receiver. I think it should be moved to `protocol/storage`. It's exposed unnecessarily widely here.
filecoin-project-venus
go
@@ -130,6 +130,11 @@ def create_messages(data, entity: str, stats_range: str, from_ts: int, to_ts: in """ for entry in data: _dict = entry.asDict(recursive=True) + + # Clip the recordings to top 1000 so that we don't drop messages + if entity == "recordings": + _dict[entity] = _dict[entity][:1000] + try: model = UserEntityStatMessage(**{ 'musicbrainz_id': _dict['user_name'],
1
import json from datetime import datetime from typing import Iterator, Optional from flask import current_app from pydantic import ValidationError from data.model.user_entity import UserEntityStatMessage from listenbrainz_spark.constants import LAST_FM_FOUNDING_YEAR from listenbrainz_spark.path import LISTENBRAINZ_DATA_DIRECTORY from listenbrainz_spark.stats import (adjust_days, replace_days, replace_months, run_query) from listenbrainz_spark.stats.user.artist import get_artists from listenbrainz_spark.stats.user.recording import get_recordings from listenbrainz_spark.stats.user.release import get_releases from listenbrainz_spark.stats.user.utils import (filter_listens, get_last_monday, get_latest_listen_ts) from listenbrainz_spark.utils import get_listens entity_handler_map = { 'artists': get_artists, 'releases': get_releases, 'recordings': get_recordings } def get_entity_week(entity: str) -> Iterator[Optional[UserEntityStatMessage]]: """ Get the weekly top entity for all users """ current_app.logger.debug("Calculating {}_week...".format(entity)) date = get_latest_listen_ts() to_date = get_last_monday(date) from_date = adjust_days(to_date, 7) listens_df = get_listens(from_date, to_date, LISTENBRAINZ_DATA_DIRECTORY) filtered_df = filter_listens(listens_df, from_date, to_date) table_name = 'user_{}_week'.format(entity) filtered_df.createOrReplaceTempView(table_name) handler = entity_handler_map[entity] data = handler(table_name) messages = create_messages(data=data, entity=entity, stats_range='week', from_ts=from_date.timestamp(), to_ts=to_date.timestamp()) current_app.logger.debug("Done!") return messages def get_entity_month(entity: str) -> Iterator[Optional[UserEntityStatMessage]]: """ Get the month top entity for all users """ current_app.logger.debug("Calculating {}_month...".format(entity)) to_date = get_latest_listen_ts() from_date = replace_days(to_date, 1) listens_df = get_listens(from_date, to_date, LISTENBRAINZ_DATA_DIRECTORY) table_name = 'user_{}_month'.format(entity) listens_df.createOrReplaceTempView(table_name) handler = entity_handler_map[entity] data = handler(table_name) messages = create_messages(data=data, entity=entity, stats_range='month', from_ts=from_date.timestamp(), to_ts=to_date.timestamp()) current_app.logger.debug("Done!") return messages def get_entity_year(entity: str) -> Iterator[Optional[UserEntityStatMessage]]: """ Get the year top entity for all users """ current_app.logger.debug("Calculating {}_year...".format(entity)) to_date = get_latest_listen_ts() from_date = replace_days(replace_months(to_date, 1), 1) listens_df = get_listens(from_date, to_date, LISTENBRAINZ_DATA_DIRECTORY) table_name = 'user_{}_year'.format(entity) listens_df.createOrReplaceTempView(table_name) handler = entity_handler_map[entity] data = handler(table_name) messages = create_messages(data=data, entity=entity, stats_range='year', from_ts=from_date.timestamp(), to_ts=to_date.timestamp()) current_app.logger.debug("Done!") return messages def get_entity_all_time(entity: str) -> Iterator[Optional[UserEntityStatMessage]]: """ Get the all_time top entity for all users """ current_app.logger.debug("Calculating {}_all_time...".format(entity)) to_date = get_latest_listen_ts() from_date = datetime(LAST_FM_FOUNDING_YEAR, 1, 1) listens_df = get_listens(from_date, to_date, LISTENBRAINZ_DATA_DIRECTORY) table_name = 'user_{}_all_time'.format(entity) listens_df.createOrReplaceTempView(table_name) handler = entity_handler_map[entity] data = handler(table_name) messages = create_messages(data=data, 
entity=entity, stats_range='all_time', from_ts=from_date.timestamp(), to_ts=to_date.timestamp()) current_app.logger.debug("Done!") return messages def create_messages(data, entity: str, stats_range: str, from_ts: int, to_ts: int) -> Iterator[Optional[UserEntityStatMessage]]: """ Create messages to send the data to the webserver via RabbitMQ Args: data (iterator): Data to be sent to the webserver entity: The entity for which statistics are calculated, i.e. 'artists', 'releases' or 'recordings' stats_range: The range for which the statistics have been calculated from_ts: The UNIX timestamp of start time of the stats to_ts: The UNIX timestamp of end time of the stats Returns: messages: A list of messages to be sent via RabbitMQ """ for entry in data: _dict = entry.asDict(recursive=True) try: model = UserEntityStatMessage(**{ 'musicbrainz_id': _dict['user_name'], 'type': 'user_entity', 'stats_range': stats_range, 'from_ts': from_ts, 'to_ts': to_ts, 'data': _dict[entity], 'entity': entity, 'count': len(_dict[entity]) }) result = model.dict(exclude_none=True) yield result except ValidationError: current_app.logger.error("""ValidationError while calculating {stats_range} top {entity} for user: {user_name}. Data: {data}""".format(stats_range=stats_range, entity=entity, user_name=_dict['user_name'], data=json.dumps(_dict, indent=3)), exc_info=True) yield None
1
16,665
Could we only do this for all time? Because that's what is causing problems rn?
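A minimal sketch of what this comment seems to ask for — clipping only the all_time range instead of every range. The helper name clip_recordings and the RECORDINGS_LIMIT constant are illustrative, not from the source:

RECORDINGS_LIMIT = 1000  # illustrative constant; the patch hardcodes 1000

def clip_recordings(entry: dict, entity: str, stats_range: str) -> dict:
    """Clip top recordings, but only for the all_time stats range."""
    # Per the review, only the all_time recordings lists grow large enough
    # to cause dropped messages, so leave the other ranges untouched.
    if entity == "recordings" and stats_range == "all_time":
        entry[entity] = entry[entity][:RECORDINGS_LIMIT]
    return entry

# Usage: a week-range row keeps all items, an all_time row is clipped.
row = {"user_name": "alice", "recordings": list(range(2000))}
assert len(clip_recordings(dict(row), "recordings", "week")["recordings"]) == 2000
assert len(clip_recordings(dict(row), "recordings", "all_time")["recordings"]) == 1000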
metabrainz-listenbrainz-server
py
@@ -369,7 +369,7 @@ class _FlowType(_BaseFlowType): def parse(self, manager: "CommandManager", t: type, s: str) -> flow.Flow: try: - flows = manager.execute("view.flows.resolve %s" % (s)) + flows = manager.execute("view.flows.resolve '%s'" % (s)) except exceptions.CommandError as e: raise exceptions.TypeError(str(e)) from e if len(flows) != 1:
1
import codecs import os import glob import re import typing from mitmproxy import exceptions from mitmproxy import flow from mitmproxy.utils import emoji, strutils if typing.TYPE_CHECKING: # pragma: no cover from mitmproxy.command import CommandManager class Path(str): pass class Cmd(str): pass class CmdArgs(str): pass class Unknown(str): pass class Space(str): pass class CutSpec(typing.Sequence[str]): pass class Data(typing.Sequence[typing.Sequence[typing.Union[str, bytes]]]): pass class Marker(str): pass class Choice: def __init__(self, options_command): self.options_command = options_command def __instancecheck__(self, instance): # pragma: no cover # return false here so that arguments are piped through parsearg, # which does extended validation. return False class _BaseType: typ: typing.Type = object display: str = "" def completion(self, manager: "CommandManager", t: typing.Any, s: str) -> typing.Sequence[str]: """ Returns a list of completion strings for a given prefix. The strings returned don't necessarily need to be suffixes of the prefix, since completers will do prefix filtering themselves.. """ raise NotImplementedError def parse(self, manager: "CommandManager", typ: typing.Any, s: str) -> typing.Any: """ Parse a string, given the specific type instance (to allow rich type annotations like Choice) and a string. Raises exceptions.TypeError if the value is invalid. """ raise NotImplementedError def is_valid(self, manager: "CommandManager", typ: typing.Any, val: typing.Any) -> bool: """ Check if data is valid for this type. """ raise NotImplementedError class _BoolType(_BaseType): typ = bool display = "bool" def completion(self, manager: "CommandManager", t: type, s: str) -> typing.Sequence[str]: return ["false", "true"] def parse(self, manager: "CommandManager", t: type, s: str) -> bool: if s == "true": return True elif s == "false": return False else: raise exceptions.TypeError( "Booleans are 'true' or 'false', got %s" % s ) def is_valid(self, manager: "CommandManager", typ: typing.Any, val: typing.Any) -> bool: return val in [True, False] class _StrType(_BaseType): typ = str display = "str" # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals escape_sequences = re.compile(r""" \\ ( [\\'"abfnrtv] # Standard C escape sequence | [0-7]{1,3} # Character with octal value | x.. # Character with hex value | N{[^}]+} # Character name in the Unicode database | u.... # Character with 16-bit hex value | U........ 
# Character with 32-bit hex value ) """, re.VERBOSE) @staticmethod def _unescape(match: re.Match) -> str: return codecs.decode(match.group(0), "unicode-escape") # type: ignore def completion(self, manager: "CommandManager", t: type, s: str) -> typing.Sequence[str]: return [] def parse(self, manager: "CommandManager", t: type, s: str) -> str: try: return self.escape_sequences.sub(self._unescape, s) except ValueError as e: raise exceptions.TypeError(f"Invalid str: {e}") from e def is_valid(self, manager: "CommandManager", typ: typing.Any, val: typing.Any) -> bool: return isinstance(val, str) class _BytesType(_BaseType): typ = bytes display = "bytes" def completion(self, manager: "CommandManager", t: type, s: str) -> typing.Sequence[str]: return [] def parse(self, manager: "CommandManager", t: type, s: str) -> bytes: try: return strutils.escaped_str_to_bytes(s) except ValueError as e: raise exceptions.TypeError(str(e)) def is_valid(self, manager: "CommandManager", typ: typing.Any, val: typing.Any) -> bool: return isinstance(val, bytes) class _UnknownType(_BaseType): typ = Unknown display = "unknown" def completion(self, manager: "CommandManager", t: type, s: str) -> typing.Sequence[str]: return [] def parse(self, manager: "CommandManager", t: type, s: str) -> str: return s def is_valid(self, manager: "CommandManager", typ: typing.Any, val: typing.Any) -> bool: return False class _IntType(_BaseType): typ = int display = "int" def completion(self, manager: "CommandManager", t: type, s: str) -> typing.Sequence[str]: return [] def parse(self, manager: "CommandManager", t: type, s: str) -> int: try: return int(s) except ValueError as e: raise exceptions.TypeError(str(e)) from e def is_valid(self, manager: "CommandManager", typ: typing.Any, val: typing.Any) -> bool: return isinstance(val, int) class _PathType(_BaseType): typ = Path display = "path" def completion(self, manager: "CommandManager", t: type, start: str) -> typing.Sequence[str]: if not start: start = "./" path = os.path.expanduser(start) ret = [] if os.path.isdir(path): files = glob.glob(os.path.join(path, "*")) prefix = start else: files = glob.glob(path + "*") prefix = os.path.dirname(start) prefix = prefix or "./" for f in files: display = os.path.join(prefix, os.path.normpath(os.path.basename(f))) if os.path.isdir(f): display += "/" ret.append(display) if not ret: ret = [start] ret.sort() return ret def parse(self, manager: "CommandManager", t: type, s: str) -> str: return os.path.expanduser(s) def is_valid(self, manager: "CommandManager", typ: typing.Any, val: typing.Any) -> bool: return isinstance(val, str) class _CmdType(_BaseType): typ = Cmd display = "cmd" def completion(self, manager: "CommandManager", t: type, s: str) -> typing.Sequence[str]: return list(manager.commands.keys()) def parse(self, manager: "CommandManager", t: type, s: str) -> str: if s not in manager.commands: raise exceptions.TypeError("Unknown command: %s" % s) return s def is_valid(self, manager: "CommandManager", typ: typing.Any, val: typing.Any) -> bool: return val in manager.commands class _ArgType(_BaseType): typ = CmdArgs display = "arg" def completion(self, manager: "CommandManager", t: type, s: str) -> typing.Sequence[str]: return [] def parse(self, manager: "CommandManager", t: type, s: str) -> str: return s def is_valid(self, manager: "CommandManager", typ: typing.Any, val: typing.Any) -> bool: return isinstance(val, str) class _StrSeqType(_BaseType): typ = typing.Sequence[str] display = "str[]" def completion(self, manager: "CommandManager", t: 
type, s: str) -> typing.Sequence[str]: return [] def parse(self, manager: "CommandManager", t: type, s: str) -> typing.Sequence[str]: return [x.strip() for x in s.split(",")] def is_valid(self, manager: "CommandManager", typ: typing.Any, val: typing.Any) -> bool: if isinstance(val, str) or isinstance(val, bytes): return False try: for v in val: if not isinstance(v, str): return False except TypeError: return False return True class _CutSpecType(_BaseType): typ = CutSpec display = "cut[]" valid_prefixes = [ "request.method", "request.scheme", "request.host", "request.http_version", "request.port", "request.path", "request.url", "request.text", "request.content", "request.raw_content", "request.timestamp_start", "request.timestamp_end", "request.header[", "response.status_code", "response.reason", "response.text", "response.content", "response.timestamp_start", "response.timestamp_end", "response.raw_content", "response.header[", "client_conn.peername.port", "client_conn.peername.host", "client_conn.tls_version", "client_conn.sni", "client_conn.tls_established", "server_conn.address.port", "server_conn.address.host", "server_conn.ip_address.host", "server_conn.tls_version", "server_conn.sni", "server_conn.tls_established", ] def completion(self, manager: "CommandManager", t: type, s: str) -> typing.Sequence[str]: spec = s.split(",") opts = [] for pref in self.valid_prefixes: spec[-1] = pref opts.append(",".join(spec)) return opts def parse(self, manager: "CommandManager", t: type, s: str) -> CutSpec: parts: typing.Any = s.split(",") return parts def is_valid(self, manager: "CommandManager", typ: typing.Any, val: typing.Any) -> bool: if not isinstance(val, str): return False parts = [x.strip() for x in val.split(",")] for p in parts: for pref in self.valid_prefixes: if p.startswith(pref): break else: return False return True class _BaseFlowType(_BaseType): viewmarkers = [ "@all", "@focus", "@shown", "@hidden", "@marked", "@unmarked", ] valid_prefixes = viewmarkers + [ "~q", "~s", "~a", "~hq", "~hs", "~b", "~bq", "~bs", "~t", "~d", "~m", "~u", "~c", ] def completion(self, manager: "CommandManager", t: type, s: str) -> typing.Sequence[str]: return self.valid_prefixes class _FlowType(_BaseFlowType): typ = flow.Flow display = "flow" def parse(self, manager: "CommandManager", t: type, s: str) -> flow.Flow: try: flows = manager.execute("view.flows.resolve %s" % (s)) except exceptions.CommandError as e: raise exceptions.TypeError(str(e)) from e if len(flows) != 1: raise exceptions.TypeError( "Command requires one flow, specification matched %s." 
% len(flows) ) return flows[0] def is_valid(self, manager: "CommandManager", typ: typing.Any, val: typing.Any) -> bool: return isinstance(val, flow.Flow) class _FlowsType(_BaseFlowType): typ = typing.Sequence[flow.Flow] display = "flow[]" def parse(self, manager: "CommandManager", t: type, s: str) -> typing.Sequence[flow.Flow]: try: return manager.execute("view.flows.resolve %s" % (s)) except exceptions.CommandError as e: raise exceptions.TypeError(str(e)) from e def is_valid(self, manager: "CommandManager", typ: typing.Any, val: typing.Any) -> bool: try: for v in val: if not isinstance(v, flow.Flow): return False except TypeError: return False return True class _DataType(_BaseType): typ = Data display = "data[][]" def completion( self, manager: "CommandManager", t: type, s: str ) -> typing.Sequence[str]: # pragma: no cover raise exceptions.TypeError("data cannot be passed as argument") def parse( self, manager: "CommandManager", t: type, s: str ) -> typing.Any: # pragma: no cover raise exceptions.TypeError("data cannot be passed as argument") def is_valid(self, manager: "CommandManager", typ: typing.Any, val: typing.Any) -> bool: # FIXME: validate that all rows have equal length, and all columns have equal types try: for row in val: for cell in row: if not (isinstance(cell, str) or isinstance(cell, bytes)): return False except TypeError: return False return True class _ChoiceType(_BaseType): typ = Choice display = "choice" def completion(self, manager: "CommandManager", t: Choice, s: str) -> typing.Sequence[str]: return manager.execute(t.options_command) def parse(self, manager: "CommandManager", t: Choice, s: str) -> str: opts = manager.execute(t.options_command) if s not in opts: raise exceptions.TypeError("Invalid choice.") return s def is_valid(self, manager: "CommandManager", typ: typing.Any, val: typing.Any) -> bool: try: opts = manager.execute(typ.options_command) except exceptions.CommandError: return False return val in opts ALL_MARKERS = ['true', 'false'] + list(emoji.emoji) class _MarkerType(_BaseType): typ = Marker display = "marker" def completion(self, manager: "CommandManager", t: Choice, s: str) -> typing.Sequence[str]: return ALL_MARKERS def parse(self, manager: "CommandManager", t: Choice, s: str) -> str: if s not in ALL_MARKERS: raise exceptions.TypeError("Invalid choice.") if s == 'true': return ":default:" elif s == 'false': return "" return s def is_valid(self, manager: "CommandManager", typ: typing.Any, val: str) -> bool: return val in ALL_MARKERS class TypeManager: def __init__(self, *types): self.typemap = {} for t in types: self.typemap[t.typ] = t() def get(self, t: typing.Optional[typing.Type], default=None) -> typing.Optional[_BaseType]: if type(t) in self.typemap: return self.typemap[type(t)] return self.typemap.get(t, default) CommandTypes = TypeManager( _ArgType, _BoolType, _ChoiceType, _CmdType, _CutSpecType, _DataType, _FlowType, _FlowsType, _IntType, _MarkerType, _PathType, _StrType, _StrSeqType, _BytesType, )
1
15,992
This looks better than before, but we'll now likely run into issues with `'` characters in the spec. Maybe we can just use `manager.call_strings` instead?
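A sketch of the suggested alternative, assuming it drops into _FlowType.parse in the existing types.py (relying on that module's imports) and that CommandManager.call_strings keeps a (command, args) signature. Because the spec is passed as an already-split argument list, it is never re-lexed by the command parser, so ' characters in s need no escaping:

def parse(self, manager: "CommandManager", t: type, s: str) -> flow.Flow:
    try:
        # Pass the flow spec as a pre-split argument rather than interpolating
        # it into a command string, sidestepping quoting issues entirely.
        flows = manager.call_strings("view.flows.resolve", [s])
    except exceptions.CommandError as e:
        raise exceptions.TypeError(str(e)) from e
    if len(flows) != 1:
        raise exceptions.TypeError(
            "Command requires one flow, specification matched %s." % len(flows)
        )
    return flows[0]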
mitmproxy-mitmproxy
py
@@ -131,6 +131,12 @@ public class SparkReadConf { .parse(); } + public Long splitSizeOption() { + return confParser.longConf() + .option(SparkReadOptions.SPLIT_SIZE) + .parseOptional(); + } + public long splitSize() { return confParser.longConf() .option(SparkReadOptions.SPLIT_SIZE)
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.spark; import java.util.Map; import java.util.Set; import org.apache.iceberg.Table; import org.apache.iceberg.TableProperties; import org.apache.iceberg.hadoop.HadoopInputFile; import org.apache.iceberg.io.InputFile; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet; import org.apache.iceberg.util.PropertyUtil; import org.apache.spark.sql.SparkSession; /** * A class for common Iceberg configs for Spark reads. * <p> * If a config is set at multiple levels, the following order of precedence is used (top to bottom): * <ol> * <li>Read options</li> * <li>Session configuration</li> * <li>Table metadata</li> * </ol> * The most specific value is set in read options and takes precedence over all other configs. * If no read option is provided, this class checks the session configuration for any overrides. * If no applicable value is found in the session configuration, this class uses the table metadata. * <p> * Note this class is NOT meant to be serialized and sent to executors. 
*/ public class SparkReadConf { private static final Set<String> LOCALITY_WHITELIST_FS = ImmutableSet.of("hdfs"); private final Table table; private final Map<String, String> readOptions; private final SparkConfParser confParser; public SparkReadConf(SparkSession spark, Table table, Map<String, String> readOptions) { this.table = table; this.readOptions = readOptions; this.confParser = new SparkConfParser(spark, table, readOptions); } public boolean localityEnabled() { InputFile file = table.io().newInputFile(table.location()); if (file instanceof HadoopInputFile) { String scheme = ((HadoopInputFile) file).getFileSystem().getScheme(); boolean defaultValue = LOCALITY_WHITELIST_FS.contains(scheme); return PropertyUtil.propertyAsBoolean( readOptions, SparkReadOptions.LOCALITY, defaultValue); } return false; } public Long snapshotId() { return confParser.longConf() .option(SparkReadOptions.SNAPSHOT_ID) .parseOptional(); } public Long asOfTimestamp() { return confParser.longConf() .option(SparkReadOptions.AS_OF_TIMESTAMP) .parseOptional(); } public Long startSnapshotId() { return confParser.longConf() .option(SparkReadOptions.START_SNAPSHOT_ID) .parseOptional(); } public Long endSnapshotId() { return confParser.longConf() .option(SparkReadOptions.END_SNAPSHOT_ID) .parseOptional(); } public boolean parquetVectorizationEnabled() { return confParser.booleanConf() .option(SparkReadOptions.VECTORIZATION_ENABLED) .sessionConf(SparkSQLProperties.VECTORIZATION_ENABLED) .tableProperty(TableProperties.PARQUET_VECTORIZATION_ENABLED) .defaultValue(TableProperties.PARQUET_VECTORIZATION_ENABLED_DEFAULT) .parse(); } public int parquetBatchSize() { return confParser.intConf() .option(SparkReadOptions.VECTORIZATION_BATCH_SIZE) .tableProperty(TableProperties.PARQUET_BATCH_SIZE) .defaultValue(TableProperties.PARQUET_BATCH_SIZE_DEFAULT) .parse(); } public boolean orcVectorizationEnabled() { return confParser.booleanConf() .option(SparkReadOptions.VECTORIZATION_ENABLED) .sessionConf(SparkSQLProperties.VECTORIZATION_ENABLED) .tableProperty(TableProperties.ORC_VECTORIZATION_ENABLED) .defaultValue(TableProperties.ORC_VECTORIZATION_ENABLED_DEFAULT) .parse(); } public int orcBatchSize() { return confParser.intConf() .option(SparkReadOptions.VECTORIZATION_BATCH_SIZE) .tableProperty(TableProperties.ORC_BATCH_SIZE) .defaultValue(TableProperties.ORC_BATCH_SIZE_DEFAULT) .parse(); } public long splitSize() { return confParser.longConf() .option(SparkReadOptions.SPLIT_SIZE) .tableProperty(TableProperties.SPLIT_SIZE) .defaultValue(TableProperties.SPLIT_SIZE_DEFAULT) .parse(); } public int splitLookback() { return confParser.intConf() .option(SparkReadOptions.LOOKBACK) .tableProperty(TableProperties.SPLIT_LOOKBACK) .defaultValue(TableProperties.SPLIT_LOOKBACK_DEFAULT) .parse(); } public long splitOpenFileCost() { return confParser.longConf() .option(SparkReadOptions.FILE_OPEN_COST) .tableProperty(TableProperties.SPLIT_OPEN_FILE_COST) .defaultValue(TableProperties.SPLIT_OPEN_FILE_COST_DEFAULT) .parse(); } /** * Enables reading a timestamp without time zone as a timestamp with time zone. * <p> * Generally, this is not safe as a timestamp without time zone is supposed to represent the wall-clock time, * i.e. no matter the reader/writer timezone 3PM should always be read as 3PM, * but a timestamp with time zone represents instant semantics, i.e. the timestamp * is adjusted so that the corresponding time in the reader timezone is displayed. 
* <p> * When set to false (default), an exception must be thrown while reading a timestamp without time zone. * * @return boolean indicating if reading timestamps without timezone is allowed */ public boolean handleTimestampWithoutZone() { return confParser.booleanConf() .option(SparkReadOptions.HANDLE_TIMESTAMP_WITHOUT_TIMEZONE) .sessionConf(SparkSQLProperties.HANDLE_TIMESTAMP_WITHOUT_TIMEZONE) .defaultValue(SparkSQLProperties.HANDLE_TIMESTAMP_WITHOUT_TIMEZONE_DEFAULT) .parse(); } }
1
44,833
Should this replace `splitSize` instead of adding a parallel call? The `SparkReadConf` is not yet released, so we can change it still.
apache-iceberg
java
@@ -71,7 +71,18 @@ class LocalFileSystem(FileSystem): return if parents: - os.makedirs(path) + # for Python 2 compatibility + try: + FileNotExistsError + except NameError: + FileNotExistsError = OSError + + try: + os.makedirs(path) + except FileNotExistsError as err: + # somebody already created the path + if err.errno != errno.EEXIST: + raise else: if not os.path.exists(os.path.dirname(path)): raise MissingParentDirectory()
1
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ :class:`LocalTarget` provides a concrete implementation of a :py:class:`~luigi.target.Target` class that uses files on the local file system """ import os import random import shutil import tempfile import io import warnings import errno from luigi.format import FileWrapper, get_default_format from luigi.target import FileAlreadyExists, MissingParentDirectory, NotADirectory, FileSystem, FileSystemTarget, AtomicLocalFile class atomic_file(AtomicLocalFile): """Simple class that writes to a temp file and moves it on close() Also cleans up the temp file if close is not invoked """ def move_to_final_destination(self): os.rename(self.tmp_path, self.path) def generate_tmp_path(self, path): return path + '-luigi-tmp-%09d' % random.randrange(0, 1e10) class LocalFileSystem(FileSystem): """ Wrapper for access to file system operations. Work in progress - add things as needed. """ def copy(self, old_path, new_path, raise_if_exists=False): if raise_if_exists and os.path.exists(new_path): raise RuntimeError('Destination exists: %s' % new_path) d = os.path.dirname(new_path) if d and not os.path.exists(d): self.mkdir(d) shutil.copy(old_path, new_path) def exists(self, path): return os.path.exists(path) def mkdir(self, path, parents=True, raise_if_exists=False): if self.exists(path): if raise_if_exists: raise FileAlreadyExists() elif not self.isdir(path): raise NotADirectory() else: return if parents: os.makedirs(path) else: if not os.path.exists(os.path.dirname(path)): raise MissingParentDirectory() os.mkdir(path) def isdir(self, path): return os.path.isdir(path) def listdir(self, path): for dir_, _, files in os.walk(path): assert dir_.startswith(path) for name in files: yield os.path.join(dir_, name) def remove(self, path, recursive=True): if recursive and self.isdir(path): shutil.rmtree(path) else: os.remove(path) def move(self, old_path, new_path, raise_if_exists=False): """ Move file atomically. If source and destination are located on different filesystems, atomicity is approximated but cannot be guaranteed. """ if raise_if_exists and os.path.exists(new_path): raise FileAlreadyExists('Destination exists: %s' % new_path) d = os.path.dirname(new_path) if d and not os.path.exists(d): self.mkdir(d) try: os.rename(old_path, new_path) except OSError as err: if err.errno == errno.EXDEV: new_path_tmp = '%s-%09d' % (new_path, random.randint(0, 999999999)) shutil.copy(old_path, new_path_tmp) os.rename(new_path_tmp, new_path) os.remove(old_path) else: raise err def rename_dont_move(self, path, dest): """ Rename ``path`` to ``dest``, but don't move it into the ``dest`` folder (if it is a folder). This method is just a wrapper around the ``move`` method of LocalTarget. 
""" self.move(path, dest, raise_if_exists=True) class LocalTarget(FileSystemTarget): fs = LocalFileSystem() def __init__(self, path=None, format=None, is_tmp=False): if format is None: format = get_default_format() if not path: if not is_tmp: raise Exception('path or is_tmp must be set') path = os.path.join(tempfile.gettempdir(), 'luigi-tmp-%09d' % random.randint(0, 999999999)) super(LocalTarget, self).__init__(path) self.format = format self.is_tmp = is_tmp def makedirs(self): """ Create all parent folders if they do not exist. """ normpath = os.path.normpath(self.path) parentfolder = os.path.dirname(normpath) if parentfolder: try: os.makedirs(parentfolder) except OSError: pass def open(self, mode='r'): rwmode = mode.replace('b', '').replace('t', '') if rwmode == 'w': self.makedirs() return self.format.pipe_writer(atomic_file(self.path)) elif rwmode == 'r': fileobj = FileWrapper(io.BufferedReader(io.FileIO(self.path, mode))) return self.format.pipe_reader(fileobj) else: raise Exception("mode must be 'r' or 'w' (got: %s)" % mode) def move(self, new_path, raise_if_exists=False): self.fs.move(self.path, new_path, raise_if_exists=raise_if_exists) def move_dir(self, new_path): self.move(new_path) def remove(self): self.fs.remove(self.path) def copy(self, new_path, raise_if_exists=False): self.fs.copy(self.path, new_path, raise_if_exists) @property def fn(self): warnings.warn("Use LocalTarget.path to reference filename", DeprecationWarning, stacklevel=2) return self.path def __del__(self): if self.is_tmp and self.exists(): self.remove()
1
16,926
To reduce complexity, please use OSError on Python3 as well.
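A sketch of the requested simplification, as a standalone helper (the name mkdir_with_parents is illustrative). On Python 3, FileExistsError subclasses OSError, so a single except clause covers both interpreters and no alias is needed:

import errno
import os

def mkdir_with_parents(path):
    """Create path and its parents, tolerating a concurrent creator."""
    try:
        os.makedirs(path)
    except OSError as err:
        # Raised on both Python 2 and 3 when the leaf already exists;
        # re-raise anything that is not a plain "already exists" error.
        if err.errno != errno.EEXIST:
            raise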
spotify-luigi
py
@@ -209,3 +209,10 @@ def follow_selected(tab_obj: apitypes.Tab, *, tab: bool = False) -> None: tab_obj.caret.follow_selected(tab=tab) except apitypes.WebTabError as e: raise cmdutils.CommandError(str(e)) + + [email protected]() [email protected]('tab', value=cmdutils.Value.cur_tab) +def reverse_selection(tab: apitypes.Tab) -> None: + """Toggle caret selection mode.""" + tab.caret.reverse_selection()
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2018 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Commands related to caret browsing.""" from qutebrowser.api import cmdutils, apitypes @cmdutils.register(modes=[cmdutils.KeyMode.caret]) @cmdutils.argument('tab', value=cmdutils.Value.cur_tab) @cmdutils.argument('count', value=cmdutils.Value.count) def move_to_next_line(tab: apitypes.Tab, count: int = 1) -> None: """Move the cursor or selection to the next line. Args: count: How many lines to move. """ tab.caret.move_to_next_line(count) @cmdutils.register(modes=[cmdutils.KeyMode.caret]) @cmdutils.argument('tab', value=cmdutils.Value.cur_tab) @cmdutils.argument('count', value=cmdutils.Value.count) def move_to_prev_line(tab: apitypes.Tab, count: int = 1) -> None: """Move the cursor or selection to the prev line. Args: count: How many lines to move. """ tab.caret.move_to_prev_line(count) @cmdutils.register(modes=[cmdutils.KeyMode.caret]) @cmdutils.argument('tab', value=cmdutils.Value.cur_tab) @cmdutils.argument('count', value=cmdutils.Value.count) def move_to_next_char(tab: apitypes.Tab, count: int = 1) -> None: """Move the cursor or selection to the next char. Args: count: How many chars to move. """ tab.caret.move_to_next_char(count) @cmdutils.register(modes=[cmdutils.KeyMode.caret]) @cmdutils.argument('tab', value=cmdutils.Value.cur_tab) @cmdutils.argument('count', value=cmdutils.Value.count) def move_to_prev_char(tab: apitypes.Tab, count: int = 1) -> None: """Move the cursor or selection to the previous char. Args: count: How many chars to move. """ tab.caret.move_to_prev_char(count) @cmdutils.register(modes=[cmdutils.KeyMode.caret]) @cmdutils.argument('tab', value=cmdutils.Value.cur_tab) @cmdutils.argument('count', value=cmdutils.Value.count) def move_to_end_of_word(tab: apitypes.Tab, count: int = 1) -> None: """Move the cursor or selection to the end of the word. Args: count: How many words to move. """ tab.caret.move_to_end_of_word(count) @cmdutils.register(modes=[cmdutils.KeyMode.caret]) @cmdutils.argument('tab', value=cmdutils.Value.cur_tab) @cmdutils.argument('count', value=cmdutils.Value.count) def move_to_next_word(tab: apitypes.Tab, count: int = 1) -> None: """Move the cursor or selection to the next word. Args: count: How many words to move. """ tab.caret.move_to_next_word(count) @cmdutils.register(modes=[cmdutils.KeyMode.caret]) @cmdutils.argument('tab', value=cmdutils.Value.cur_tab) @cmdutils.argument('count', value=cmdutils.Value.count) def move_to_prev_word(tab: apitypes.Tab, count: int = 1) -> None: """Move the cursor or selection to the previous word. Args: count: How many words to move.
""" tab.caret.move_to_prev_word(count) @cmdutils.register(modes=[cmdutils.KeyMode.caret]) @cmdutils.argument('tab', value=cmdutils.Value.cur_tab) def move_to_start_of_line(tab: apitypes.Tab) -> None: """Move the cursor or selection to the start of the line.""" tab.caret.move_to_start_of_line() @cmdutils.register(modes=[cmdutils.KeyMode.caret]) @cmdutils.argument('tab', value=cmdutils.Value.cur_tab) def move_to_end_of_line(tab: apitypes.Tab) -> None: """Move the cursor or selection to the end of line.""" tab.caret.move_to_end_of_line() @cmdutils.register(modes=[cmdutils.KeyMode.caret]) @cmdutils.argument('tab', value=cmdutils.Value.cur_tab) @cmdutils.argument('count', value=cmdutils.Value.count) def move_to_start_of_next_block(tab: apitypes.Tab, count: int = 1) -> None: """Move the cursor or selection to the start of next block. Args: count: How many blocks to move. """ tab.caret.move_to_start_of_next_block(count) @cmdutils.register(modes=[cmdutils.KeyMode.caret]) @cmdutils.argument('tab', value=cmdutils.Value.cur_tab) @cmdutils.argument('count', value=cmdutils.Value.count) def move_to_start_of_prev_block(tab: apitypes.Tab, count: int = 1) -> None: """Move the cursor or selection to the start of previous block. Args: count: How many blocks to move. """ tab.caret.move_to_start_of_prev_block(count) @cmdutils.register(modes=[cmdutils.KeyMode.caret]) @cmdutils.argument('tab', value=cmdutils.Value.cur_tab) @cmdutils.argument('count', value=cmdutils.Value.count) def move_to_end_of_next_block(tab: apitypes.Tab, count: int = 1) -> None: """Move the cursor or selection to the end of next block. Args: count: How many blocks to move. """ tab.caret.move_to_end_of_next_block(count) @cmdutils.register(modes=[cmdutils.KeyMode.caret]) @cmdutils.argument('tab', value=cmdutils.Value.cur_tab) @cmdutils.argument('count', value=cmdutils.Value.count) def move_to_end_of_prev_block(tab: apitypes.Tab, count: int = 1) -> None: """Move the cursor or selection to the end of previous block. Args: count: How many blocks to move. """ tab.caret.move_to_end_of_prev_block(count) @cmdutils.register(modes=[cmdutils.KeyMode.caret]) @cmdutils.argument('tab', value=cmdutils.Value.cur_tab) def move_to_start_of_document(tab: apitypes.Tab) -> None: """Move the cursor or selection to the start of the document.""" tab.caret.move_to_start_of_document() @cmdutils.register(modes=[cmdutils.KeyMode.caret]) @cmdutils.argument('tab', value=cmdutils.Value.cur_tab) def move_to_end_of_document(tab: apitypes.Tab) -> None: """Move the cursor or selection to the end of the document.""" tab.caret.move_to_end_of_document() @cmdutils.register(modes=[cmdutils.KeyMode.caret]) @cmdutils.argument('tab', value=cmdutils.Value.cur_tab) def toggle_selection(tab: apitypes.Tab) -> None: """Toggle caret selection mode.""" tab.caret.toggle_selection() @cmdutils.register(modes=[cmdutils.KeyMode.caret]) @cmdutils.argument('tab', value=cmdutils.Value.cur_tab) def drop_selection(tab: apitypes.Tab) -> None: """Drop selection and keep selection mode enabled.""" tab.caret.drop_selection() @cmdutils.register() @cmdutils.argument('tab_obj', value=cmdutils.Value.cur_tab) def follow_selected(tab_obj: apitypes.Tab, *, tab: bool = False) -> None: """Follow the selected text. Args: tab: Load the selected link in a new tab. """ try: tab_obj.caret.follow_selected(tab=tab) except apitypes.WebTabError as e: raise cmdutils.CommandError(str(e))
1
22,978
That seems wrong.
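The objection most plausibly targets the docstring, which was copy-pasted from toggle_selection, and perhaps also the bare register() call, since every other caret command in this file passes modes=[cmdutils.KeyMode.caret]. A possible correction — the docstring wording is a guess, not from the source:

@cmdutils.register(modes=[cmdutils.KeyMode.caret])
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
def reverse_selection(tab: apitypes.Tab) -> None:
    """Swap the stationary and moving end of the selection."""
    tab.caret.reverse_selection()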
qutebrowser-qutebrowser
py
@@ -55,6 +55,7 @@ public class ConfirmEmailPage implements java.io.Serializable { if (confirmEmailData != null) { user = confirmEmailData.getAuthenticatedUser(); session.setUser(user); + session.configureSessionTimeout(); // TODO: is this needed here? (it can't hurt, but still) JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("confirmEmail.details.success")); return "/dataverse.xhtml?faces-redirect=true"; }
1
package edu.harvard.iq.dataverse.confirmemail; import edu.harvard.iq.dataverse.DataverseSession; import edu.harvard.iq.dataverse.actionlogging.ActionLogServiceBean; import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser; import edu.harvard.iq.dataverse.util.BundleUtil; import edu.harvard.iq.dataverse.util.JsfHelper; import java.util.logging.Logger; import javax.ejb.EJB; import javax.faces.view.ViewScoped; import javax.inject.Inject; import javax.inject.Named; /** * * @author bsilverstein */ @ViewScoped @Named("ConfirmEmailPage") public class ConfirmEmailPage implements java.io.Serializable { private static final Logger logger = Logger.getLogger(ConfirmEmailPage.class.getCanonicalName()); @EJB ConfirmEmailServiceBean confirmEmailService; @Inject DataverseSession session; @EJB ActionLogServiceBean actionLogSvc; /** * The unique string used to look up a user and continue the email * confirmation. */ String token; /** * The user looked up by the token who will be confirming their email. */ AuthenticatedUser user; /** * The link that is emailed to the user to confirm the email that contains a * token. */ String confirmEmailUrl; ConfirmEmailData confirmEmailData; public String init() { if (token != null) { ConfirmEmailExecResponse confirmEmailExecResponse = confirmEmailService.processToken(token); confirmEmailData = confirmEmailExecResponse.getConfirmEmailData(); if (confirmEmailData != null) { user = confirmEmailData.getAuthenticatedUser(); session.setUser(user); JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("confirmEmail.details.success")); return "/dataverse.xhtml?faces-redirect=true"; } } JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("confirmEmail.details.failure")); /** * @todo It would be nice to send a 404 response but if we enable this * then the user sees the contents of 404.xhtml rather than the contents * of JsfHelper.addErrorMessage above! */ // try { // FacesContext.getCurrentInstance().getExternalContext().responseSendError(HttpServletResponse.SC_NOT_FOUND, null); // } catch (IOException ex) { // } return null; } public String getToken() { return token; } public void setToken(String token) { this.token = token; } public AuthenticatedUser getUser() { return user; } public String getConfirmEmailUrl() { return confirmEmailUrl; } public ConfirmEmailData getConfirmEmailData() { return confirmEmailData; } public void setConfirmEmailData(ConfirmEmailData confirmEmailData) { this.confirmEmailData = confirmEmailData; } public boolean isInvalidToken() { if (confirmEmailData == null) { return true; } else { return false; } } public String getRedirectToAccountInfoTab() { return "/dataverseuser.xhtml?selectTab=accountInfo&faces-redirect=true"; } }
1
40,322
Yes, this is needed here. This is a builtin user who just reset their password.
IQSS-dataverse
java
@@ -396,13 +396,12 @@ class HDF5PackageStore(PackageStore): """ self._find_path_write() buildfile = name.lstrip('/').replace('/', '.') - storepath = os.path.join(self._pkg_dir, buildfile) + storepath = self._object_path('.' + buildfile) with pd.HDFStore(storepath, mode=self._mode) as store: store[self.DF_NAME] = df filehash = digest_file(storepath) self._add_to_contents(buildfile, filehash, ext, path, target) - objpath = os.path.join(self._pkg_dir, self.OBJ_DIR, filehash) - os.rename(storepath, objpath) + os.rename(storepath, self._object_path(filehash)) @classmethod def ls_packages(cls, pkg_dir):
1
""" Build: parse and add user-supplied files to store """ import json import os import re from shutil import copyfile import tempfile import zlib import pandas as pd import requests try: import fastparquet except ImportError: fastparquet = None try: from pyspark.sql import SparkSession except ImportError: SparkSession = None from .const import FORMAT_HDF5, FORMAT_PARQ, FORMAT_SPARK, NodeType, TargetType, TYPE_KEY from .hashing import digest_file, hash_contents # start with alpha (_ may clobber attrs), continue with alphanumeric or _ VALID_NAME_RE = re.compile(r'^[a-zA-Z]\w*$') CHUNK_SIZE = 4096 ZLIB_LEVEL = 2 # Maximum level. ZLIB_METHOD = zlib.DEFLATED # The only supported one. ZLIB_WBITS = zlib.MAX_WBITS | 16 # Add a gzip header and checksum. CONTENTS_FILE = 'contents.json' class StoreException(Exception): """ Exception class for store I/O """ pass class PackageStore(object): """ Base class for managing Quilt data package repositories. This class and its subclasses abstract file formats, file naming and reading and writing to/from data files. """ PACKAGE_DIR_NAME = 'quilt_packages' PACKAGE_FILE_EXT = '.json' BUILD_DIR = 'build' OBJ_DIR = 'objs' @classmethod def find_package_dirs(cls, start='.'): """ Walks up the directory tree and looks for `quilt_packages` directories in the ancestors of the starting directory. The algorithm is the same as Node's `node_modules` algorithm ( https://nodejs.org/docs/v7.4.0/api/modules.html#modules_all_together ), except that it doesn't stop at the top-level `quilt_packages` directory. Returns a (possibly empty) generator. """ path = os.path.realpath(start) while True: parent_path, name = os.path.split(path) if name != cls.PACKAGE_DIR_NAME: package_dir = os.path.join(path, cls.PACKAGE_DIR_NAME) if os.path.isdir(package_dir): yield package_dir if parent_path == path: # The only reliable way to detect the root. break path = parent_path def __init__(self, user, package, mode): self._user = user self._package = package self._mode = mode self._pkg_dir = None self._path = None self._find_path_read() def __enter__(self): return self def __exit__(self, type, value, traceback): pass def file(self, hash_list): """ Returns the path to an object file that matches the given hash. """ assert isinstance(hash_list, list) assert len(hash_list) == 1, "File objects must be contained in one file." filehash = hash_list[0] objpath = os.path.join(self._pkg_dir, self.OBJ_DIR, filehash) return objpath def dataframe(self, hash_list): """ Creates a DataFrame from a set of objects (identified by hashes). """ raise NotImplementedError() def save_df(self, df, name, path, ext, target): """ Save a DataFrame to the store. """ raise NotImplementedError() def save_file(self, srcfile, name, path, target): """ Save a (raw) file to the store. """ self._find_path_write() filehash = digest_file(srcfile) fullname = name.lstrip('/').replace('/', '.') self._add_to_contents(fullname, filehash, '', path, target) objpath = os.path.join(self._pkg_dir, self.OBJ_DIR, filehash) if not os.path.exists(objpath): copyfile(srcfile, objpath) def get_contents(self): """ Returns a dictionary with the contents of the package. """ try: with open(self._path, 'r') as contents_file: contents = json.load(contents_file) except IOError: contents = {} # Make sure the top-level a valid node (GROUP by default) contents.setdefault(TYPE_KEY, NodeType.GROUP.value) return contents def clear_contents(self): """ Removes the package's contents file. 
""" if self._path: os.remove(self._path) self._path = None def save_contents(self, contents): """ Saves an updated version of the package's contents. """ with open(self._path, 'w') as contents_file: json.dump(contents, contents_file, indent=2, sort_keys=True) def get(self, path): """ Read a group or object from the store. """ if not self.exists(): raise StoreException("Package not found") key = path.lstrip('/') ipath = key.split('/') if key else [] ptr = self.get_contents() path_so_far = [] for node in ipath: path_so_far += [node] if not node in ptr: raise StoreException("Key {path} Not Found in Package {owner}/{pkg}".format( path="/".join(path_so_far), owner=self._user, pkg=self._package)) ptr = ptr[node] node = ptr node_type = NodeType(node[TYPE_KEY]) if node_type is NodeType.GROUP: return node elif node_type is NodeType.TABLE: return self.dataframe(node['hashes']) elif node_type is NodeType.FILE: return self.file(node['hashes']) else: assert False, "Unhandled NodeType {nt}".format(nt=node_type) def get_hash(self): """ Returns the hash digest of the package data. """ raise StoreException("Not Implemented") def get_path(self): """ Returns the path to the package's contents file. """ return self._path def exists(self): """ Returns True if the package is already installed. """ return not self._path is None def install(self, contents, urls): """ Download and install a package locally. """ self._find_path_write() local_filename = self.get_path() with open(local_filename, 'w') as contents_file: json.dump(contents, contents_file) # Download individual object files and store # in object dir. Verify individual file hashes. # Verify global hash? def install_table(node, urls): """ Downloads and installs the set of objects for one table. """ hashes = node['hashes'] for download_hash in hashes: url = urls[download_hash] # download and install response = requests.get(url, stream=True) if not response.ok: msg = "Download {hash} failed: error {code}" raise StoreException(msg.format(hash=download_hash, code=response.status_code)) local_filename = os.path.join(self._pkg_dir, self.OBJ_DIR, download_hash) with open(local_filename, 'wb') as output_file: # `requests` will automatically un-gzip the content, as long as # the 'Content-Encoding: gzip' header is set. for chunk in response.iter_content(chunk_size=CHUNK_SIZE): if chunk: # filter out keep-alive new chunks output_file.write(chunk) file_hash = digest_file(local_filename) if file_hash != download_hash: os.remove(local_filename) raise StoreException("Mismatched hash! Expected %s, got %s." % (download_hash, file_hash)) def install_tables(contents, urls): """ Parses package contents and calls install_table for each table. """ for key, node in contents.items(): if key == TYPE_KEY: continue if NodeType(node[TYPE_KEY]) is NodeType.GROUP: return install_tables(node, urls) else: install_table(node, urls) return install_tables(contents, urls) def _object_path(self, objhash): """ Returns the path to an object file based on its hash. """ return os.path.join(self._pkg_dir, self.OBJ_DIR, objhash) def _find_path_read(self): """ Finds an existing package in one of the package directories. 
""" self._path = None self._pkg_dir = None if not VALID_NAME_RE.match(self._user): raise StoreException("Invalid user name: %r" % self._user) if not VALID_NAME_RE.match(self._package): raise StoreException("Invalid package name: %r" % self._package) pkg_dirs = PackageStore.find_package_dirs() for package_dir in pkg_dirs: path = os.path.join(package_dir, self._user, self._package + self.PACKAGE_FILE_EXT) if os.path.exists(path): self._path = path self._pkg_dir = package_dir return return def _find_path_write(self): """ Creates a path to store a data package in the innermost `quilt_packages` directory (or in a new `quilt_packages` directory in the current directory) and allocates a per-user directory if needed. """ if not VALID_NAME_RE.match(self._user): raise StoreException("Invalid user name: %r" % self._user) if not VALID_NAME_RE.match(self._package): raise StoreException("Invalid package name: %r" % self._package) package_dir = next(PackageStore.find_package_dirs(), self.PACKAGE_DIR_NAME) user_path = os.path.join(package_dir, self._user) if not os.path.isdir(user_path): os.makedirs(user_path) obj_path = os.path.join(package_dir, self.OBJ_DIR) if not os.path.isdir(obj_path): os.makedirs(obj_path) path = os.path.join(user_path, self._package + self.PACKAGE_FILE_EXT) self._path = path self._pkg_dir = package_dir return def _add_to_contents(self, fullname, objhash, ext, path, target): """ Adds an object (name-hash mapping) to the package's contents. """ contents = self.get_contents() ipath = fullname.split('.') leaf = ipath.pop() ptr = contents ptr.setdefault(TYPE_KEY, NodeType.GROUP.value) for node in ipath: ptr = ptr.setdefault(node, {TYPE_KEY: NodeType.GROUP.value}) try: target_type = TargetType(target) if target_type is TargetType.PANDAS: node_type = NodeType.TABLE elif target_type is TargetType.FILE: node_type = NodeType.FILE else: assert False, "Unhandled TargetType {tt}".format(tt=target_type) except ValueError: raise StoreException("Unrecognized target {tgt}".format(tgt=target)) ptr[leaf] = dict({TYPE_KEY: node_type.value}, hashes=[objhash], metadata=dict(q_ext=ext, q_path=path, q_target=target) ) self.save_contents(contents) class HDF5PackageStore(PackageStore): """ HDF5 Implementation of PackageStore. """ DF_NAME = 'df' def __init__(self, user, package, mode): super(HDF5PackageStore, self).__init__(user, package, mode) self.__store = None def dataframe(self, hash_list): """ Creates a DataFrame from a set of objects (identified by hashes). """ assert len(hash_list) == 1, "Multi-file DFs not supported in HDF5." filehash = hash_list[0] with pd.HDFStore(self._object_path(filehash), 'r') as store: return store.get(self.DF_NAME) def get_hash(self): return hash_contents(self.get_contents()) class UploadFile(object): """ Helper class to manage temporary package files uploaded by push. """ def __init__(self, store, objhash): self._store = store self._hash = objhash def __enter__(self): self._temp_file = tempfile.TemporaryFile() with open(self._store._object_path(self._hash), 'rb') as input_file: zlib_obj = zlib.compressobj(ZLIB_LEVEL, ZLIB_METHOD, ZLIB_WBITS) for chunk in iter(lambda: input_file.read(CHUNK_SIZE), b''): self._temp_file.write(zlib_obj.compress(chunk)) self._temp_file.write(zlib_obj.flush()) self._temp_file.seek(0) return self._temp_file def __exit__(self, type, value, traceback): self._temp_file.close() def tempfile(self, hash): """ Create and return a temporary file for uploading to a registry. 
""" return self.UploadFile(self, hash) def save_df(self, df, name, path, ext, target): """ Save a DataFrame to the store. """ self._find_path_write() buildfile = name.lstrip('/').replace('/', '.') storepath = os.path.join(self._pkg_dir, buildfile) with pd.HDFStore(storepath, mode=self._mode) as store: store[self.DF_NAME] = df filehash = digest_file(storepath) self._add_to_contents(buildfile, filehash, ext, path, target) objpath = os.path.join(self._pkg_dir, self.OBJ_DIR, filehash) os.rename(storepath, objpath) @classmethod def ls_packages(cls, pkg_dir): """ List installed packages. """ hdf5_packages = [ (user, pkg[:-len(HDF5PackageStore.PACKAGE_FILE_EXT)]) for user in os.listdir(pkg_dir) for pkg in os.listdir(os.path.join(pkg_dir, user)) if pkg.endswith(HDF5PackageStore.PACKAGE_FILE_EXT)] return hdf5_packages class ParquetPackageStore(PackageStore): """ Parquet Implementation of PackageStore. """ def __init__(self, user, package, mode): if fastparquet is None: raise StoreException("Module fastparquet is required for ParquetPackageStore.") super(ParquetPackageStore, self).__init__(user, package, mode) def save_df(self, df, name, path, ext, target): """ Save a DataFrame to the store. """ self._find_path_write() buildfile = name.lstrip('/').replace('/', '.') storepath = os.path.join(self._pkg_dir, buildfile) fastparquet.write(storepath, df) filehash = digest_file(storepath) self._add_to_contents(buildfile, filehash, ext, path, target) objpath = os.path.join(self._pkg_dir, self.OBJ_DIR, filehash) os.rename(storepath, objpath) def dataframe(self, hash_list): """ Creates a DataFrame from a set of objects (identified by hashes). """ assert len(hash_list) == 1, "Multi-file DFs not supported yet." filehash = hash_list[0] pfile = fastparquet.ParquetFile(self._object_path(filehash)) return pfile.to_pandas() def get_hash(self): raise StoreException("Not Implemented") @classmethod def ls_packages(cls, pkg_dir): """ List installed packages. """ parq_packages = [ (user, pkg) for user in os.listdir(pkg_dir) for pkg in os.listdir(os.path.join(pkg_dir, user)) if os.path.isdir(pkg)] return parq_packages class SparkPackageStore(ParquetPackageStore): """ Spark Implementation of PackageStore. """ def __init__(self, user, package, mode): super(SparkPackageStore, self).__init__(user, package, mode) if SparkSession is None: raise StoreException("Module SparkSession from pyspark.sql is required for " + "SparkPackageStore.") def dataframe(self, hash_list): """ Creates a DataFrame from a set of objects (identified by hashes). """ spark = SparkSession.builder.getOrCreate() assert len(hash_list) == 1, "Multi-file DFs not supported yet." filehash = hash_list[0] df = spark.read.parquet(self._object_path(filehash)) return df # Helper functions def get_store(user, package, format=None, mode='r'): """ Return a PackageStore object of the appropriate type for a given data package. """ pkg_format = format if not pkg_format: pkg_format = os.environ.get('QUILT_PACKAGE_FORMAT', FORMAT_HDF5) if pkg_format == FORMAT_PARQ: return ParquetPackageStore(user, package, mode) elif pkg_format == FORMAT_SPARK: return SparkPackageStore(user, package, mode) else: return HDF5PackageStore(user, package, mode) def ls_packages(pkg_dir): """ List all packages from all package directories. 
""" pkg_format = os.environ.get('QUILT_PACKAGE_FORMAT', FORMAT_HDF5) if pkg_format == FORMAT_HDF5: packages = HDF5PackageStore.ls_packages(pkg_dir) elif pkg_format == FORMAT_PARQ: packages = ParquetPackageStore.ls_packages(pkg_dir) else: raise StoreException("Unsupported Package Format %s" % pkg_format) return packages
1
14,910
This seems to move the storage of temporary files to the CWD. Is that right? I don't think we should do that. If the process gets interrupted, we should try our best to clean up, but if even that fails, it'd be nice if the mess was left in a different directory. Maybe we should have a directory explicitly for builds?
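A sketch of the build-directory idea. PackageStore already defines BUILD_DIR = 'build' but never uses it in the code shown, so staging temporary build files there would keep any debris from an interrupted build in one predictable place; this rewrite of save_df assumes the surrounding HDF5PackageStore context and its imports:

def save_df(self, df, name, path, ext, target):
    """Save a DataFrame to the store, staging it under BUILD_DIR."""
    self._find_path_write()
    buildfile = name.lstrip('/').replace('/', '.')
    # Stage the temporary HDF5 file in a dedicated build directory inside
    # the package store instead of the package root.
    build_dir = os.path.join(self._pkg_dir, self.BUILD_DIR)
    if not os.path.isdir(build_dir):
        os.makedirs(build_dir)
    storepath = os.path.join(build_dir, buildfile)
    with pd.HDFStore(storepath, mode=self._mode) as store:
        store[self.DF_NAME] = df
    filehash = digest_file(storepath)
    self._add_to_contents(buildfile, filehash, ext, path, target)
    # Promote the finished object into the content-addressed object store.
    os.rename(storepath, self._object_path(filehash))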
quiltdata-quilt
py
@@ -67,6 +67,10 @@ type Config struct { // If not set, it uses all versions available. // Warning: This API should not be considered stable and will change soon. Versions []protocol.VersionNumber + // Ask the server to truncate the connection ID sent in the Public Header. + // This saves 8 bytes in the Public Header in every packet. However, if the IP address of the server changes, the connection cannot be migrated. + // Currently only valid for the client. + RequestConnectionIDTruncation bool } // A Listener for incoming QUIC connections
1
package quic import ( "crypto/tls" "io" "net" "github.com/lucas-clemente/quic-go/protocol" ) // Stream is the interface implemented by QUIC streams type Stream interface { io.Reader io.Writer io.Closer StreamID() protocol.StreamID // Reset closes the stream with an error. Reset(error) } // A Session is a QUIC connection between two peers. type Session interface { // AcceptStream returns the next stream opened by the peer, blocking until one is available. // Since stream 1 is reserved for the crypto stream, the first stream is either 2 (for a client) or 3 (for a server). AcceptStream() (Stream, error) // OpenStream opens a new QUIC stream, returning a special error when the peer's concurrent stream limit is reached. // New streams always have the smallest possible stream ID. // TODO: Enable testing for the special error OpenStream() (Stream, error) // OpenStreamSync opens a new QUIC stream, blocking until the peer's concurrent stream limit allows a new stream to be opened. // It always picks the smallest possible stream ID. OpenStreamSync() (Stream, error) // LocalAddr returns the local address. LocalAddr() net.Addr // RemoteAddr returns the address of the peer. RemoteAddr() net.Addr // Close closes the connection. The error will be sent to the remote peer in a CONNECTION_CLOSE frame. An error value of nil is allowed and will cause a normal PeerGoingAway to be sent. Close(error) error } // ConnState is the status of the connection type ConnState int const ( // ConnStateInitial is the initial state ConnStateInitial ConnState = iota // ConnStateVersionNegotiated means that version negotiation is complete ConnStateVersionNegotiated // ConnStateSecure means that the connection is encrypted ConnStateSecure // ConnStateForwardSecure means that the connection is forward secure ConnStateForwardSecure ) // ConnStateCallback is called every time the connection moves to another connection state. type ConnStateCallback func(Session, ConnState) // Config contains all configuration data needed for a QUIC server or client. // More config parameters (such as timeouts) will be added soon, see e.g. https://github.com/lucas-clemente/quic-go/issues/441. type Config struct { TLSConfig *tls.Config // ConnStateCallback will be called when the QUIC version is successfully negotiated or when the encryption level changes. // If this field is not set, the Dial functions will return only when the connection is forward secure. // Callbacks have to be thread-safe, since they might be called in separate goroutines. ConnState ConnStateCallback // The QUIC versions that can be negotiated. // If not set, it uses all versions available. // Warning: This API should not be considered stable and will change soon. Versions []protocol.VersionNumber } // A Listener for incoming QUIC connections type Listener interface { // Close the server, sending CONNECTION_CLOSE frames to each peer. Close() error // Addr returns the local network addr that the server is listening on. Addr() net.Addr // Serve starts the main server loop, and blocks until a network error occurs or the server is closed. Serve() error }
1
6,071
Please explain why a user would enable this (space savings), and the requirements for this option to be safe.
lucas-clemente-quic-go
go
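Editor's note on the row above: the reviewer's questions are answered by the doc comment the patch itself adds -- enabling truncation saves 8 bytes of Public Header per packet, but it is only safe when the server's IP address cannot change (the connection can no longer be migrated), and it is currently client-only. A minimal usage sketch follows; the `DialAddr` entry point is an assumption, since the excerpt only shows the `Config` and `Session` types.

```go
package main

import quic "github.com/lucas-clemente/quic-go"

func dialTruncated() (quic.Session, error) {
	cfg := &quic.Config{
		// Trades connection migration for 8 bytes of Public Header per
		// packet -- only safe when the server's IP address is stable for
		// the lifetime of the connection. Client-only.
		RequestConnectionIDTruncation: true,
	}
	// DialAddr is assumed here; it is not shown in the excerpt above.
	return quic.DialAddr("quic.example.com:4433", cfg)
}
```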
@@ -43,12 +43,16 @@ export default function ContainerSelect( { value, ...props } ) {
-	const accounts = useSelect( ( select ) => select( STORE_NAME ).getAccounts() );
+	const { accounts, hasResolvedAccounts } = useSelect( ( select ) => ( {
+		accounts: select( STORE_NAME ).getAccounts(),
+		hasResolvedAccounts: select( STORE_NAME ).hasFinishedResolution( 'getAccounts' ),
+	} ) );
+
 	const accountID = useSelect( ( select ) => select( STORE_NAME ).getAccountID() );
 	const hasExistingTag = useSelect( ( select ) => select( STORE_NAME ).hasExistingTag() );
-	const isLoadingContainers = useSelect( ( select ) => select( STORE_NAME ).isDoingGetContainers( accountID ) );
+	const hasResolvedContainers = useSelect( ( select ) => select( STORE_NAME ).hasFinishedResolution( 'getContainers', [ accountID ] ) );
 
-	if ( accounts === undefined || containers === undefined || isLoadingContainers ) {
+	if ( accounts === undefined || ! hasResolvedAccounts || containers === undefined || ! hasResolvedContainers ) {
 		return <ProgressBar small />;
 	}
1
/** * Container Select component. * * Site Kit by Google, Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * External dependencies */ import classnames from 'classnames'; import PropTypes from 'prop-types'; /** * WordPress dependencies */ import { __ } from '@wordpress/i18n'; /** * Internal dependencies */ import Data from 'googlesitekit-data'; import { Select, Option } from '../../../../material-components'; import { STORE_NAME, CONTAINER_CREATE } from '../../datastore/constants'; import ProgressBar from '../../../../components/progress-bar'; import { isValidAccountID } from '../../util'; const { useSelect } = Data; export default function ContainerSelect( { containers, className, value, ...props } ) { const accounts = useSelect( ( select ) => select( STORE_NAME ).getAccounts() ); const accountID = useSelect( ( select ) => select( STORE_NAME ).getAccountID() ); const hasExistingTag = useSelect( ( select ) => select( STORE_NAME ).hasExistingTag() ); const isLoadingContainers = useSelect( ( select ) => select( STORE_NAME ).isDoingGetContainers( accountID ) ); if ( accounts === undefined || containers === undefined || isLoadingContainers ) { return <ProgressBar small />; } return ( <Select className={ classnames( 'googlesitekit-tagmanager__select-container', className ) } disabled={ hasExistingTag || ! isValidAccountID( accountID ) } value={ value } enhanced outlined { ...props } > { ( containers || [] ) .concat( { // eslint-disable-next-line sitekit/camelcase-acronyms publicId: CONTAINER_CREATE, name: __( 'Set up a new container', 'google-site-kit' ), } ) .map( ( { publicId, name, containerId } ) => ( // eslint-disable-line sitekit/camelcase-acronyms <Option key={ publicId } // eslint-disable-line sitekit/camelcase-acronyms value={ publicId } // eslint-disable-line sitekit/camelcase-acronyms data-internal-id={ containerId } // eslint-disable-line sitekit/camelcase-acronyms > { name } </Option> ) ) } </Select> ); } ContainerSelect.propTypes = { containers: PropTypes.arrayOf( PropTypes.object ), };
1
32,640
I think the `undefined` checks still need to be removed from here.
google-site-kit-wp
js
@@ -158,6 +158,13 @@ class KubernetesJobTask(luigi.Task):
         """
         return self.kubernetes_config.max_retrials
 
+    @property
+    def backoff_limit(self):
+        """
+        Maximum number of retries before considering the job as failed.
+        """
+        return 6
+
     @property
     def delete_on_success(self):
         """
1
# -*- coding: utf-8 -*- # # Copyright 2015 Outlier Bio, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Kubernetes Job wrapper for Luigi. From the Kubernetes website: Kubernetes is an open-source system for automating deployment, scaling, and management of containerized applications. For more information about Kubernetes Jobs: http://kubernetes.io/docs/user-guide/jobs/ Requires: - pykube: ``pip install pykube`` Written and maintained by Marco Capuccini (@mcapuccini). """ import logging import time import uuid from datetime import datetime import luigi logger = logging.getLogger('luigi-interface') try: from pykube.config import KubeConfig from pykube.http import HTTPClient from pykube.objects import Job, Pod except ImportError: logger.warning('pykube is not installed. KubernetesJobTask requires pykube.') class kubernetes(luigi.Config): auth_method = luigi.Parameter( default="kubeconfig", description="Authorization method to access the cluster") kubeconfig_path = luigi.Parameter( default="~/.kube/config", description="Path to kubeconfig file for cluster authentication") max_retrials = luigi.IntParameter( default=0, description="Max retrials in event of job failure") class KubernetesJobTask(luigi.Task): __POLL_TIME = 5 # see __track_job _kubernetes_config = None # Needs to be loaded at runtime def _init_kubernetes(self): self.__logger = logger self.__logger.debug("Kubernetes auth method: " + self.auth_method) if self.auth_method == "kubeconfig": self.__kube_api = HTTPClient(KubeConfig.from_file(self.kubeconfig_path)) elif self.auth_method == "service-account": self.__kube_api = HTTPClient(KubeConfig.from_service_account()) else: raise ValueError("Illegal auth_method") self.job_uuid = str(uuid.uuid4().hex) now = datetime.utcnow() self.uu_name = "%s-%s-%s" % (self.name, now.strftime('%Y%m%d%H%M%S'), self.job_uuid[:16]) @property def auth_method(self): """ This can be set to ``kubeconfig`` or ``service-account``. It defaults to ``kubeconfig``. For more details, please refer to: - kubeconfig: http://kubernetes.io/docs/user-guide/kubeconfig-file - service-account: http://kubernetes.io/docs/user-guide/service-accounts """ return self.kubernetes_config.auth_method @property def kubeconfig_path(self): """ Path to kubeconfig file used for cluster authentication. It defaults to "~/.kube/config", which is the default location when using minikube (http://kubernetes.io/docs/getting-started-guides/minikube). When auth_method is ``service-account`` this property is ignored. **WARNING**: For Python versions < 3.5 kubeconfig must point to a Kubernetes API hostname, and NOT to an IP address. For more details, please refer to: http://kubernetes.io/docs/user-guide/kubeconfig-file """ return self.kubernetes_config.kubeconfig_path @property def name(self): """ A name for this job. This task will automatically append a UUID to the name before to submit to Kubernetes. """ raise NotImplementedError("subclass must define name") @property def labels(self): """ Return custom labels for kubernetes job. 
example:: ``{"run_dt": datetime.date.today().strftime('%F')}`` """ return {} @property def spec_schema(self): """ Kubernetes Job spec schema in JSON format, an example follows. .. code-block:: javascript { "containers": [{ "name": "pi", "image": "perl", "command": ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] }], "restartPolicy": "Never" } **restartPolicy** - If restartPolicy is not defined, it will be set to "Never" by default. - **Warning**: restartPolicy=OnFailure will bypass max_retrials, and restart the container until success, with the risk of blocking the Luigi task. For more informations please refer to: http://kubernetes.io/docs/user-guide/pods/multi-container/#the-spec-schema """ raise NotImplementedError("subclass must define spec_schema") @property def max_retrials(self): """ Maximum number of retrials in case of failure. """ return self.kubernetes_config.max_retrials @property def delete_on_success(self): """ Delete the Kubernetes workload if the job has ended successfully. """ return True @property def print_pod_logs_on_exit(self): """ Fetch and print the pod logs once the job is completed. """ return False @property def active_deadline_seconds(self): """ Time allowed to successfully schedule pods. See: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#job-termination-and-cleanup """ return 100 @property def kubernetes_config(self): if not self._kubernetes_config: self._kubernetes_config = kubernetes() return self._kubernetes_config def __track_job(self): """Poll job status while active""" while not self.__verify_job_has_started(): time.sleep(self.__POLL_TIME) self.__logger.debug("Waiting for Kubernetes job " + self.uu_name + " to start") self.__print_kubectl_hints() status = self.__get_job_status() while status == "RUNNING": self.__logger.debug("Kubernetes job " + self.uu_name + " is running") time.sleep(self.__POLL_TIME) status = self.__get_job_status() assert status != "FAILED", "Kubernetes job " + self.uu_name + " failed" # status == "SUCCEEDED" self.__logger.info("Kubernetes job " + self.uu_name + " succeeded") self.signal_complete() def signal_complete(self): """Signal job completion for scheduler and dependent tasks. Touching a system file is an easy way to signal completion. example:: .. 
code-block:: python with self.output().open('w') as output_file: output_file.write('') """ pass def __get_pods(self): pod_objs = Pod.objects(self.__kube_api) \ .filter(selector="job-name=" + self.uu_name) \ .response['items'] return [Pod(self.__kube_api, p) for p in pod_objs] def __get_job(self): jobs = Job.objects(self.__kube_api) \ .filter(selector="luigi_task_id=" + self.job_uuid) \ .response['items'] assert len(jobs) == 1, "Kubernetes job " + self.uu_name + " not found" return Job(self.__kube_api, jobs[0]) def __print_pod_logs(self): for pod in self.__get_pods(): logs = pod.logs(timestamps=True).strip() self.__logger.info("Fetching logs from " + pod.name) if len(logs) > 0: for l in logs.split('\n'): self.__logger.info(l) def __print_kubectl_hints(self): self.__logger.info("To stream Pod logs, use:") for pod in self.__get_pods(): self.__logger.info("`kubectl logs -f pod/%s`" % pod.name) def __verify_job_has_started(self): """Asserts that the job has successfully started""" # Verify that the job started self.__get_job() # Verify that the pod started pods = self.__get_pods() assert len(pods) > 0, "No pod scheduled by " + self.uu_name for pod in pods: status = pod.obj['status'] for cont_stats in status.get('containerStatuses', []): if 'terminated' in cont_stats['state']: t = cont_stats['state']['terminated'] err_msg = "Pod %s %s (exit code %d). Logs: `kubectl logs pod/%s`" % ( pod.name, t['reason'], t['exitCode'], pod.name) assert t['exitCode'] == 0, err_msg if 'waiting' in cont_stats['state']: wr = cont_stats['state']['waiting']['reason'] assert wr == 'ContainerCreating', "Pod %s %s. Logs: `kubectl logs pod/%s`" % ( pod.name, wr, pod.name) for cond in status['conditions']: if 'message' in cond: if cond['reason'] == 'ContainersNotReady': return False assert cond['status'] != 'False', \ "[ERROR] %s - %s" % (cond['reason'], cond['message']) return True def __get_job_status(self): """Return the Kubernetes job status""" # Figure out status and return it job = self.__get_job() if "succeeded" in job.obj["status"] and job.obj["status"]["succeeded"] > 0: job.scale(replicas=0) if self.print_pod_logs_on_exit: self.__print_pod_logs() if self.delete_on_success: self.__delete_job_cascade(job) return "SUCCEEDED" if "failed" in job.obj["status"]: failed_cnt = job.obj["status"]["failed"] self.__logger.debug("Kubernetes job " + self.uu_name + " status.failed: " + str(failed_cnt)) if self.print_pod_logs_on_exit: self.__print_pod_logs() if failed_cnt > self.max_retrials: job.scale(replicas=0) # avoid more retrials return "FAILED" return "RUNNING" def __delete_job_cascade(self, job): delete_options_cascade = { "kind": "DeleteOptions", "apiVersion": "v1", "propagationPolicy": "Background" } r = self.__kube_api.delete(json=delete_options_cascade, **job.api_kwargs()) if r.status_code != 200: self.__kube_api.raise_for_status(r) def run(self): self._init_kubernetes() # Render job job_json = { "apiVersion": "batch/v1", "kind": "Job", "metadata": { "name": self.uu_name, "labels": { "spawned_by": "luigi", "luigi_task_id": self.job_uuid } }, "spec": { "activeDeadlineSeconds": self.active_deadline_seconds, "template": { "metadata": { "name": self.uu_name }, "spec": self.spec_schema } } } # Update user labels job_json['metadata']['labels'].update(self.labels) # Add default restartPolicy if not specified if "restartPolicy" not in self.spec_schema: job_json["spec"]["template"]["spec"]["restartPolicy"] = "Never" # Submit job self.__logger.info("Submitting Kubernetes Job: " + self.uu_name) job = Job(self.__kube_api, 
job_json) job.create() # Track the Job (wait while active) self.__logger.info("Start tracking Kubernetes Job: " + self.uu_name) self.__track_job() def output(self): """ An output target is necessary for checking job completion unless an alternative complete method is defined. Example:: return luigi.LocalTarget(os.path.join('/tmp', 'example')) """ pass
1
17,845
Is there any particular reason to have 6?
spotify-luigi
py
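Editor's note on the row above: the most plausible answer to the reviewer's question is that 6 matches the Kubernetes default for a Job's `.spec.backoffLimit`, so the property simply makes the platform default explicit. A sketch pinning that default down (in Go against the upstream `k8s.io/api` types, not luigi's pykube code):

```go
package main

import batchv1 "k8s.io/api/batch/v1"

// defaultBackoffJobSpec returns a JobSpec whose retry budget matches what
// the Kubernetes API server would apply anyway if backoffLimit were unset.
func defaultBackoffJobSpec() batchv1.JobSpec {
	backoff := int32(6) // Kubernetes' documented default for .spec.backoffLimit
	return batchv1.JobSpec{BackoffLimit: &backoff}
}
```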
@@ -122,8 +122,11 @@ func (s *Space) biggestFreeRange(r address.Range) (biggest address.Range) {
 	biggestSize := address.Count(0)
 	s.walkFree(r, func(chunk address.Range) bool {
 		if size := chunk.Size(); size >= biggestSize {
-			biggest = chunk
-			biggestSize = size
+			chunk = chunk.BiggestPow2AlignedRange()
+			if size = chunk.Size(); size >= biggestSize {
+				biggest = chunk
+				biggestSize = size
+			}
 		}
 		return false
 	})
1
package space import ( "bytes" "fmt" "sort" "github.com/weaveworks/weave/common" "github.com/weaveworks/weave/net/address" ) type Space struct { // ours and free represent a set of addresses as a sorted // sequences of ranges. Even elements give the inclusive // starting points of ranges, and odd elements give the // exclusive ending points. Ranges in an array do not // overlap, and neighbouring ranges are always coalesced if // possible, so the arrays consist of sorted Addrs without // repetition. ours []address.Address free []address.Address } func New() *Space { return &Space{} } func (s *Space) Add(start address.Address, size address.Offset) { s.free = add(s.free, start, address.Add(start, size)) } // Clear removes all spaces from this space set. Used during node shutdown. func (s *Space) Clear() { s.ours = s.ours[:0] s.free = s.free[:0] } // Walk down the free list calling f() on the in-range portions, until // f() returns true or we run out of free space. Return true iff f() returned true func (s *Space) walkFree(r address.Range, f func(address.Range) bool) bool { if r.Start >= r.End { // degenerate case return false } for i := 0; i < len(s.free); i += 2 { chunk := address.Range{Start: s.free[i], End: s.free[i+1]} if chunk.End <= r.Start { // this chunk comes before the range continue } if chunk.Start >= r.End { // all remaining free space is completely after range break } // at this point we know chunk.End>chunk.Start && // chunk.End>r.Start && r.End>chunk.Start && r.End>r.Start // therefore max(start, r.Start) < min(end, r.End) // Restrict this block of free space to be in range if chunk.Start < r.Start { chunk.Start = r.Start } if chunk.End > r.End { chunk.End = r.End } // at this point we know start<end if f(chunk) { return true } } return false } func (s *Space) Allocate(r address.Range) (bool, address.Address) { var result address.Address return s.walkFree(r, func(chunk address.Range) bool { result = chunk.Start s.ours = add(s.ours, result, result+1) s.free = subtract(s.free, result, result+1) return true }), result } func (s *Space) Claim(addr address.Address) error { if !contains(s.free, addr) { return fmt.Errorf("Address %v is not free to claim", addr) } s.ours = add(s.ours, addr, addr+1) s.free = subtract(s.free, addr, addr+1) return nil } func (s *Space) NumFreeAddresses() address.Count { res := address.Count(0) for i := 0; i < len(s.free); i += 2 { res += address.Length(s.free[i+1], s.free[i]) } return res } func (s *Space) NumFreeAddressesInRange(r address.Range) address.Count { res := address.Count(0) s.walkFree(r, func(chunk address.Range) bool { res += chunk.Size() return false }) return res } func (s *Space) Free(addr address.Address) error { if !contains(s.ours, addr) { return fmt.Errorf("Address %v is not ours", addr) } if contains(s.free, addr) { return fmt.Errorf("Address %v is already free", addr) } s.ours = subtract(s.ours, addr, addr+1) s.free = add(s.free, addr, addr+1) return nil } func (s *Space) biggestFreeRange(r address.Range) (biggest address.Range) { biggestSize := address.Count(0) s.walkFree(r, func(chunk address.Range) bool { if size := chunk.Size(); size >= biggestSize { biggest = chunk biggestSize = size } return false }) return } func (s *Space) Donate(r address.Range) (address.Range, bool) { biggest := s.biggestFreeRange(r) if biggest.Size() == 0 { return address.Range{}, false } // Donate half of that biggest free range. Note size/2 rounds down, so // the resulting donation size rounds up, and in particular can't be empty. 
biggest.Start = address.Add(biggest.Start, address.Offset(biggest.Size()/2)) s.ours = subtract(s.ours, biggest.Start, biggest.End) s.free = subtract(s.free, biggest.Start, biggest.End) return biggest, true } func firstGreater(a []address.Address, x address.Address) int { return sort.Search(len(a), func(i int) bool { return a[i] > x }) } func firstGreaterOrEq(a []address.Address, x address.Address) int { return sort.Search(len(a), func(i int) bool { return a[i] >= x }) } // Do the ranges contain the given address? func contains(addrs []address.Address, addr address.Address) bool { return firstGreater(addrs, addr)&1 != 0 } // Take the union of the range [start, end) with the ranges in the array func add(addrs []address.Address, start address.Address, end address.Address) []address.Address { return addSub(addrs, start, end, 0) } // Subtract the range [start, end) from the ranges in the array func subtract(addrs []address.Address, start address.Address, end address.Address) []address.Address { return addSub(addrs, start, end, 1) } func addSub(addrs []address.Address, start address.Address, end address.Address, sense int) []address.Address { startPos := firstGreaterOrEq(addrs, start) endPos := firstGreater(addrs[startPos:], end) + startPos // Boundaries up to startPos are unaffected res := make([]address.Address, startPos, len(addrs)+2) copy(res, addrs) // Include start and end as new boundaries if they lie // outside/inside existing ranges (according to sense). if startPos&1 == sense { res = append(res, start) } if endPos&1 == sense { res = append(res, end) } // Boundaries after endPos are unaffected return append(res, addrs[endPos:]...) } func (s *Space) String() string { var buf bytes.Buffer if len(s.ours) > 0 { fmt.Fprint(&buf, "owned:") for i := 0; i < len(s.ours); i += 2 { fmt.Fprintf(&buf, " %s+%d ", s.ours[i], s.ours[i+1]-s.ours[i]) } } if len(s.free) > 0 { fmt.Fprintf(&buf, "free:") for i := 0; i < len(s.free); i += 2 { fmt.Fprintf(&buf, " %s+%d ", s.free[i], s.free[i+1]-s.free[i]) } } if len(s.ours) == 0 && len(s.free) == 0 { fmt.Fprintf(&buf, "No address ranges owned") } return buf.String() } type addressSlice []address.Address func (p addressSlice) Len() int { return len(p) } func (p addressSlice) Less(i, j int) bool { return p[i] < p[j] } func (p addressSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (s *Space) assertInvariants() { common.Assert(sort.IsSorted(addressSlice(s.ours))) common.Assert(sort.IsSorted(addressSlice(s.free))) } // Return a slice representing everything we own, whether it is free or not func (s *Space) everything() []address.Address { a := make([]address.Address, len(s.ours)) copy(a, s.ours) for i := 0; i < len(s.free); i += 2 { a = add(a, s.free[i], s.free[i+1]) } return a } // OwnedRanges returns slice of Ranges, ordered by IP, gluing together // contiguous sequences of owned and free addresses func (s *Space) OwnedRanges() []address.Range { everything := s.everything() result := make([]address.Range, len(everything)/2) for i := 0; i < len(everything); i += 2 { result[i/2] = address.Range{Start: everything[i], End: everything[i+1]} } return result } // Create a Space that has free space in all the supplied Ranges. 
func (s *Space) AddRanges(ranges []address.Range) { for _, r := range ranges { s.free = add(s.free, r.Start, r.End) } } // Taking ranges to be a set of all space we should own, add in any excess as free space func (s *Space) UpdateRanges(ranges []address.Range) { new := []address.Address{} for _, r := range ranges { new = add(new, r.Start, r.End) } current := s.everything() for i := 0; i < len(current); i += 2 { new = subtract(new, current[i], current[i+1]) } for i := 0; i < len(new); i += 2 { s.free = add(s.free, new[i], new[i+1]) } }
1
13,136
The biggest chunk does not guarantee that it contains the biggest CIDR-aligned range. If we don't care too much about a few CPU cycles being wasted, then I'd suggest merging the if-statements.
weaveworks-weave
go
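Editor's note on the row above: merging the if-statements, as the reviewer suggests, means computing the power-of-two alignment for every free chunk rather than only for raw-size candidates -- a few wasted cycles in exchange for a single comparison. A sketch of the merged form, assuming the `BiggestPow2AlignedRange` method the patch introduces:

```go
func (s *Space) biggestFreeRange(r address.Range) (biggest address.Range) {
	biggestSize := address.Count(0)
	s.walkFree(r, func(chunk address.Range) bool {
		// Align first, then compare once: every chunk is reduced to its
		// biggest power-of-two (CIDR) aligned sub-range before the size
		// check, so a smaller chunk with better alignment can still win.
		aligned := chunk.BiggestPow2AlignedRange()
		if size := aligned.Size(); size >= biggestSize {
			biggest = aligned
			biggestSize = size
		}
		return false
	})
	return
}
```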
@@ -81,6 +81,16 @@ func (i *IncludeWorkflow) populate(ctx context.Context, s *Step) dErr {
 	}
 	substitute(reflect.ValueOf(i.Workflow).Elem(), strings.NewReplacer(replacements...))
 
+	// We do this here, and not in validate, as embedded startup scripts could
+	// have what we think are daisy variables.
+	if err := i.Workflow.validateVarsSubbed(); err != nil {
+		return err
+	}
+
+	if err := i.Workflow.substituteSourceVars(reflect.ValueOf(i.Workflow).Elem()); err != nil {
+		return err
+	}
+
 	i.Workflow.populateLogger(ctx)
 
 	for name, st := range i.Workflow.Steps {
1
// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package daisy import ( "context" "fmt" "path/filepath" "reflect" "strings" ) // IncludeWorkflow defines a Daisy workflow injection step. This step will // 'include' the workflow found the path given into the parent workflow. Unlike // a Subworkflow the included workflow will exist in the same namespace // as the parent and have access to all its resources. type IncludeWorkflow struct { Path string Vars map[string]string `json:",omitempty"` Workflow *Workflow } func (i *IncludeWorkflow) populate(ctx context.Context, s *Step) dErr { if i.Path != "" { var err error if i.Workflow, err = s.w.NewIncludedWorkflowFromFile(i.Path); err != nil { return newErr(err) } } if i.Workflow == nil { return errf("IncludeWorkflow %q does not have a workflow", s.name) } i.Workflow.id = s.w.id i.Workflow.username = s.w.username i.Workflow.ComputeClient = s.w.ComputeClient i.Workflow.StorageClient = s.w.StorageClient i.Workflow.GCSPath = s.w.GCSPath i.Workflow.Name = s.name i.Workflow.Project = s.w.Project i.Workflow.Zone = s.w.Zone i.Workflow.autovars = s.w.autovars i.Workflow.bucket = s.w.bucket i.Workflow.scratchPath = s.w.scratchPath i.Workflow.sourcesPath = s.w.sourcesPath i.Workflow.logsPath = s.w.logsPath i.Workflow.outsPath = s.w.outsPath i.Workflow.gcsLogWriter = s.w.gcsLogWriter i.Workflow.gcsLogging = s.w.gcsLogging for k, v := range i.Vars { i.Workflow.AddVar(k, v) } var replacements []string for k, v := range i.Workflow.autovars { if k == "NAME" { v = s.name } if k == "WFDIR" { v = i.Workflow.workflowDir } replacements = append(replacements, fmt.Sprintf("${%s}", k), v) } substitute(reflect.ValueOf(i.Workflow).Elem(), strings.NewReplacer(replacements...)) for k, v := range i.Workflow.Vars { replacements = append(replacements, fmt.Sprintf("${%s}", k), v.Value) } substitute(reflect.ValueOf(i.Workflow).Elem(), strings.NewReplacer(replacements...)) i.Workflow.populateLogger(ctx) for name, st := range i.Workflow.Steps { st.name = name st.w = i.Workflow if err := st.w.populateStep(ctx, st); err != nil { return err } } // Copy Sources up to parent resolving relative paths as we go. for k, v := range i.Workflow.Sources { if v == "" { continue } if _, ok := s.w.Sources[k]; ok { return errf("source %q already exists in workflow", k) } if s.w.Sources == nil { s.w.Sources = map[string]string{} } if _, _, err := splitGCSPath(v); err != nil && !filepath.IsAbs(v) { v = filepath.Join(i.Workflow.workflowDir, v) } s.w.Sources[k] = v } return nil } func (i *IncludeWorkflow) validate(ctx context.Context, s *Step) dErr { return i.Workflow.validate(ctx) } func (i *IncludeWorkflow) run(ctx context.Context, s *Step) dErr { return i.Workflow.run(ctx) }
1
7,103
Instead of doing the `if err := __; err != nil { return err }` thing, you can do `errs = addErrs(errs, ___)`. If you want.
GoogleCloudPlatform-compute-image-tools
go
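Editor's note on the row above: the reviewer is pointing at daisy's error-accumulation idiom. A sketch of the suggested shape inside `populate` -- not a standalone program, and with the `addErrs` signature assumed from the comment itself rather than shown in the excerpt:

```go
// Accumulate both checks instead of returning at the first failure.
var errs dErr
errs = addErrs(errs, i.Workflow.validateVarsSubbed())
errs = addErrs(errs, i.Workflow.substituteSourceVars(reflect.ValueOf(i.Workflow).Elem()))
if errs != nil {
	return errs
}
```

The accumulator style reports every failure in one pass, which is presumably why the helper exists in this codebase.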
@@ -4,8 +4,15 @@
 package cli
 
 import (
+	"errors"
+	"fmt"
+
 	"github.com/aws/amazon-ecs-cli-v2/internal/pkg/archer"
+	"github.com/aws/amazon-ecs-cli-v2/internal/pkg/store"
 	"github.com/aws/amazon-ecs-cli-v2/internal/pkg/store/ssm"
+	"github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/color"
+	"github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/log"
+	"github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/prompt"
 	"github.com/aws/amazon-ecs-cli-v2/internal/pkg/workspace"
 	"github.com/spf13/cobra"
 )
1
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package cli

import (
	"github.com/aws/amazon-ecs-cli-v2/internal/pkg/archer"
	"github.com/aws/amazon-ecs-cli-v2/internal/pkg/store/ssm"
	"github.com/aws/amazon-ecs-cli-v2/internal/pkg/workspace"

	"github.com/spf13/cobra"
)

// InitProjectOpts contains the fields to collect for creating a project.
type InitProjectOpts struct {
	ProjectName string

	manager archer.ProjectCreator
	ws      archer.Workspace
}

// Execute creates a new managed empty project.
func (opts *InitProjectOpts) Execute() error {
	if err := validateProjectName(opts.ProjectName); err != nil {
		return err
	}
	if err := opts.manager.CreateProject(&archer.Project{
		Name: opts.ProjectName,
	}); err != nil {
		return err
	}
	return opts.ws.Create(opts.ProjectName)
}

// BuildProjectInitCommand builds the command for creating a new project.
func BuildProjectInitCommand() *cobra.Command {
	opts := InitProjectOpts{}
	cmd := &cobra.Command{
		Use:   "init [name]",
		Short: "Creates a new, empty project",
		Example: `
  Create a new project named test
  $ archer project init test`,
		Args: cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			ssmStore, err := ssm.NewStore()
			if err != nil {
				return err
			}
			ws, err := workspace.New()
			if err != nil {
				return err
			}
			opts.ws = ws
			opts.manager = ssmStore
			opts.ProjectName = args[0]
			return opts.Execute()
		},
	}
	return cmd
}
1
10,632
nit: Is this error message accurate?
aws-copilot-cli
go
@@ -4269,7 +4269,7 @@ import_one_object_direct (OstreeRepo *dest_repo,
     G_IN_SET (src_repo->mode, OSTREE_REPO_MODE_BARE, OSTREE_REPO_MODE_BARE_USER);
   if (src_is_bare_or_bare_user && !OSTREE_OBJECT_TYPE_IS_META(objtype))
     {
-      if (src_repo == OSTREE_REPO_MODE_BARE)
+      if (src_repo->mode == OSTREE_REPO_MODE_BARE)
        {
          g_autoptr(GVariant) xattrs = NULL;
          if (!glnx_fd_get_all_xattrs (src_fd, &xattrs,
1
/* * Copyright (C) 2011,2013 Colin Walters <[email protected]> * * SPDX-License-Identifier: LGPL-2.0+ * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Author: Colin Walters <[email protected]> */ #include "config.h" #include <glib-unix.h> #include <sys/statvfs.h> #include <gio/gfiledescriptorbased.h> #include <gio/gunixinputstream.h> #include <gio/gunixoutputstream.h> #include <sys/xattr.h> #include <glib/gprintf.h> #include <sys/ioctl.h> #include <linux/fs.h> #include "otutil.h" #include "ostree.h" #include "ostree-core-private.h" #include "ostree-repo-private.h" #include "ostree-sepolicy-private.h" #include "ostree-repo-file-enumerator.h" #include "ostree-checksum-input-stream.h" #include "ostree-varint.h" /* The standardized version of BTRFS_IOC_CLONE */ #ifndef FICLONE #define FICLONE _IOW(0x94, 9, int) #endif /* If fsync is enabled and we're in a txn, we write into a staging dir for * commit, but we also allow direct writes into objects/ for e.g. hardlink * imports. */ static int commit_dest_dfd (OstreeRepo *self) { if (self->in_transaction && !self->disable_fsync) return self->commit_stagedir.fd; else return self->objects_dir_fd; } /* If we don't have O_TMPFILE, or for symlinks we'll create temporary * files. If we have a txn, use the staging dir to ensure that * things are consistently locked against concurrent cleanup, and * in general we have all of our data in one place. */ static int commit_tmp_dfd (OstreeRepo *self) { if (self->in_transaction) return self->commit_stagedir.fd; else return self->tmp_dir_fd; } /* The objects/ directory has a two-character directory prefix for checksums * to avoid putting lots of files in a single directory. This technique * is quite old, but Git also uses it for example. */ gboolean _ostree_repo_ensure_loose_objdir_at (int dfd, const char *loose_path, GCancellable *cancellable, GError **error) { char loose_prefix[3]; loose_prefix[0] = loose_path[0]; loose_prefix[1] = loose_path[1]; loose_prefix[2] = '\0'; if (mkdirat (dfd, loose_prefix, 0777) == -1) { if (G_UNLIKELY (errno != EEXIST)) { glnx_set_error_from_errno (error); return FALSE; } } return TRUE; } /* This GVariant is the header for content objects (regfiles and symlinks) */ static GVariant * create_file_metadata (guint32 uid, guint32 gid, guint32 mode, GVariant *xattrs) { GVariant *ret_metadata = NULL; g_autoptr(GVariant) tmp_xattrs = NULL; if (xattrs == NULL) tmp_xattrs = g_variant_ref_sink (g_variant_new_array (G_VARIANT_TYPE ("(ayay)"), NULL, 0)); ret_metadata = g_variant_new ("(uuu@a(ayay))", GUINT32_TO_BE (uid), GUINT32_TO_BE (gid), GUINT32_TO_BE (mode), xattrs ? 
xattrs : tmp_xattrs); g_variant_ref_sink (ret_metadata); return ret_metadata; } /* bare-user repositories store file metadata as a user xattr */ gboolean _ostree_write_bareuser_metadata (int fd, guint32 uid, guint32 gid, guint32 mode, GVariant *xattrs, GError **error) { g_autoptr(GVariant) filemeta = create_file_metadata (uid, gid, mode, xattrs); if (TEMP_FAILURE_RETRY (fsetxattr (fd, "user.ostreemeta", (char*)g_variant_get_data (filemeta), g_variant_get_size (filemeta), 0)) != 0) return glnx_throw_errno_prefix (error, "fsetxattr(user.ostreemeta)"); return TRUE; } /* See https://github.com/ostreedev/ostree/pull/698 */ #ifdef WITH_SMACK #define XATTR_NAME_SMACK "security.SMACK64" #endif static void ot_security_smack_reset_dfd_name (int dfd, const char *name) { #ifdef WITH_SMACK char buf[PATH_MAX]; /* See glnx-xattrs.c */ snprintf (buf, sizeof (buf), "/proc/self/fd/%d/%s", dfd, name); (void) lremovexattr (buf, XATTR_NAME_SMACK); #endif } static void ot_security_smack_reset_fd (int fd) { #ifdef WITH_SMACK (void) fremovexattr (fd, XATTR_NAME_SMACK); #endif } /* Given an O_TMPFILE regular file, link it into place. */ gboolean _ostree_repo_commit_tmpf_final (OstreeRepo *self, const char *checksum, OstreeObjectType objtype, GLnxTmpfile *tmpf, GCancellable *cancellable, GError **error) { char tmpbuf[_OSTREE_LOOSE_PATH_MAX]; _ostree_loose_path (tmpbuf, checksum, objtype, self->mode); int dest_dfd = commit_dest_dfd (self); if (!_ostree_repo_ensure_loose_objdir_at (dest_dfd, tmpbuf, cancellable, error)) return FALSE; if (!glnx_link_tmpfile_at (tmpf, GLNX_LINK_TMPFILE_NOREPLACE_IGNORE_EXIST, dest_dfd, tmpbuf, error)) return FALSE; /* We're done with the fd */ glnx_tmpfile_clear (tmpf); return TRUE; } /* Given a dfd+path combination (may be regular file or symlink), * rename it into place. */ static gboolean commit_path_final (OstreeRepo *self, const char *checksum, OstreeObjectType objtype, OtCleanupUnlinkat *tmp_path, GCancellable *cancellable, GError **error) { /* The final renameat() */ char tmpbuf[_OSTREE_LOOSE_PATH_MAX]; _ostree_loose_path (tmpbuf, checksum, objtype, self->mode); int dest_dfd = commit_dest_dfd (self); if (!_ostree_repo_ensure_loose_objdir_at (dest_dfd, tmpbuf, cancellable, error)) return FALSE; if (renameat (tmp_path->dfd, tmp_path->path, dest_dfd, tmpbuf) == -1) { if (errno != EEXIST) return glnx_throw_errno_prefix (error, "Storing file '%s'", tmp_path->path); /* Otherwise, the caller's cleanup will unlink+free */ } else { /* The tmp path was consumed */ ot_cleanup_unlinkat_clear (tmp_path); } return TRUE; } /* Given either a file or symlink, apply the final metadata to it depending on * the repository mode. Note that @checksum is assumed to have been validated by * the caller. 
*/ static gboolean commit_loose_regfile_object (OstreeRepo *self, const char *checksum, GLnxTmpfile *tmpf, guint32 uid, guint32 gid, guint32 mode, GVariant *xattrs, GCancellable *cancellable, GError **error) { if (self->mode == OSTREE_REPO_MODE_BARE) { if (TEMP_FAILURE_RETRY (fchown (tmpf->fd, uid, gid)) < 0) return glnx_throw_errno_prefix (error, "fchown"); if (TEMP_FAILURE_RETRY (fchmod (tmpf->fd, mode)) < 0) return glnx_throw_errno_prefix (error, "fchmod"); if (xattrs) { ot_security_smack_reset_fd (tmpf->fd); if (!glnx_fd_set_all_xattrs (tmpf->fd, xattrs, cancellable, error)) return FALSE; } } else if (self->mode == OSTREE_REPO_MODE_BARE_USER) { if (!_ostree_write_bareuser_metadata (tmpf->fd, uid, gid, mode, xattrs, error)) return FALSE; /* Note that previously this path added `| 0755` which made every * file executable, see * https://github.com/ostreedev/ostree/issues/907 * We then changed it to mask by 0775, but we always need at least read * permission when running as non-root, so explicitly mask that in. * * Again here, symlinks in bare-user are a hairy special case; only do a * chmod for a *real* regular file, otherwise we'll take the default 0644. */ if (S_ISREG (mode)) { const mode_t content_mode = (mode & (S_IFREG | 0775)) | S_IRUSR; if (!glnx_fchmod (tmpf->fd, content_mode, error)) return FALSE; } else g_assert (S_ISLNK (mode)); } else if (self->mode == OSTREE_REPO_MODE_BARE_USER_ONLY) { if (!_ostree_validate_bareuseronly_mode (mode, checksum, error)) return FALSE; if (!glnx_fchmod (tmpf->fd, mode, error)) return FALSE; } if (_ostree_repo_mode_is_bare (self->mode)) { /* To satisfy tools such as guile which compare mtimes * to determine whether or not source files need to be compiled, * set the modification time to OSTREE_TIMESTAMP. */ const struct timespec times[2] = { { OSTREE_TIMESTAMP, UTIME_OMIT }, { OSTREE_TIMESTAMP, 0} }; if (TEMP_FAILURE_RETRY (futimens (tmpf->fd, times)) < 0) return glnx_throw_errno_prefix (error, "futimens"); } /* Ensure that in case of a power cut, these files have the data we * want. See http://lwn.net/Articles/322823/ */ if (!self->in_transaction && !self->disable_fsync) { if (fsync (tmpf->fd) == -1) return glnx_throw_errno_prefix (error, "fsync"); } if (!_ostree_repo_commit_tmpf_final (self, checksum, OSTREE_OBJECT_TYPE_FILE, tmpf, cancellable, error)) return FALSE; return TRUE; } /* This is used by OSTREE_REPO_COMMIT_MODIFIER_FLAGS_GENERATE_SIZES */ typedef struct { goffset unpacked; goffset archived; } OstreeContentSizeCacheEntry; static OstreeContentSizeCacheEntry * content_size_cache_entry_new (goffset unpacked, goffset archived) { OstreeContentSizeCacheEntry *entry = g_slice_new0 (OstreeContentSizeCacheEntry); entry->unpacked = unpacked; entry->archived = archived; return entry; } static void content_size_cache_entry_free (gpointer entry) { if (entry) g_slice_free (OstreeContentSizeCacheEntry, entry); } static void repo_store_size_entry (OstreeRepo *self, const gchar *checksum, goffset unpacked, goffset archived) { if (G_UNLIKELY (self->object_sizes == NULL)) self->object_sizes = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, content_size_cache_entry_free); g_hash_table_replace (self->object_sizes, g_strdup (checksum), content_size_cache_entry_new (unpacked, archived)); } static int compare_ascii_checksums_for_sorting (gconstpointer a_pp, gconstpointer b_pp) { char *a = *((char**)a_pp); char *b = *((char**)b_pp); return strcmp (a, b); } /* * Create sizes metadata GVariant and add it to the metadata variant given. 
*/ static GVariant * add_size_index_to_metadata (OstreeRepo *self, GVariant *original_metadata) { g_autoptr(GVariantBuilder) builder = NULL; /* original_metadata may be NULL */ builder = ot_util_variant_builder_from_variant (original_metadata, G_VARIANT_TYPE ("a{sv}")); if (self->object_sizes && g_hash_table_size (self->object_sizes) > 0) { GVariantBuilder index_builder; g_variant_builder_init (&index_builder, G_VARIANT_TYPE ("a" _OSTREE_OBJECT_SIZES_ENTRY_SIGNATURE)); /* Sort the checksums so we can bsearch if desired */ g_autoptr(GPtrArray) sorted_keys = g_ptr_array_new (); GLNX_HASH_TABLE_FOREACH (self->object_sizes, const char*, e_checksum) g_ptr_array_add (sorted_keys, (gpointer)e_checksum); g_ptr_array_sort (sorted_keys, compare_ascii_checksums_for_sorting); for (guint i = 0; i < sorted_keys->len; i++) { guint8 csum[OSTREE_SHA256_DIGEST_LEN]; const char *e_checksum = sorted_keys->pdata[i]; g_autoptr(GString) buffer = g_string_new (NULL); ostree_checksum_inplace_to_bytes (e_checksum, csum); g_string_append_len (buffer, (char*)csum, sizeof (csum)); OstreeContentSizeCacheEntry *e_size = g_hash_table_lookup (self->object_sizes, e_checksum); _ostree_write_varuint64 (buffer, e_size->archived); _ostree_write_varuint64 (buffer, e_size->unpacked); g_variant_builder_add (&index_builder, "@ay", ot_gvariant_new_bytearray ((guint8*)buffer->str, buffer->len)); } g_variant_builder_add (builder, "{sv}", "ostree.sizes", g_variant_builder_end (&index_builder)); } return g_variant_ref_sink (g_variant_builder_end (builder)); } static gboolean throw_min_free_space_error (OstreeRepo *self, guint64 bytes_required, GError **error) { const char *err_msg = NULL; g_autofree char *err_msg_owned = NULL; if (bytes_required > 0) { g_autofree char *formatted_required = g_format_size (bytes_required); err_msg = err_msg_owned = g_strdup_printf ("would be exceeded, at least %s requested", formatted_required); } else err_msg = "would be exceeded"; if (self->min_free_space_mb > 0) return glnx_throw (error, "min-free-space-size %" G_GUINT64_FORMAT "MB %s", self->min_free_space_mb, err_msg); else return glnx_throw (error, "min-free-space-percent '%u%%' %s", self->min_free_space_percent, err_msg); } typedef struct { gboolean initialized; GLnxTmpfile tmpf; char *expected_checksum; OtChecksum checksum; guint64 content_len; guint64 bytes_written; guint uid; guint gid; guint mode; GVariant *xattrs; } OstreeRealRepoBareContent; G_STATIC_ASSERT (sizeof (OstreeRepoBareContent) >= sizeof (OstreeRealRepoBareContent)); /* Create a tmpfile for writing a bare file. Currently just used * by the static delta code, but will likely later be extended * to be used also by the dfd_iter commit path. */ gboolean _ostree_repo_bare_content_open (OstreeRepo *self, const char *expected_checksum, guint64 content_len, guint uid, guint gid, guint mode, GVariant *xattrs, OstreeRepoBareContent *out_regwrite, GCancellable *cancellable, GError **error) { OstreeRealRepoBareContent *real = (OstreeRealRepoBareContent*) out_regwrite; g_assert (!real->initialized); real->initialized = TRUE; g_assert (S_ISREG (mode)); if (!glnx_open_tmpfile_linkable_at (commit_tmp_dfd (self), ".", O_WRONLY|O_CLOEXEC, &real->tmpf, error)) return FALSE; ot_checksum_init (&real->checksum); real->expected_checksum = g_strdup (expected_checksum); real->content_len = content_len; real->bytes_written = 0; real->uid = uid; real->gid = gid; real->mode = mode; real->xattrs = xattrs ? 
g_variant_ref (xattrs) : NULL; /* Initialize the checksum with the header info */ g_autoptr(GFileInfo) finfo = _ostree_mode_uidgid_to_gfileinfo (mode, uid, gid); g_autoptr(GBytes) header = _ostree_file_header_new (finfo, xattrs); ot_checksum_update_bytes (&real->checksum, header); return TRUE; } gboolean _ostree_repo_bare_content_write (OstreeRepo *repo, OstreeRepoBareContent *barewrite, const guint8 *buf, size_t len, GCancellable *cancellable, GError **error) { OstreeRealRepoBareContent *real = (OstreeRealRepoBareContent*) barewrite; g_assert (real->initialized); ot_checksum_update (&real->checksum, buf, len); if (glnx_loop_write (real->tmpf.fd, buf, len) < 0) return glnx_throw_errno_prefix (error, "write"); return TRUE; } gboolean _ostree_repo_bare_content_commit (OstreeRepo *self, OstreeRepoBareContent *barewrite, char *checksum_buf, size_t buflen, GCancellable *cancellable, GError **error) { OstreeRealRepoBareContent *real = (OstreeRealRepoBareContent*) barewrite; g_assert (real->initialized); if ((self->min_free_space_percent > 0 || self->min_free_space_mb > 0) && self->in_transaction) { struct stat st_buf; if (!glnx_fstat (real->tmpf.fd, &st_buf, error)) return FALSE; g_mutex_lock (&self->txn_lock); g_assert_cmpint (self->txn.blocksize, >, 0); const fsblkcnt_t object_blocks = (st_buf.st_size / self->txn.blocksize) + 1; if (object_blocks > self->txn.max_blocks) { self->cleanup_stagedir = TRUE; g_mutex_unlock (&self->txn_lock); return throw_min_free_space_error (self, st_buf.st_size, error); } /* This is the main bit that needs mutex protection */ self->txn.max_blocks -= object_blocks; g_mutex_unlock (&self->txn_lock); } ot_checksum_get_hexdigest (&real->checksum, checksum_buf, buflen); if (real->expected_checksum && !_ostree_compare_object_checksum (OSTREE_OBJECT_TYPE_FILE, real->expected_checksum, checksum_buf, error)) return FALSE; if (!commit_loose_regfile_object (self, checksum_buf, &real->tmpf, real->uid, real->gid, real->mode, real->xattrs, cancellable, error)) return FALSE; /* Let's have a guarantee that after commit the object is cleaned up */ _ostree_repo_bare_content_cleanup (barewrite); return TRUE; } void _ostree_repo_bare_content_cleanup (OstreeRepoBareContent *regwrite) { OstreeRealRepoBareContent *real = (OstreeRealRepoBareContent*) regwrite; if (!real->initialized) return; glnx_tmpfile_clear (&real->tmpf); ot_checksum_clear (&real->checksum); g_clear_pointer (&real->expected_checksum, (GDestroyNotify)g_free); g_clear_pointer (&real->xattrs, (GDestroyNotify)g_variant_unref); real->initialized = FALSE; } /* Allocate an O_TMPFILE, write everything from @input to it, but * not exceeding @length. Used for every object in archive repos, * and content objects in all bare-type repos. 
*/ static gboolean create_regular_tmpfile_linkable_with_content (OstreeRepo *self, guint64 length, GInputStream *input, GLnxTmpfile *out_tmpf, GCancellable *cancellable, GError **error) { g_auto(GLnxTmpfile) tmpf = { 0, }; if (!glnx_open_tmpfile_linkable_at (commit_tmp_dfd (self), ".", O_WRONLY|O_CLOEXEC, &tmpf, error)) return FALSE; if (!glnx_try_fallocate (tmpf.fd, 0, length, error)) return FALSE; if (G_IS_FILE_DESCRIPTOR_BASED (input)) { int infd = g_file_descriptor_based_get_fd ((GFileDescriptorBased*) input); if (glnx_regfile_copy_bytes (infd, tmpf.fd, (off_t)length) < 0) return glnx_throw_errno_prefix (error, "regfile copy"); } else { /* We used to do a g_output_stream_splice(), but there are two issues with that: * - We want to honor the size provided, to avoid malicious content that says it's * e.g. 10 bytes but is actually gigabytes. * - Due to GLib bugs that pointlessly calls `poll()` on the output fd for every write */ char buf[8192]; guint64 remaining = length; while (remaining > 0) { const gssize bytes_read = g_input_stream_read (input, buf, MIN (remaining, sizeof (buf)), cancellable, error); if (bytes_read < 0) return FALSE; else if (bytes_read == 0) return glnx_throw (error, "Unexpected EOF with %" G_GUINT64_FORMAT "/%" G_GUINT64_FORMAT " bytes remaining", remaining, length); if (glnx_loop_write (tmpf.fd, buf, bytes_read) < 0) return glnx_throw_errno_prefix (error, "write"); remaining -= bytes_read; } } if (!glnx_fchmod (tmpf.fd, 0644, error)) return FALSE; *out_tmpf = tmpf; tmpf.initialized = FALSE; return TRUE; } static gboolean _check_support_reflink (OstreeRepo *dest, gboolean *supported, GError **error) { /* We have not checked yet if the destination file system supports reflinks, do it here */ if (g_atomic_int_get (&dest->fs_support_reflink) == 0) { glnx_autofd int src_fd = -1; g_auto(GLnxTmpfile) dest_tmpf = { 0, }; if (!glnx_openat_rdonly (dest->repo_dir_fd, "config", TRUE, &src_fd, error)) return FALSE; if (!glnx_open_tmpfile_linkable_at (commit_tmp_dfd (dest), ".", O_WRONLY|O_CLOEXEC, &dest_tmpf, error)) return FALSE; if (ioctl (dest_tmpf.fd, FICLONE, src_fd) == 0) g_atomic_int_set (&dest->fs_support_reflink, 1); else if (errno == EOPNOTSUPP) /* Ignore other kind of errors as they might be temporary failures */ g_atomic_int_set (&dest->fs_support_reflink, -1); } *supported = g_atomic_int_get (&dest->fs_support_reflink) >= 0; return TRUE; } static gboolean _create_payload_link (OstreeRepo *self, const char *checksum, const char *payload_checksum, GFileInfo *file_info, GCancellable *cancellable, GError **error) { gboolean reflinks_supported = FALSE; if (!_check_support_reflink (self, &reflinks_supported, error)) return FALSE; if (!reflinks_supported) return TRUE; if (g_file_info_get_file_type (file_info) != G_FILE_TYPE_REGULAR || !G_IN_SET(self->mode, OSTREE_REPO_MODE_BARE, OSTREE_REPO_MODE_BARE_USER, OSTREE_REPO_MODE_BARE_USER_ONLY)) return TRUE; if (payload_checksum == NULL || g_file_info_get_size (file_info) < self->payload_link_threshold) return TRUE; char target_buf[_OSTREE_LOOSE_PATH_MAX + _OSTREE_PAYLOAD_LINK_PREFIX_LEN]; strcpy (target_buf, _OSTREE_PAYLOAD_LINK_PREFIX); _ostree_loose_path (target_buf + _OSTREE_PAYLOAD_LINK_PREFIX_LEN, checksum, OSTREE_OBJECT_TYPE_FILE, self->mode); if (symlinkat (target_buf, commit_tmp_dfd (self), payload_checksum) < 0) { if (errno != EEXIST) return glnx_throw_errno_prefix (error, "symlinkat"); } else { g_auto(OtCleanupUnlinkat) tmp_unlinker = { commit_tmp_dfd (self), g_strdup (payload_checksum) }; if (!commit_path_final 
(self, payload_checksum, OSTREE_OBJECT_TYPE_PAYLOAD_LINK, &tmp_unlinker, cancellable, error)) return FALSE; } return TRUE; } static gboolean _import_payload_link (OstreeRepo *dest_repo, OstreeRepo *src_repo, const char *checksum, GCancellable *cancellable, GError **error) { gboolean reflinks_supported = FALSE; g_autofree char *payload_checksum = NULL; g_autoptr(GInputStream) is = NULL; glnx_unref_object OtChecksumInstream *checksum_payload = NULL; g_autoptr(GFileInfo) file_info = NULL; /* The two repositories are on different devices */ if (src_repo->device != dest_repo->device) return TRUE; if (!_check_support_reflink (dest_repo, &reflinks_supported, error)) return FALSE; if (!reflinks_supported) return TRUE; if (!G_IN_SET(dest_repo->mode, OSTREE_REPO_MODE_BARE, OSTREE_REPO_MODE_BARE_USER, OSTREE_REPO_MODE_BARE_USER_ONLY)) return TRUE; if (!ostree_repo_load_file (src_repo, checksum, &is, &file_info, NULL, cancellable, error)) return FALSE; if (g_file_info_get_file_type (file_info) != G_FILE_TYPE_REGULAR || g_file_info_get_size (file_info) < dest_repo->payload_link_threshold) return TRUE; checksum_payload = ot_checksum_instream_new (is, G_CHECKSUM_SHA256); guint64 remaining = g_file_info_get_size (file_info); while (remaining) { char buf[8192]; gssize ret = g_input_stream_read ((GInputStream *) checksum_payload, buf, MIN (sizeof (buf), remaining), cancellable, error); if (ret < 0) return FALSE; remaining -= ret; } payload_checksum = ot_checksum_instream_get_string (checksum_payload); return _create_payload_link (dest_repo, checksum, payload_checksum, file_info, cancellable, error); } static gboolean _try_clone_from_payload_link (OstreeRepo *self, OstreeRepo *dest_repo, const char *payload_checksum, GFileInfo *file_info, GLnxTmpfile *tmpf, GCancellable *cancellable, GError **error) { gboolean reflinks_supported = FALSE; int dfd_searches[] = { -1, self->objects_dir_fd }; if (self->commit_stagedir.initialized) dfd_searches[0] = self->commit_stagedir.fd; /* The two repositories are on different devices */ if (self->device != dest_repo->device) return TRUE; if (!_check_support_reflink (dest_repo, &reflinks_supported, error)) return FALSE; if (!reflinks_supported) return TRUE; for (guint i = 0; i < G_N_ELEMENTS (dfd_searches); i++) { glnx_autofd int fdf = -1; char loose_path_buf[_OSTREE_LOOSE_PATH_MAX]; char loose_path_target_buf[_OSTREE_LOOSE_PATH_MAX]; char target_buf[_OSTREE_LOOSE_PATH_MAX + _OSTREE_PAYLOAD_LINK_PREFIX_LEN]; char target_checksum[OSTREE_SHA256_STRING_LEN+1]; int dfd = dfd_searches[i]; ssize_t size; if (dfd == -1) continue; _ostree_loose_path (loose_path_buf, payload_checksum, OSTREE_OBJECT_TYPE_PAYLOAD_LINK, self->mode); size = TEMP_FAILURE_RETRY (readlinkat (dfd, loose_path_buf, target_buf, sizeof (target_buf))); if (size < 0) { if (errno == ENOENT) continue; return glnx_throw_errno_prefix (error, "readlinkat"); } if (size < OSTREE_SHA256_STRING_LEN + _OSTREE_PAYLOAD_LINK_PREFIX_LEN) return glnx_throw (error, "invalid data size for %s", loose_path_buf); sprintf (target_checksum, "%.2s%.62s", target_buf + _OSTREE_PAYLOAD_LINK_PREFIX_LEN, target_buf + _OSTREE_PAYLOAD_LINK_PREFIX_LEN + 3); _ostree_loose_path (loose_path_target_buf, target_checksum, OSTREE_OBJECT_TYPE_FILE, self->mode); if (!ot_openat_ignore_enoent (dfd, loose_path_target_buf, &fdf, error)) return FALSE; if (fdf < 0) { /* If the link is referring to an object that doesn't exist anymore in the repository, just unlink it. 
*/ if (!glnx_unlinkat (dfd, loose_path_buf, 0, error)) return FALSE; } else { /* This undoes all of the previous writes; we want to generate reflinked data. */ if (ftruncate (tmpf->fd, 0) < 0) return glnx_throw_errno_prefix (error, "ftruncate"); if (glnx_regfile_copy_bytes (fdf, tmpf->fd, -1) < 0) return glnx_throw_errno_prefix (error, "regfile copy"); return TRUE; } } if (self->parent_repo) return _try_clone_from_payload_link (self->parent_repo, dest_repo, payload_checksum, file_info, tmpf, cancellable, error); return TRUE; } /* The main driver for writing a content (regfile or symlink) object. * There are a variety of tricky cases here; for example, bare-user * repos store symlinks as regular files. Computing checksums * is optional; if @out_csum is `NULL`, we assume the caller already * knows the checksum. */ static gboolean write_content_object (OstreeRepo *self, const char *expected_checksum, GInputStream *input, GFileInfo *file_info, GVariant *xattrs, guchar **out_csum, GCancellable *cancellable, GError **error) { GLNX_AUTO_PREFIX_ERROR ("Writing content object", error); g_return_val_if_fail (expected_checksum || out_csum, FALSE); if (g_cancellable_set_error_if_cancelled (cancellable, error)) return FALSE; OstreeRepoMode repo_mode = ostree_repo_get_mode (self); GInputStream *file_input; /* Unowned alias */ g_autoptr(GInputStream) file_input_owned = NULL; /* We need a temporary for bare-user symlinks */ glnx_unref_object OtChecksumInstream *checksum_input = NULL; glnx_unref_object OtChecksumInstream *checksum_payload_input = NULL; const GFileType object_file_type = g_file_info_get_file_type (file_info); if (out_csum) { /* Previously we checksummed the input verbatim; now * ostree_repo_write_content() parses without checksumming, then we * re-synthesize a header here. The data should be identical; if somehow * it's not that's not a serious problem because we're still computing a * checksum over the data we actually use. */ gboolean reflinks_supported = FALSE; g_autoptr(GBytes) header = _ostree_file_header_new (file_info, xattrs); size_t len; const guint8 *buf = g_bytes_get_data (header, &len); /* Give a null input if there's no content */ g_autoptr(GInputStream) null_input = NULL; if (!input) null_input = input = g_memory_input_stream_new_from_data ("", 0, NULL); checksum_input = ot_checksum_instream_new_with_start (input, G_CHECKSUM_SHA256, buf, len); if (!_check_support_reflink (self, &reflinks_supported, error)) return FALSE; if (xattrs == NULL || !G_IN_SET(self->mode, OSTREE_REPO_MODE_BARE, OSTREE_REPO_MODE_BARE_USER, OSTREE_REPO_MODE_BARE_USER_ONLY) || object_file_type != G_FILE_TYPE_REGULAR || !reflinks_supported) file_input = (GInputStream*)checksum_input; else { /* The payload checksum-input reads from the full object checksum-input; this * means it skips the header. 
*/ checksum_payload_input = ot_checksum_instream_new ((GInputStream*)checksum_input, G_CHECKSUM_SHA256); file_input = (GInputStream*)checksum_payload_input; } } else file_input = input; gboolean phys_object_is_symlink = FALSE; switch (object_file_type) { case G_FILE_TYPE_REGULAR: break; case G_FILE_TYPE_SYMBOLIC_LINK: if (self->mode == OSTREE_REPO_MODE_BARE || self->mode == OSTREE_REPO_MODE_BARE_USER_ONLY) phys_object_is_symlink = TRUE; break; default: return glnx_throw (error, "Unsupported file type %u", object_file_type); } guint64 size; /* For bare-user, convert the symlink target to the input stream */ if (repo_mode == OSTREE_REPO_MODE_BARE_USER && object_file_type == G_FILE_TYPE_SYMBOLIC_LINK) { const char *target_str = g_file_info_get_symlink_target (file_info); g_autoptr(GBytes) target = g_bytes_new (target_str, strlen (target_str) + 1); /* Include the terminating zero so we can e.g. mmap this file */ file_input = file_input_owned = g_memory_input_stream_new_from_bytes (target); size = g_bytes_get_size (target); } else if (!phys_object_is_symlink) size = g_file_info_get_size (file_info); else size = 0; /* Free space check; only applies during transactions */ if ((self->min_free_space_percent > 0 || self->min_free_space_mb > 0) && self->in_transaction) { g_mutex_lock (&self->txn_lock); g_assert_cmpint (self->txn.blocksize, >, 0); const fsblkcnt_t object_blocks = (size / self->txn.blocksize) + 1; if (object_blocks > self->txn.max_blocks) { guint64 bytes_required = (guint64)object_blocks * self->txn.blocksize; self->cleanup_stagedir = TRUE; g_mutex_unlock (&self->txn_lock); return throw_min_free_space_error (self, bytes_required, error); } /* This is the main bit that needs mutex protection */ self->txn.max_blocks -= object_blocks; g_mutex_unlock (&self->txn_lock); } /* For regular files, we create them with default mode, and only * later apply any xattrs and setuid bits. The rationale here * is that an attacker on the network with the ability to MITM * could potentially cause the system to make a temporary setuid * binary with trailing garbage, creating a window on the local * system where a malicious setuid binary exists. * * We use GLnxTmpfile for regular files, and OtCleanupUnlinkat for symlinks. */ g_auto(OtCleanupUnlinkat) tmp_unlinker = { commit_tmp_dfd (self), NULL }; g_auto(GLnxTmpfile) tmpf = { 0, }; goffset unpacked_size = 0; gboolean indexable = FALSE; /* Is it a symlink physically? 
*/ if (phys_object_is_symlink) { /* This will not be hit for bare-user or archive */ g_assert (self->mode == OSTREE_REPO_MODE_BARE || self->mode == OSTREE_REPO_MODE_BARE_USER_ONLY); if (!_ostree_make_temporary_symlink_at (commit_tmp_dfd (self), g_file_info_get_symlink_target (file_info), &tmp_unlinker.path, cancellable, error)) return FALSE; } else if (repo_mode != OSTREE_REPO_MODE_ARCHIVE) { if (!create_regular_tmpfile_linkable_with_content (self, size, file_input, &tmpf, cancellable, error)) return FALSE; } else { g_autoptr(GConverter) zlib_compressor = NULL; g_autoptr(GOutputStream) compressed_out_stream = NULL; g_autoptr(GOutputStream) temp_out = NULL; g_assert (repo_mode == OSTREE_REPO_MODE_ARCHIVE); if (self->generate_sizes) indexable = TRUE; if (!glnx_open_tmpfile_linkable_at (commit_tmp_dfd (self), ".", O_WRONLY|O_CLOEXEC, &tmpf, error)) return FALSE; temp_out = g_unix_output_stream_new (tmpf.fd, FALSE); g_autoptr(GBytes) file_meta_header = _ostree_zlib_file_header_new (file_info, xattrs); gsize file_meta_len; const guint8* file_meta_buf = g_bytes_get_data (file_meta_header, &file_meta_len); { gsize bytes_written; if (!g_output_stream_write_all (temp_out, file_meta_buf, file_meta_len, &bytes_written, cancellable, error)) return FALSE; } if (g_file_info_get_file_type (file_info) == G_FILE_TYPE_REGULAR) { zlib_compressor = (GConverter*)g_zlib_compressor_new (G_ZLIB_COMPRESSOR_FORMAT_RAW, self->zlib_compression_level); compressed_out_stream = g_converter_output_stream_new (temp_out, zlib_compressor); /* Don't close the base; we'll do that later */ g_filter_output_stream_set_close_base_stream ((GFilterOutputStream*)compressed_out_stream, FALSE); if (g_output_stream_splice (compressed_out_stream, file_input, 0, cancellable, error) < 0) return FALSE; unpacked_size = g_file_info_get_size (file_info); } if (!g_output_stream_flush (temp_out, cancellable, error)) return FALSE; if (!glnx_fchmod (tmpf.fd, 0644, error)) return FALSE; } const char *actual_checksum = NULL; g_autofree char *actual_payload_checksum = NULL; g_autofree char *actual_checksum_owned = NULL; if (!checksum_input) actual_checksum = expected_checksum; else { actual_checksum = actual_checksum_owned = ot_checksum_instream_get_string (checksum_input); if (expected_checksum) { if (!_ostree_compare_object_checksum (OSTREE_OBJECT_TYPE_FILE, expected_checksum, actual_checksum, error)) return FALSE; } if (checksum_payload_input) actual_payload_checksum = ot_checksum_instream_get_string (checksum_payload_input); } g_assert (actual_checksum != NULL); /* Pacify static analysis */ /* See whether or not we have the object, now that we know the * checksum. */ gboolean have_obj; if (!_ostree_repo_has_loose_object (self, actual_checksum, OSTREE_OBJECT_TYPE_FILE, &have_obj, cancellable, error)) return FALSE; /* If we already have it, just update the stats. */ if (have_obj) { g_mutex_lock (&self->txn_lock); self->txn.stats.content_objects_total++; g_mutex_unlock (&self->txn_lock); if (!_create_payload_link (self, actual_checksum, actual_payload_checksum, file_info, cancellable, error)) return FALSE; if (out_csum) *out_csum = ostree_checksum_to_bytes (actual_checksum); /* Note early return */ return TRUE; } const guint32 uid = g_file_info_get_attribute_uint32 (file_info, "unix::uid"); const guint32 gid = g_file_info_get_attribute_uint32 (file_info, "unix::gid"); const guint32 mode = g_file_info_get_attribute_uint32 (file_info, "unix::mode"); /* Is it "physically" a symlink? 
*/ if (phys_object_is_symlink) { if (self->mode == OSTREE_REPO_MODE_BARE_USER_ONLY) { /* We don't store the metadata in bare-user-only, so we're done. */ } else if (self->mode == OSTREE_REPO_MODE_BARE) { /* Now that we know the checksum is valid, apply uid/gid, mode bits, * and extended attributes. * * Note, this does not apply for bare-user repos, as they store symlinks * as regular files. */ if (G_UNLIKELY (fchownat (tmp_unlinker.dfd, tmp_unlinker.path, uid, gid, AT_SYMLINK_NOFOLLOW) == -1)) return glnx_throw_errno_prefix (error, "fchownat"); if (xattrs != NULL) { ot_security_smack_reset_dfd_name (tmp_unlinker.dfd, tmp_unlinker.path); if (!glnx_dfd_name_set_all_xattrs (tmp_unlinker.dfd, tmp_unlinker.path, xattrs, cancellable, error)) return FALSE; } } else { /* We don't do symlinks in archive or bare-user */ g_assert_not_reached (); } if (!commit_path_final (self, actual_checksum, OSTREE_OBJECT_TYPE_FILE, &tmp_unlinker, cancellable, error)) return FALSE; } else { /* Update size metadata if configured */ if (indexable && object_file_type == G_FILE_TYPE_REGULAR) { struct stat stbuf; if (!glnx_fstat (tmpf.fd, &stbuf, error)) return FALSE; repo_store_size_entry (self, actual_checksum, unpacked_size, stbuf.st_size); } /* Check if a file with the same payload is present in the repository, and in case try to reflink it */ if (actual_payload_checksum && !_try_clone_from_payload_link (self, self, actual_payload_checksum, file_info, &tmpf, cancellable, error)) return FALSE; /* This path is for regular files */ if (!commit_loose_regfile_object (self, actual_checksum, &tmpf, uid, gid, mode, xattrs, cancellable, error)) return FALSE; if (!_create_payload_link (self, actual_checksum, actual_payload_checksum, file_info, cancellable, error)) return FALSE; } /* Update statistics */ g_mutex_lock (&self->txn_lock); self->txn.stats.content_objects_written++; self->txn.stats.content_bytes_written += g_file_info_get_size (file_info); self->txn.stats.content_objects_total++; g_mutex_unlock (&self->txn_lock); if (out_csum) { g_assert (actual_checksum); *out_csum = ostree_checksum_to_bytes (actual_checksum); } return TRUE; } /* A fast path for local commits to `bare` or `bare-user-only` * repos - we basically checksum the file and do a renameat() * into place. * * This could be enhanced down the line to handle cases where we have a modified * stat struct in place; e.g. for `bare` we could do the `chown`, or chmod etc., * and reset the xattrs. * * We could also do this for bare-user, would just involve adding the xattr (and * potentially deleting other ones...not sure if we'd really want e.g. the * security.selinux xattr on setuid binaries and the like to live on). 
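 *
 * The essential pattern, as a hedged sketch (the real function below also
 * handles hardlinked duplicates and bare-user-only permission
 * canonicalization):
 *
 *   checksum = SHA256 (file header || payload);
 *   _ostree_loose_path (loose_path, checksum, OSTREE_OBJECT_TYPE_FILE, mode);
 *   renameat (src_dfd, name, commit_dest_dfd (repo), loose_path);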
*/ static gboolean adopt_and_commit_regfile (OstreeRepo *self, int dfd, const char *name, GFileInfo *finfo, GVariant *xattrs, char *out_checksum_buf, GCancellable *cancellable, GError **error) { GLNX_AUTO_PREFIX_ERROR ("Commit regfile (adopt)", error); g_assert (G_IN_SET (self->mode, OSTREE_REPO_MODE_BARE, OSTREE_REPO_MODE_BARE_USER_ONLY)); g_autoptr(GBytes) header = _ostree_file_header_new (finfo, xattrs); g_auto(OtChecksum) hasher = { 0, }; ot_checksum_init (&hasher); ot_checksum_update_bytes (&hasher, header); glnx_autofd int fd = -1; if (!glnx_openat_rdonly (dfd, name, FALSE, &fd, error)) return FALSE; (void)posix_fadvise (fd, 0, 0, POSIX_FADV_SEQUENTIAL); /* See also https://gist.github.com/cgwalters/0df0d15199009664549618c2188581f0 * and https://github.com/coreutils/coreutils/blob/master/src/ioblksize.h * Turns out bigger block size is better; down the line we should use their * same heuristics. */ char buf[16*1024]; while (TRUE) { ssize_t bytes_read = read (fd, buf, sizeof (buf)); if (bytes_read < 0) return glnx_throw_errno_prefix (error, "read"); if (bytes_read == 0) break; ot_checksum_update (&hasher, (guint8*)buf, bytes_read); } ot_checksum_get_hexdigest (&hasher, out_checksum_buf, OSTREE_SHA256_STRING_LEN+1); const char *checksum = out_checksum_buf; /* TODO: dedup this with commit_path_final() */ char loose_path[_OSTREE_LOOSE_PATH_MAX]; _ostree_loose_path (loose_path, checksum, OSTREE_OBJECT_TYPE_FILE, self->mode); const guint32 src_dev = g_file_info_get_attribute_uint32 (finfo, "unix::device"); const guint64 src_inode = g_file_info_get_attribute_uint64 (finfo, "unix::inode"); int dest_dfd = commit_dest_dfd (self); if (!_ostree_repo_ensure_loose_objdir_at (dest_dfd, loose_path, cancellable, error)) return FALSE; struct stat dest_stbuf; if (!glnx_fstatat_allow_noent (dest_dfd, loose_path, &dest_stbuf, AT_SYMLINK_NOFOLLOW, error)) return FALSE; /* Is the source actually the same device/inode? This can happen with hardlink * checkouts, which is a bit overly conservative for bare-user-only right now. * If so, we can't use renameat() since from `man 2 renameat`: * * "If oldpath and newpath are existing hard links referring to the same file, * then rename() does nothing, and returns a success status." */ if (errno != ENOENT && src_dev == dest_stbuf.st_dev && src_inode == dest_stbuf.st_ino) { if (!glnx_unlinkat (dfd, name, 0, error)) return FALSE; /* Early return */ return TRUE; } /* For bare-user-only we need to canonicalize perms */ if (self->mode == OSTREE_REPO_MODE_BARE_USER_ONLY) { const guint32 src_mode = g_file_info_get_attribute_uint32 (finfo, "unix::mode"); if (fchmod (fd, src_mode & 0755) < 0) return glnx_throw_errno_prefix (error, "fchmod"); } if (renameat (dfd, name, dest_dfd, loose_path) == -1) { if (errno != EEXIST) return glnx_throw_errno_prefix (error, "Storing file '%s'", name); /* We took ownership here, so delete it */ if (!glnx_unlinkat (dfd, name, 0, error)) return FALSE; } return TRUE; } /* Main driver for writing a metadata (non-content) object. */ static gboolean write_metadata_object (OstreeRepo *self, OstreeObjectType objtype, const char *expected_checksum, GBytes *buf, guchar **out_csum, GCancellable *cancellable, GError **error) { GLNX_AUTO_PREFIX_ERROR ("Writing metadata object", error); g_return_val_if_fail (expected_checksum || out_csum, FALSE); if (g_cancellable_set_error_if_cancelled (cancellable, error)) return FALSE; /* In the metadata case, we're not streaming, so we don't bother creating a * tempfile until we compute the checksum. 
Some metadata like dirmeta is * commonly duplicated, and computing the checksum is going to be cheaper than * making a tempfile. * * However, tombstone commit types don't make sense to checksum, because for * historical reasons we used ostree_repo_write_metadata_trusted() with the * *original* sha256 to say what commit was being killed. */ const gboolean is_tombstone = (objtype == OSTREE_OBJECT_TYPE_TOMBSTONE_COMMIT); char actual_checksum[OSTREE_SHA256_STRING_LEN+1]; if (is_tombstone) { g_assert (expected_checksum != NULL); memcpy (actual_checksum, expected_checksum, sizeof (actual_checksum)); } else { g_auto(OtChecksum) checksum = { 0, }; ot_checksum_init (&checksum); gsize len; const guint8*bufdata = g_bytes_get_data (buf, &len); ot_checksum_update (&checksum, bufdata, len); ot_checksum_get_hexdigest (&checksum, actual_checksum, sizeof (actual_checksum)); gboolean have_obj; if (!_ostree_repo_has_loose_object (self, actual_checksum, objtype, &have_obj, cancellable, error)) return FALSE; /* If we already have the object, we just need to update the tried-to-commit * stat for metadata and be done here. */ if (have_obj) { g_mutex_lock (&self->txn_lock); self->txn.stats.metadata_objects_total++; g_mutex_unlock (&self->txn_lock); if (out_csum) *out_csum = ostree_checksum_to_bytes (actual_checksum); /* Note early return */ return TRUE; } if (expected_checksum) { if (!_ostree_compare_object_checksum (objtype, expected_checksum, actual_checksum, error)) return FALSE; } } /* Ok, checksum is known, let's get the data */ gsize len; const guint8 *bufp = g_bytes_get_data (buf, &len); /* Write the metadata to a temporary file */ g_auto(GLnxTmpfile) tmpf = { 0, }; if (!glnx_open_tmpfile_linkable_at (commit_tmp_dfd (self), ".", O_WRONLY|O_CLOEXEC, &tmpf, error)) return FALSE; if (!glnx_try_fallocate (tmpf.fd, 0, len, error)) return FALSE; if (glnx_loop_write (tmpf.fd, bufp, len) < 0) return glnx_throw_errno_prefix (error, "write()"); if (!glnx_fchmod (tmpf.fd, 0644, error)) return FALSE; /* And commit it into place */ if (!_ostree_repo_commit_tmpf_final (self, actual_checksum, objtype, &tmpf, cancellable, error)) return FALSE; if (objtype == OSTREE_OBJECT_TYPE_COMMIT) { GError *local_error = NULL; /* If we are writing a commit, be sure there is no tombstone for it. We may have deleted the commit and now we are trying to pull it again. */ if (!ostree_repo_delete_object (self, OSTREE_OBJECT_TYPE_TOMBSTONE_COMMIT, actual_checksum, cancellable, &local_error)) { if (g_error_matches (local_error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND)) g_clear_error (&local_error); else { g_propagate_error (error, local_error); return FALSE; } } } /* Update the stats, note we both wrote one and add to total */ g_mutex_lock (&self->txn_lock); self->txn.stats.metadata_objects_written++; self->txn.stats.metadata_objects_total++; g_mutex_unlock (&self->txn_lock); if (out_csum) *out_csum = ostree_checksum_to_bytes (actual_checksum); return TRUE; } /* Look in a single subdirectory of objects/, building up the * (device,inode) → checksum map. 
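 * Conceptually, each entry added below pairs a stat() result with the
 * checksum recovered from the loose object path (an illustrative sketch of
 * the loop body):
 *
 *   key->dev = stbuf.st_dev;
 *   key->ino = stbuf.st_ino;
 *   (checksum: 2 hex chars from the subdir name + 62 from the file name)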
 */
static gboolean
scan_one_loose_devino (OstreeRepo    *self,
                       int            object_dir_fd,
                       GHashTable    *devino_cache,
                       GCancellable  *cancellable,
                       GError       **error)
{
  g_auto(GLnxDirFdIterator) dfd_iter = { 0, };
  if (!glnx_dirfd_iterator_init_at (object_dir_fd, ".", FALSE, &dfd_iter, error))
    return FALSE;

  while (TRUE)
    {
      struct dirent *dent;
      g_auto(GLnxDirFdIterator) child_dfd_iter = { 0, };

      if (!glnx_dirfd_iterator_next_dent (&dfd_iter, &dent, cancellable, error))
        return FALSE;
      if (dent == NULL)
        break;

      /* All object directories only have two character entries */
      if (strlen (dent->d_name) != 2)
        continue;

      if (!glnx_dirfd_iterator_init_at (dfd_iter.fd, dent->d_name, FALSE,
                                        &child_dfd_iter, error))
        return FALSE;

      while (TRUE)
        {
          struct dirent *child_dent;

          if (!glnx_dirfd_iterator_next_dent (&child_dfd_iter, &child_dent, cancellable, error))
            return FALSE;
          if (child_dent == NULL)
            break;

          const char *name = child_dent->d_name;

          gboolean skip;
          switch (self->mode)
            {
            case OSTREE_REPO_MODE_ARCHIVE:
            case OSTREE_REPO_MODE_BARE:
            case OSTREE_REPO_MODE_BARE_USER:
            case OSTREE_REPO_MODE_BARE_USER_ONLY:
              skip = !g_str_has_suffix (name, ".file");
              break;
            default:
              g_assert_not_reached ();
            }
          if (skip)
            continue;

          const char *dot = strrchr (name, '.');
          g_assert (dot);

          /* Skip anything that doesn't look like a 64 character checksum */
          if ((dot - name) != 62)
            continue;

          struct stat stbuf;
          if (!glnx_fstatat (child_dfd_iter.fd, child_dent->d_name,
                             &stbuf, AT_SYMLINK_NOFOLLOW, error))
            return FALSE;

          OstreeDevIno *key = g_new (OstreeDevIno, 1);
          key->dev = stbuf.st_dev;
          key->ino = stbuf.st_ino;
          memcpy (key->checksum, dent->d_name, 2);
          memcpy (key->checksum + 2, name, 62);
          key->checksum[sizeof(key->checksum)-1] = '\0';
          g_hash_table_add (devino_cache, key);
        }
    }

  return TRUE;
}

/* Used by ostree_repo_scan_hardlinks(); see that function for more information. */
static gboolean
scan_loose_devino (OstreeRepo    *self,
                   GHashTable    *devino_cache,
                   GCancellable  *cancellable,
                   GError       **error)
{
  if (self->parent_repo)
    {
      if (!scan_loose_devino (self->parent_repo, devino_cache, cancellable, error))
        return FALSE;
    }

  if (self->mode == OSTREE_REPO_MODE_ARCHIVE &&
      self->uncompressed_objects_dir_fd != -1)
    {
      if (!scan_one_loose_devino (self, self->uncompressed_objects_dir_fd,
                                  devino_cache, cancellable, error))
        return FALSE;
    }

  if (!scan_one_loose_devino (self, self->objects_dir_fd,
                              devino_cache, cancellable, error))
    return FALSE;

  return TRUE;
}

/* Look up a (device,inode) pair in our cache, and see if it maps to a known
 * checksum. */
static const char *
devino_cache_lookup (OstreeRepo               *self,
                     OstreeRepoCommitModifier *modifier,
                     guint32                   device,
                     guint32                   inode)
{
  OstreeDevIno dev_ino_key;
  OstreeDevIno *dev_ino_val;
  GHashTable *cache;

  if (self->loose_object_devino_hash)
    cache = self->loose_object_devino_hash;
  else if (modifier && modifier->devino_cache)
    cache = modifier->devino_cache;
  else
    return NULL;

  dev_ino_key.dev = device;
  dev_ino_key.ino = inode;
  dev_ino_val = g_hash_table_lookup (cache, &dev_ino_key);
  if (!dev_ino_val)
    return NULL;
  return dev_ino_val->checksum;
}

/**
 * ostree_repo_scan_hardlinks:
 * @self: An #OstreeRepo
 * @cancellable: Cancellable
 * @error: Error
 *
 * This function is deprecated in favor of using ostree_repo_devino_cache_new(),
 * which allows a precise mapping to be built up between hardlink checkout files
 * and their checksums between `ostree_repo_checkout_at()` and
 * `ostree_repo_write_directory_to_mtree()`.
 *
 * When invoking ostree_repo_write_directory_to_mtree(), it has to compute the
 * checksum of all files.
 * If your commit contains hardlinks from a checkout, this function builds a
 * mapping of device numbers and inodes to their checksums.
 *
 * There is an upfront cost to creating this mapping, as this will scan the
 * entire objects directory. If your commit is composed of mostly hardlinks to
 * existing ostree objects, then this will speed up the commit considerably, so
 * call it before you call ostree_repo_write_directory_to_mtree() or similar.
 * However, ostree_repo_devino_cache_new() is better as it avoids scanning all
 * objects.
 *
 * Multithreading: This function is *not* MT safe.
 */
gboolean
ostree_repo_scan_hardlinks (OstreeRepo    *self,
                            GCancellable  *cancellable,
                            GError       **error)
{
  g_return_val_if_fail (self->in_transaction == TRUE, FALSE);

  if (!self->loose_object_devino_hash)
    self->loose_object_devino_hash = (GHashTable*)ostree_repo_devino_cache_new ();
  g_hash_table_remove_all (self->loose_object_devino_hash);
  return scan_loose_devino (self, self->loose_object_devino_hash, cancellable, error);
}

/**
 * ostree_repo_prepare_transaction:
 * @self: An #OstreeRepo
 * @out_transaction_resume: (allow-none) (out): Whether this transaction
 * is resuming from a previous one. This is a legacy state, now OSTree
 * pulls use per-commit `state/.commitpartial` files.
 * @cancellable: Cancellable
 * @error: Error
 *
 * Starts or resumes a transaction. In order to write to a repo, you
 * need to start a transaction. You can complete the transaction with
 * ostree_repo_commit_transaction(), or abort the transaction with
 * ostree_repo_abort_transaction().
 *
 * Currently, transactions may result in partial commits or data in the target
 * repository if interrupted during ostree_repo_commit_transaction(), and
 * further writing refs is also not currently atomic.
 *
 * There can be at most one transaction active on a repo at a time per instance
 * of `OstreeRepo`; however, it is safe to have multiple threads writing objects
 * on a single `OstreeRepo` instance as long as their lifetime is bounded by the
 * transaction.
 *
 * Locking: Acquires a `shared` lock; release via commit or abort
 * Multithreading: This function is *not* MT safe; only one transaction can be
 * active at a time.
 */
gboolean
ostree_repo_prepare_transaction (OstreeRepo     *self,
                                 gboolean       *out_transaction_resume,
                                 GCancellable   *cancellable,
                                 GError        **error)
{
  g_autoptr(_OstreeRepoAutoTransaction) txn = NULL;
  guint64 reserved_bytes = 0;

  g_return_val_if_fail (self->in_transaction == FALSE, FALSE);

  g_debug ("Preparing transaction in repository %p", self);

  /* Set up to abort the transaction if we return early from this function. */
  txn = self;

  memset (&self->txn.stats, 0, sizeof (OstreeRepoTransactionStats));

  self->txn_locked = _ostree_repo_lock_push (self, OSTREE_REPO_LOCK_SHARED,
                                             cancellable, error);
  if (!self->txn_locked)
    return FALSE;

  self->in_transaction = TRUE;
  self->cleanup_stagedir = FALSE;

  struct statvfs stvfsbuf;
  if (TEMP_FAILURE_RETRY (fstatvfs (self->repo_dir_fd, &stvfsbuf)) < 0)
    return glnx_throw_errno_prefix (error, "fstatvfs");

  g_mutex_lock (&self->txn_lock);
  self->txn.blocksize = stvfsbuf.f_bsize;
  if (!ostree_repo_get_min_free_space_bytes (self, &reserved_bytes, error))
    {
      g_mutex_unlock (&self->txn_lock);
      return FALSE;
    }
  self->reserved_blocks = reserved_bytes / self->txn.blocksize;
  /* Use the appropriate free block count if we're unprivileged */
  guint64 bfree = (getuid () != 0 ?
stvfsbuf.f_bavail : stvfsbuf.f_bfree); if (bfree > self->reserved_blocks) self->txn.max_blocks = bfree - self->reserved_blocks; else { self->txn.max_blocks = 0; /* Don't throw_min_free_space_error here; reason being that * this transaction could be just committing metadata objects * which are relatively small in size and we do not really * want to block them via min-free-space-* value. Metadata * objects helps in housekeeping and hence should be kept * out of the strict min-free-space values. * * The main drivers for writing content objects will always honor * the min-free-space value and throw_min_free_space_error in * case of overstepping the number of reserved blocks. */ } g_mutex_unlock (&self->txn_lock); gboolean ret_transaction_resume = FALSE; if (!_ostree_repo_allocate_tmpdir (self->tmp_dir_fd, self->stagedir_prefix, &self->commit_stagedir, &self->commit_stagedir_lock, &ret_transaction_resume, cancellable, error)) return FALSE; /* Success: do not abort the transaction when returning. */ txn = NULL; if (out_transaction_resume) *out_transaction_resume = ret_transaction_resume; return TRUE; } /* Called for commit, to iterate over the "staging" directory and rename all the * objects into the primary objects/ location. Notably this is called only after * syncfs() has potentially been invoked to ensure that all objects have been * written to disk. In the future we may enhance this; see * https://github.com/ostreedev/ostree/issues/1184 */ static gboolean rename_pending_loose_objects (OstreeRepo *self, GCancellable *cancellable, GError **error) { GLNX_AUTO_PREFIX_ERROR ("rename pending", error); g_auto(GLnxDirFdIterator) dfd_iter = { 0, }; if (!glnx_dirfd_iterator_init_at (self->commit_stagedir.fd, ".", FALSE, &dfd_iter, error)) return FALSE; /* Iterate over the outer checksum dir */ while (TRUE) { struct dirent *dent; gboolean renamed_some_object = FALSE; g_auto(GLnxDirFdIterator) child_dfd_iter = { 0, }; char loose_objpath[_OSTREE_LOOSE_PATH_MAX]; if (!glnx_dirfd_iterator_next_dent_ensure_dtype (&dfd_iter, &dent, cancellable, error)) return FALSE; if (dent == NULL) break; if (dent->d_type != DT_DIR) continue; /* All object directories only have two character entries */ if (strlen (dent->d_name) != 2) continue; if (!glnx_dirfd_iterator_init_at (dfd_iter.fd, dent->d_name, FALSE, &child_dfd_iter, error)) return FALSE; loose_objpath[0] = dent->d_name[0]; loose_objpath[1] = dent->d_name[1]; loose_objpath[2] = '/'; /* Iterate over inner checksum dir */ while (TRUE) { struct dirent *child_dent; if (!glnx_dirfd_iterator_next_dent (&child_dfd_iter, &child_dent, cancellable, error)) return FALSE; if (child_dent == NULL) break; g_strlcpy (loose_objpath + 3, child_dent->d_name, sizeof (loose_objpath)-3); if (!_ostree_repo_ensure_loose_objdir_at (self->objects_dir_fd, loose_objpath, cancellable, error)) return FALSE; if (!glnx_renameat (child_dfd_iter.fd, loose_objpath + 3, self->objects_dir_fd, loose_objpath, error)) return FALSE; renamed_some_object = TRUE; } if (renamed_some_object && !self->disable_fsync) { /* Ensure that in the case of a power cut all the directory metadata that we want has reached the disk. In particular, we want this before we update the refs to point to these objects. 
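 *
 * Schematically, the durability ordering being enforced is:
 *
 *   write objects -> fsync() object directories -> update refs
 *
 * so that a ref can never name an object whose directory entry was lost
 * in a crash.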
*/ glnx_autofd int target_dir_fd = -1; loose_objpath[2] = 0; if (!glnx_opendirat (self->objects_dir_fd, loose_objpath, FALSE, &target_dir_fd, error)) return FALSE; if (fsync (target_dir_fd) == -1) return glnx_throw_errno_prefix (error, "fsync"); } } /* In case we created any loose object subdirs, make sure they are on disk */ if (!self->disable_fsync) { if (fsync (self->objects_dir_fd) == -1) return glnx_throw_errno_prefix (error, "fsync"); } return TRUE; } /* Try to lock a transaction stage directory created by * ostree_repo_prepare_transaction(). */ static gboolean cleanup_txn_dir (OstreeRepo *self, int dfd, const char *path, GCancellable *cancellable, GError **error) { g_auto(GLnxLockFile) lockfile = { 0, }; gboolean did_lock; /* Try to lock, but if we don't get it, move on */ if (!_ostree_repo_try_lock_tmpdir (dfd, path, &lockfile, &did_lock, error)) return FALSE; if (!did_lock) return TRUE; /* Note early return */ /* If however this is the staging directory for the *current* * boot, then don't delete it now - we may end up reusing it, as * is the point. Delete *only if* we have hit min-free-space* checks * as we don't want to hold onto caches in that case. */ if (g_str_has_prefix (path, self->stagedir_prefix) && !self->cleanup_stagedir) return TRUE; /* Note early return */ /* But, crucially we can now clean up staging directories * from *other* boots. */ if (!glnx_shutil_rm_rf_at (dfd, path, cancellable, error)) return glnx_prefix_error (error, "Removing %s", path); return TRUE; } /* Look in repo/tmp and delete files that are older than a day (by default). * This used to be primarily used by the libsoup fetcher which stored partially * written objects. In practice now that that isn't done anymore, we should * use different logic here. Some more information in * https://github.com/ostreedev/ostree/issues/713 */ static gboolean cleanup_tmpdir (OstreeRepo *self, GCancellable *cancellable, GError **error) { GLNX_AUTO_PREFIX_ERROR ("tmpdir cleanup", error); const guint64 curtime_secs = g_get_real_time () / 1000000; g_auto(GLnxDirFdIterator) dfd_iter = { 0, }; if (!glnx_dirfd_iterator_init_at (self->tmp_dir_fd, ".", TRUE, &dfd_iter, error)) return FALSE; while (TRUE) { struct dirent *dent; if (!glnx_dirfd_iterator_next_dent (&dfd_iter, &dent, cancellable, error)) return FALSE; if (dent == NULL) break; /* Special case this; we create it when opening, and don't want * to blow it away. */ if (strcmp (dent->d_name, "cache") == 0) continue; struct stat stbuf; if (!glnx_fstatat_allow_noent (dfd_iter.fd, dent->d_name, &stbuf, AT_SYMLINK_NOFOLLOW, error)) return FALSE; if (errno == ENOENT) /* Did another cleanup win? */ continue; /* Handle transaction tmpdirs */ if (_ostree_repo_is_locked_tmpdir (dent->d_name)) { if (!cleanup_txn_dir (self, dfd_iter.fd, dent->d_name, cancellable, error)) return FALSE; continue; /* We've handled this, move on */ } /* At this point we're looking at an unknown-origin file or directory in * the tmpdir. This could be something like a temporary checkout dir (used * by rpm-ostree), or (from older versions of libostree) a tempfile if we * don't have O_TMPFILE for commits. */ /* Ignore files from the future */ if (stbuf.st_mtime > curtime_secs) continue; /* We're pruning content based on the expiry, which * defaults to a day. That's what we were doing before we * had locking...but in future we can be smarter here. 
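 *
 * (Worked example, assuming the documented one-day default expiry: an
 * entry whose mtime is 90000 seconds in the past gives
 * delta = 90000 > 86400, so it is removed below.)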
*/ guint64 delta = curtime_secs - stbuf.st_mtime; if (delta > self->tmp_expiry_seconds) { if (!glnx_shutil_rm_rf_at (dfd_iter.fd, dent->d_name, cancellable, error)) return glnx_prefix_error (error, "Removing %s", dent->d_name); } } return TRUE; } static void ensure_txn_refs (OstreeRepo *self) { if (self->txn.refs == NULL) self->txn.refs = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, g_free); if (self->txn.collection_refs == NULL) self->txn.collection_refs = g_hash_table_new_full (ostree_collection_ref_hash, ostree_collection_ref_equal, (GDestroyNotify) ostree_collection_ref_free, g_free); } /** * ostree_repo_mark_commit_partial: * @self: Repo * @checksum: Commit SHA-256 * @is_partial: Whether or not this commit is partial * @error: Error * * Commits in "partial" state do not have all their child objects written. This * occurs in various situations, such as during a pull, but also if a "subpath" * pull is used, as well as "commit only" pulls. * * This function is used by ostree_repo_pull_with_options(); you * should use this if you are implementing a different type of transport. * * Since: 2017.15 */ gboolean ostree_repo_mark_commit_partial (OstreeRepo *self, const char *checksum, gboolean is_partial, GError **error) { g_autofree char *commitpartial_path = _ostree_get_commitpartial_path (checksum); if (is_partial) { glnx_autofd int fd = openat (self->repo_dir_fd, commitpartial_path, O_EXCL | O_CREAT | O_WRONLY | O_CLOEXEC | O_NOCTTY, 0644); if (fd == -1) { if (errno != EEXIST) return glnx_throw_errno_prefix (error, "open(%s)", commitpartial_path); } } else { if (!ot_ensure_unlinked_at (self->repo_dir_fd, commitpartial_path, 0)) return FALSE; } return TRUE; } /** * ostree_repo_transaction_set_refspec: * @self: An #OstreeRepo * @refspec: The refspec to write * @checksum: (nullable): The checksum to point it to * * Like ostree_repo_transaction_set_ref(), but takes concatenated * @refspec format as input instead of separate remote and name * arguments. * * Multithreading: Since v2017.15 this function is MT safe. */ void ostree_repo_transaction_set_refspec (OstreeRepo *self, const char *refspec, const char *checksum) { g_return_if_fail (self->in_transaction == TRUE); g_mutex_lock (&self->txn_lock); ensure_txn_refs (self); g_hash_table_replace (self->txn.refs, g_strdup (refspec), g_strdup (checksum)); g_mutex_unlock (&self->txn_lock); } /** * ostree_repo_transaction_set_ref: * @self: An #OstreeRepo * @remote: (allow-none): A remote for the ref * @ref: The ref to write * @checksum: (nullable): The checksum to point it to * * If @checksum is not %NULL, then record it as the target of ref named * @ref; if @remote is provided, the ref will appear to originate from that * remote. * * Otherwise, if @checksum is %NULL, then record that the ref should * be deleted. * * The change will be written when the transaction is completed with * ostree_repo_commit_transaction(); that function takes care of writing all of * the objects (such as the commit referred to by @checksum) before updating the * refs. If the transaction is instead aborted with * ostree_repo_abort_transaction(), no changes to the ref will be made to the * repository. * * Note however that currently writing *multiple* refs is not truly atomic; if * the process or system is terminated during * ostree_repo_commit_transaction(), it is possible that just some of the refs * will have been updated. Your application should take care to handle this * case. * * Multithreading: Since v2017.15 this function is MT safe. 
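 *
 * A hypothetical usage sketch (illustrative only; the ref name and the
 * commit_checksum variable are examples, not part of this API):
 * |[<!-- language="C" -->
 * if (!ostree_repo_prepare_transaction (repo, NULL, cancellable, error))
 *   return FALSE;
 * ostree_repo_transaction_set_ref (repo, NULL, "exampleos/x86_64/stable",
 *                                  commit_checksum);
 * if (!ostree_repo_commit_transaction (repo, NULL, cancellable, error))
 *   return FALSE;
 * ]|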
*/ void ostree_repo_transaction_set_ref (OstreeRepo *self, const char *remote, const char *ref, const char *checksum) { g_return_if_fail (self->in_transaction == TRUE); char *refspec; if (remote) refspec = g_strdup_printf ("%s:%s", remote, ref); else refspec = g_strdup (ref); g_mutex_lock (&self->txn_lock); ensure_txn_refs (self); g_hash_table_replace (self->txn.refs, refspec, g_strdup (checksum)); g_mutex_unlock (&self->txn_lock); } /** * ostree_repo_transaction_set_collection_ref: * @self: An #OstreeRepo * @ref: The collection–ref to write * @checksum: (nullable): The checksum to point it to * * If @checksum is not %NULL, then record it as the target of local ref named * @ref. * * Otherwise, if @checksum is %NULL, then record that the ref should * be deleted. * * The change will not be written out immediately, but when the transaction * is completed with ostree_repo_commit_transaction(). If the transaction * is instead aborted with ostree_repo_abort_transaction(), no changes will * be made to the repository. * * Multithreading: Since v2017.15 this function is MT safe. * * Since: 2018.6 */ void ostree_repo_transaction_set_collection_ref (OstreeRepo *self, const OstreeCollectionRef *ref, const char *checksum) { g_return_if_fail (OSTREE_IS_REPO (self)); g_return_if_fail (self->in_transaction == TRUE); g_return_if_fail (ref != NULL); g_return_if_fail (checksum == NULL || ostree_validate_checksum_string (checksum, NULL)); g_mutex_lock (&self->txn_lock); ensure_txn_refs (self); g_hash_table_replace (self->txn.collection_refs, ostree_collection_ref_dup (ref), g_strdup (checksum)); g_mutex_unlock (&self->txn_lock); } /** * ostree_repo_set_ref_immediate: * @self: An #OstreeRepo * @remote: (allow-none): A remote for the ref * @ref: The ref to write * @checksum: (allow-none): The checksum to point it to, or %NULL to unset * @cancellable: GCancellable * @error: GError * * This is like ostree_repo_transaction_set_ref(), except it may be * invoked outside of a transaction. This is presently safe for the * case where we're creating or overwriting an existing ref. * * Multithreading: This function is MT safe. */ gboolean ostree_repo_set_ref_immediate (OstreeRepo *self, const char *remote, const char *ref, const char *checksum, GCancellable *cancellable, GError **error) { const OstreeCollectionRef _ref = { NULL, (gchar *) ref }; return _ostree_repo_write_ref (self, remote, &_ref, checksum, NULL, cancellable, error); } /** * ostree_repo_set_alias_ref_immediate: * @self: An #OstreeRepo * @remote: (allow-none): A remote for the ref * @ref: The ref to write * @target: (allow-none): The ref target to point it to, or %NULL to unset * @cancellable: GCancellable * @error: GError * * Like ostree_repo_set_ref_immediate(), but creates an alias. */ gboolean ostree_repo_set_alias_ref_immediate (OstreeRepo *self, const char *remote, const char *ref, const char *target, GCancellable *cancellable, GError **error) { const OstreeCollectionRef _ref = { NULL, (gchar *) ref }; return _ostree_repo_write_ref (self, remote, &_ref, NULL, target, cancellable, error); } /** * ostree_repo_set_collection_ref_immediate: * @self: An #OstreeRepo * @ref: The collection–ref to write * @checksum: (nullable): The checksum to point it to, or %NULL to unset * @cancellable: GCancellable * @error: GError * * This is like ostree_repo_transaction_set_collection_ref(), except it may be * invoked outside of a transaction. This is presently safe for the * case where we're creating or overwriting an existing ref. 
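 *
 * A hypothetical call (illustrative only; the collection ID, ref name and
 * commit_checksum variable are examples):
 * |[<!-- language="C" -->
 * const OstreeCollectionRef ref = { (gchar *) "org.example.Os",
 *                                   (gchar *) "exampleos/x86_64/stable" };
 * if (!ostree_repo_set_collection_ref_immediate (repo, &ref, commit_checksum,
 *                                                cancellable, error))
 *   return FALSE;
 * ]|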
* * Returns: %TRUE on success, %FALSE otherwise * Since: 2018.6 */ gboolean ostree_repo_set_collection_ref_immediate (OstreeRepo *self, const OstreeCollectionRef *ref, const char *checksum, GCancellable *cancellable, GError **error) { g_return_val_if_fail (OSTREE_IS_REPO (self), FALSE); g_return_val_if_fail (ref != NULL, FALSE); g_return_val_if_fail (checksum == NULL || ostree_validate_checksum_string (checksum, NULL), FALSE); g_return_val_if_fail (cancellable == NULL || G_IS_CANCELLABLE (cancellable), FALSE); g_return_val_if_fail (error == NULL || *error == NULL, FALSE); return _ostree_repo_write_ref (self, NULL, ref, checksum, NULL, cancellable, error); } /** * ostree_repo_commit_transaction: * @self: An #OstreeRepo * @out_stats: (allow-none) (out): A set of statistics of things * that happened during this transaction. * @cancellable: Cancellable * @error: Error * * Complete the transaction. Any refs set with * ostree_repo_transaction_set_ref() or * ostree_repo_transaction_set_refspec() will be written out. * * Note that if multiple threads are performing writes, all such threads must * have terminated before this function is invoked. * * Locking: Releases `shared` lock acquired by `ostree_repo_prepare_transaction()` * Multithreading: This function is *not* MT safe; only one transaction can be * active at a time. */ gboolean ostree_repo_commit_transaction (OstreeRepo *self, OstreeRepoTransactionStats *out_stats, GCancellable *cancellable, GError **error) { g_return_val_if_fail (self->in_transaction == TRUE, FALSE); g_debug ("Committing transaction in repository %p", self); if ((self->test_error_flags & OSTREE_REPO_TEST_ERROR_PRE_COMMIT) > 0) return glnx_throw (error, "OSTREE_REPO_TEST_ERROR_PRE_COMMIT specified"); /* FIXME: Added OSTREE_SUPPRESS_SYNCFS since valgrind in el7 doesn't know * about `syncfs`...we should delete this later. */ if (!self->disable_fsync && g_getenv ("OSTREE_SUPPRESS_SYNCFS") == NULL) { if (syncfs (self->tmp_dir_fd) < 0) return glnx_throw_errno_prefix (error, "syncfs"); } if (!rename_pending_loose_objects (self, cancellable, error)) return FALSE; g_debug ("txn commit %s", glnx_basename (self->commit_stagedir.path)); if (!glnx_tmpdir_delete (&self->commit_stagedir, cancellable, error)) return FALSE; glnx_release_lock_file (&self->commit_stagedir_lock); /* This performs a global cleanup */ if (!cleanup_tmpdir (self, cancellable, error)) return FALSE; if (self->loose_object_devino_hash) g_hash_table_remove_all (self->loose_object_devino_hash); if (self->txn.refs) if (!_ostree_repo_update_refs (self, self->txn.refs, cancellable, error)) return FALSE; if (self->txn.collection_refs) if (!_ostree_repo_update_collection_refs (self, self->txn.collection_refs, cancellable, error)) return FALSE; /* Update the summary if auto-update-summary is set, because doing so was * delayed for each ref change during the transaction. 
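 *
 * (Hypothetical repo config snippet enabling this; the `[core]` group is
 * an assumption here, only the auto-update-summary option name comes from
 * the comment above:
 *
 *   [core]
 *   auto-update-summary=true
 * )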
*/ if ((self->txn.refs || self->txn.collection_refs) && !_ostree_repo_maybe_regenerate_summary (self, cancellable, error)) return FALSE; g_clear_pointer (&self->txn.refs, g_hash_table_destroy); g_clear_pointer (&self->txn.collection_refs, g_hash_table_destroy); self->in_transaction = FALSE; if (!ot_ensure_unlinked_at (self->repo_dir_fd, "transaction", 0)) return FALSE; if (self->txn_locked) { if (!_ostree_repo_lock_pop (self, cancellable, error)) return FALSE; self->txn_locked = FALSE; } if (out_stats) *out_stats = self->txn.stats; return TRUE; } /** * ostree_repo_abort_transaction: * @self: An #OstreeRepo * @cancellable: Cancellable * @error: Error * * Abort the active transaction; any staged objects and ref changes will be * discarded. You *must* invoke this if you have chosen not to invoke * ostree_repo_commit_transaction(). Calling this function when not in a * transaction will do nothing and return successfully. */ gboolean ostree_repo_abort_transaction (OstreeRepo *self, GCancellable *cancellable, GError **error) { g_autoptr(GError) cleanup_error = NULL; /* Always ignore the cancellable to avoid the chance that, if it gets * canceled, the transaction may not be fully cleaned up. * See https://github.com/ostreedev/ostree/issues/1491 . */ cancellable = NULL; /* Note early return */ if (!self->in_transaction) return TRUE; g_debug ("Aborting transaction in repository %p", self); if (self->loose_object_devino_hash) g_hash_table_remove_all (self->loose_object_devino_hash); g_clear_pointer (&self->txn.refs, g_hash_table_destroy); g_clear_pointer (&self->txn.collection_refs, g_hash_table_destroy); glnx_tmpdir_unset (&self->commit_stagedir); glnx_release_lock_file (&self->commit_stagedir_lock); /* Do not propagate failures from cleanup_tmpdir() immediately, as we want * to clean up the rest of the internal transaction state first. */ cleanup_tmpdir (self, cancellable, &cleanup_error); self->in_transaction = FALSE; if (self->txn_locked) { if (!_ostree_repo_lock_pop (self, cancellable, error)) return FALSE; self->txn_locked = FALSE; } /* Propagate cleanup_tmpdir() failure. */ if (cleanup_error != NULL) { g_propagate_error (error, g_steal_pointer (&cleanup_error)); return FALSE; } return TRUE; } /** * ostree_repo_write_metadata: * @self: Repo * @objtype: Object type * @expected_checksum: (allow-none): If provided, validate content against this checksum * @object: Metadata * @out_csum: (out) (array fixed-size=32) (allow-none): Binary checksum * @cancellable: Cancellable * @error: Error * * Store the metadata object @object. Return the checksum * as @out_csum. * * If @expected_checksum is not %NULL, verify it against the * computed checksum. */ gboolean ostree_repo_write_metadata (OstreeRepo *self, OstreeObjectType objtype, const char *expected_checksum, GVariant *object, guchar **out_csum, GCancellable *cancellable, GError **error) { g_autoptr(GVariant) normalized = NULL; /* First, if we have an expected checksum, see if we already have this * object. This mirrors the same logic in ostree_repo_write_content(). */ if (expected_checksum) { gboolean have_obj; if (!_ostree_repo_has_loose_object (self, expected_checksum, objtype, &have_obj, cancellable, error)) return FALSE; if (have_obj) { if (out_csum) *out_csum = ostree_checksum_to_bytes (expected_checksum); return TRUE; } /* If the caller is giving us an expected checksum, the object really has * to be normalized already. Otherwise, how would they know the checksum? * There's no sense in redoing it. 
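 * (The two branches below differ only in who normalizes: with an
 * @expected_checksum the caller must already hold the normal form, since
 * that is what the checksum was computed over; without one,
 * g_variant_get_normal_form() produces the canonical byte serialization
 * that write_metadata_object() will hash.)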
 */
      normalized = g_variant_ref (object);
    }
  else
    {
      normalized = g_variant_get_normal_form (object);
    }

  /* For untrusted objects, verify their structure here */
  if (expected_checksum)
    {
      if (!_ostree_validate_structureof_metadata (objtype, object, error))
        return FALSE;
    }

  g_autoptr(GBytes) vdata = g_variant_get_data_as_bytes (normalized);
  if (!write_metadata_object (self, objtype, expected_checksum,
                              vdata, out_csum, cancellable, error))
    return FALSE;

  return TRUE;
}

/**
 * ostree_repo_write_metadata_stream_trusted:
 * @self: Repo
 * @objtype: Object type
 * @checksum: Store object with this ASCII SHA256 checksum
 * @object_input: Metadata object stream
 * @length: Length, may be 0 for unknown
 * @cancellable: Cancellable
 * @error: Error
 *
 * Store the metadata object @object_input; the provided @checksum is
 * trusted.
 */
gboolean
ostree_repo_write_metadata_stream_trusted (OstreeRepo        *self,
                                           OstreeObjectType   objtype,
                                           const char        *checksum,
                                           GInputStream      *object_input,
                                           guint64            length,
                                           GCancellable      *cancellable,
                                           GError           **error)
{
  /* This is all pretty ridiculous, but we're keeping this API for backwards
   * compatibility, it doesn't really need to be fast.
   */
  g_autoptr(GMemoryOutputStream) tmpbuf =
    (GMemoryOutputStream*)g_memory_output_stream_new_resizable ();
  if (g_output_stream_splice ((GOutputStream*)tmpbuf, object_input,
                              G_OUTPUT_STREAM_SPLICE_CLOSE_TARGET, cancellable, error) < 0)
    return FALSE;
  g_autoptr(GBytes) tmpb = g_memory_output_stream_steal_as_bytes (tmpbuf);

  g_autoptr(GVariant) tmpv = g_variant_new_from_bytes (ostree_metadata_variant_type (objtype),
                                                       tmpb, TRUE);
  return ostree_repo_write_metadata_trusted (self, objtype, checksum, tmpv,
                                             cancellable, error);
}

/**
 * ostree_repo_write_metadata_trusted:
 * @self: Repo
 * @objtype: Object type
 * @checksum: Store object with this ASCII SHA256 checksum
 * @variant: Metadata object
 * @cancellable: Cancellable
 * @error: Error
 *
 * Store the metadata object @variant; the provided @checksum is
 * trusted.
 */
gboolean
ostree_repo_write_metadata_trusted (OstreeRepo          *self,
                                    OstreeObjectType     type,
                                    const char          *checksum,
                                    GVariant            *variant,
                                    GCancellable        *cancellable,
                                    GError             **error)
{
  return ostree_repo_write_metadata (self, type, checksum,
                                     variant, NULL, cancellable, error);
}

typedef struct {
  OstreeRepo *repo;
  OstreeObjectType objtype;
  char *expected_checksum;
  GVariant *object;
  GCancellable *cancellable;
  GSimpleAsyncResult *result;
  guchar *result_csum;
} WriteMetadataAsyncData;

static void
write_metadata_async_data_free (gpointer user_data)
{
  WriteMetadataAsyncData *data = user_data;

  g_clear_object (&data->repo);
  g_clear_object (&data->cancellable);
  g_variant_unref (data->object);
  g_free (data->result_csum);
  g_free (data->expected_checksum);
  g_free (data);
}

static void
write_metadata_thread (GSimpleAsyncResult  *res,
                       GObject             *object,
                       GCancellable        *cancellable)
{
  GError *error = NULL;
  WriteMetadataAsyncData *data;

  data = g_simple_async_result_get_op_res_gpointer (res);
  if (!ostree_repo_write_metadata (data->repo, data->objtype, data->expected_checksum,
                                   data->object, &data->result_csum,
                                   cancellable, &error))
    g_simple_async_result_take_error (res, error);
}

/**
 * ostree_repo_write_metadata_async:
 * @self: Repo
 * @objtype: Object type
 * @expected_checksum: (allow-none): If provided, validate content against this checksum
 * @object: Metadata
 * @cancellable: Cancellable
 * @callback: Invoked when metadata is written
 * @user_data: Data for @callback
 *
 * Asynchronously store the metadata object @object. If provided,
 * the checksum @expected_checksum will be verified.
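 *
 * A hypothetical async flow (illustrative sketch; on_metadata_written is
 * an example callback name, not part of this API):
 * |[<!-- language="C" -->
 * static void
 * on_metadata_written (GObject *src, GAsyncResult *res, gpointer user_data)
 * {
 *   g_autofree guchar *csum = NULL;
 *   g_autoptr(GError) error = NULL;
 *   if (!ostree_repo_write_metadata_finish (OSTREE_REPO (src), res, &csum, &error))
 *     g_warning ("write failed: %s", error->message);
 * }
 * ]|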
 */
void
ostree_repo_write_metadata_async (OstreeRepo               *self,
                                  OstreeObjectType          objtype,
                                  const char               *expected_checksum,
                                  GVariant                 *object,
                                  GCancellable             *cancellable,
                                  GAsyncReadyCallback       callback,
                                  gpointer                  user_data)
{
  WriteMetadataAsyncData *asyncdata;

  asyncdata = g_new0 (WriteMetadataAsyncData, 1);
  asyncdata->repo = g_object_ref (self);
  asyncdata->objtype = objtype;
  asyncdata->expected_checksum = g_strdup (expected_checksum);
  asyncdata->object = g_variant_ref (object);
  asyncdata->cancellable = cancellable ? g_object_ref (cancellable) : NULL;

  asyncdata->result = g_simple_async_result_new ((GObject*) self,
                                                 callback, user_data,
                                                 ostree_repo_write_metadata_async);

  g_simple_async_result_set_op_res_gpointer (asyncdata->result, asyncdata,
                                             write_metadata_async_data_free);
  g_simple_async_result_run_in_thread (asyncdata->result, write_metadata_thread,
                                       G_PRIORITY_DEFAULT, cancellable);
  g_object_unref (asyncdata->result);
}

/**
 * ostree_repo_write_metadata_finish:
 * @self: Repo
 * @result: Result
 * @out_csum: (out) (array fixed-size=32) (element-type guint8): Binary checksum value
 * @error: Error
 *
 * Complete a call to ostree_repo_write_metadata_async().
 */
gboolean
ostree_repo_write_metadata_finish (OstreeRepo    *self,
                                   GAsyncResult  *result,
                                   guchar       **out_csum,
                                   GError       **error)
{
  GSimpleAsyncResult *simple = G_SIMPLE_ASYNC_RESULT (result);
  WriteMetadataAsyncData *data;

  g_warn_if_fail (g_simple_async_result_get_source_tag (simple) == ostree_repo_write_metadata_async);

  if (g_simple_async_result_propagate_error (simple, error))
    return FALSE;

  data = g_simple_async_result_get_op_res_gpointer (simple);
  /* Transfer ownership */
  *out_csum = data->result_csum;
  data->result_csum = NULL;
  return TRUE;
}

/* Write an object of type OSTREE_OBJECT_TYPE_DIR_META, using @file_info and @xattrs.
 * Return its (binary) checksum in @out_csum.
 */
gboolean
_ostree_repo_write_directory_meta (OstreeRepo   *self,
                                   GFileInfo    *file_info,
                                   GVariant     *xattrs,
                                   guchar      **out_csum,
                                   GCancellable *cancellable,
                                   GError      **error)
{
  if (g_cancellable_set_error_if_cancelled (cancellable, error))
    return FALSE;

  g_autoptr(GVariant) dirmeta = ostree_create_directory_metadata (file_info, xattrs);
  return ostree_repo_write_metadata (self, OSTREE_OBJECT_TYPE_DIR_META, NULL,
                                     dirmeta, out_csum, cancellable, error);
}

/**
 * ostree_repo_write_content_trusted:
 * @self: Repo
 * @checksum: Store content using this ASCII SHA256 checksum
 * @object_input: Content stream
 * @length: Length of @object_input
 * @cancellable: Cancellable
 * @error: Error
 *
 * Store the content object streamed as @object_input, with total
 * length @length. The given @checksum will be treated as trusted.
 *
 * This function should be used when importing file objects from local
 * disk, for example.
 */
gboolean
ostree_repo_write_content_trusted (OstreeRepo       *self,
                                   const char       *checksum,
                                   GInputStream     *object_input,
                                   guint64           length,
                                   GCancellable     *cancellable,
                                   GError          **error)
{
  return ostree_repo_write_content (self, checksum, object_input, length,
                                    NULL, cancellable, error);
}

/**
 * ostree_repo_write_content:
 * @self: Repo
 * @expected_checksum: (allow-none): If provided, validate content against this checksum
 * @object_input: Content object stream
 * @length: Length of @object_input
 * @out_csum: (out) (array fixed-size=32) (allow-none): Binary checksum
 * @cancellable: Cancellable
 * @error: Error
 *
 * Store the content object streamed as @object_input,
 * with total length @length. The actual checksum will
 * be returned as @out_csum.
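 *
 * A hypothetical synchronous call (illustrative; assumes a #GBytes named
 * content already holding a serialized content object stream):
 * |[<!-- language="C" -->
 * g_autoptr(GInputStream) in = g_memory_input_stream_new_from_bytes (content);
 * g_autofree guchar *csum = NULL;
 * if (!ostree_repo_write_content (repo, NULL, in, g_bytes_get_size (content),
 *                                 &csum, cancellable, error))
 *   return FALSE;
 * ]|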
 */
gboolean
ostree_repo_write_content (OstreeRepo       *self,
                           const char       *expected_checksum,
                           GInputStream     *object_input,
                           guint64           length,
                           guchar          **out_csum,
                           GCancellable     *cancellable,
                           GError          **error)
{
  /* First, if we have an expected checksum, see if we already have this
   * object. This mirrors the same logic in ostree_repo_write_metadata().
   */
  if (expected_checksum)
    {
      gboolean have_obj;
      if (!_ostree_repo_has_loose_object (self, expected_checksum, OSTREE_OBJECT_TYPE_FILE,
                                          &have_obj, cancellable, error))
        return FALSE;
      if (have_obj)
        {
          if (out_csum)
            *out_csum = ostree_checksum_to_bytes (expected_checksum);
          return TRUE;
        }
    }

  /* Parse the stream */
  g_autoptr(GInputStream) file_input = NULL;
  g_autoptr(GVariant) xattrs = NULL;
  g_autoptr(GFileInfo) file_info = NULL;
  if (!ostree_content_stream_parse (FALSE, object_input, length, FALSE,
                                    &file_input, &file_info, &xattrs,
                                    cancellable, error))
    return FALSE;

  return write_content_object (self, expected_checksum,
                               file_input, file_info, xattrs, out_csum,
                               cancellable, error);
}

typedef struct {
  OstreeRepo *repo;
  char *expected_checksum;
  GInputStream *object;
  guint64 file_object_length;
  GCancellable *cancellable;
  GSimpleAsyncResult *result;
  guchar *result_csum;
} WriteContentAsyncData;

static void
write_content_async_data_free (gpointer user_data)
{
  WriteContentAsyncData *data = user_data;

  g_clear_object (&data->repo);
  g_clear_object (&data->cancellable);
  g_clear_object (&data->object);
  g_free (data->result_csum);
  g_free (data->expected_checksum);
  g_free (data);
}

static void
write_content_thread (GSimpleAsyncResult  *res,
                      GObject             *object,
                      GCancellable        *cancellable)
{
  GError *error = NULL;
  WriteContentAsyncData *data;

  data = g_simple_async_result_get_op_res_gpointer (res);
  if (!ostree_repo_write_content (data->repo, data->expected_checksum,
                                  data->object, data->file_object_length,
                                  &data->result_csum,
                                  cancellable, &error))
    g_simple_async_result_take_error (res, error);
}

/**
 * ostree_repo_write_content_async:
 * @self: Repo
 * @expected_checksum: (allow-none): If provided, validate content against this checksum
 * @object: Input
 * @length: Length of @object
 * @cancellable: Cancellable
 * @callback: Invoked when content is written
 * @user_data: User data for @callback
 *
 * Asynchronously store the content object @object. If provided, the
 * checksum @expected_checksum will be verified.
 */
void
ostree_repo_write_content_async (OstreeRepo               *self,
                                 const char               *expected_checksum,
                                 GInputStream             *object,
                                 guint64                   length,
                                 GCancellable             *cancellable,
                                 GAsyncReadyCallback       callback,
                                 gpointer                  user_data)
{
  WriteContentAsyncData *asyncdata;

  asyncdata = g_new0 (WriteContentAsyncData, 1);
  asyncdata->repo = g_object_ref (self);
  asyncdata->expected_checksum = g_strdup (expected_checksum);
  asyncdata->object = g_object_ref (object);
  asyncdata->file_object_length = length;
  asyncdata->cancellable = cancellable ? g_object_ref (cancellable) : NULL;

  asyncdata->result = g_simple_async_result_new ((GObject*) self,
                                                 callback, user_data,
                                                 ostree_repo_write_content_async);

  g_simple_async_result_set_op_res_gpointer (asyncdata->result, asyncdata,
                                             write_content_async_data_free);
  g_simple_async_result_run_in_thread (asyncdata->result, write_content_thread,
                                       G_PRIORITY_DEFAULT, cancellable);
  g_object_unref (asyncdata->result);
}

/**
 * ostree_repo_write_content_finish:
 * @self: a #OstreeRepo
 * @result: a #GAsyncResult
 * @out_csum: (out) (transfer full): A binary SHA256 checksum of the content object
 * @error: a #GError
 *
 * Completes an invocation of ostree_repo_write_content_async().
 */
gboolean
ostree_repo_write_content_finish (OstreeRepo        *self,
                                  GAsyncResult      *result,
                                  guchar           **out_csum,
                                  GError           **error)
{
  GSimpleAsyncResult *simple = G_SIMPLE_ASYNC_RESULT (result);
  WriteContentAsyncData *data;

  g_warn_if_fail (g_simple_async_result_get_source_tag (simple) == ostree_repo_write_content_async);

  if (g_simple_async_result_propagate_error (simple, error))
    return FALSE;

  data = g_simple_async_result_get_op_res_gpointer (simple);
  ot_transfer_out_value (out_csum, &data->result_csum);
  return TRUE;
}

static GVariant *
create_empty_gvariant_dict (void)
{
  GVariantBuilder builder;
  g_variant_builder_init (&builder, G_VARIANT_TYPE("a{sv}"));
  return g_variant_builder_end (&builder);
}

/**
 * ostree_repo_write_commit:
 * @self: Repo
 * @parent: (allow-none): ASCII SHA256 checksum for parent, or %NULL for none
 * @subject: (allow-none): Subject
 * @body: (allow-none): Body
 * @metadata: (allow-none): GVariant of type a{sv}, or %NULL for none
 * @root: The tree to point the commit to
 * @out_commit: (out): Resulting ASCII SHA256 checksum for commit
 * @cancellable: Cancellable
 * @error: Error
 *
 * Write a commit metadata object, referencing the content and metadata
 * checksums of @root.
 */
gboolean
ostree_repo_write_commit (OstreeRepo      *self,
                          const char      *parent,
                          const char      *subject,
                          const char      *body,
                          GVariant        *metadata,
                          OstreeRepoFile  *root,
                          char           **out_commit,
                          GCancellable    *cancellable,
                          GError         **error)
{
  g_autoptr(GDateTime) now = g_date_time_new_now_utc ();
  return ostree_repo_write_commit_with_time (self, parent, subject, body,
                                             metadata, root,
                                             g_date_time_to_unix (now),
                                             out_commit, cancellable, error);
}

/**
 * ostree_repo_write_commit_with_time:
 * @self: Repo
 * @parent: (allow-none): ASCII SHA256 checksum for parent, or %NULL for none
 * @subject: (allow-none): Subject
 * @body: (allow-none): Body
 * @metadata: (allow-none): GVariant of type a{sv}, or %NULL for none
 * @root: The tree to point the commit to
 * @time: The time to use to stamp the commit
 * @out_commit: (out): Resulting ASCII SHA256 checksum for commit
 * @cancellable: Cancellable
 * @error: Error
 *
 * Write a commit metadata object, referencing the content and metadata
 * checksums of @root.
 */
gboolean
ostree_repo_write_commit_with_time (OstreeRepo      *self,
                                    const char      *parent,
                                    const char      *subject,
                                    const char      *body,
                                    GVariant        *metadata,
                                    OstreeRepoFile  *root,
                                    guint64          time,
                                    char           **out_commit,
                                    GCancellable    *cancellable,
                                    GError         **error)
{
  OstreeRepoFile *repo_root = OSTREE_REPO_FILE (root);

  /* Add sizes information to our metadata object */
  g_autoptr(GVariant) new_metadata = add_size_index_to_metadata (self, metadata);

  g_autoptr(GVariant) commit =
    g_variant_new ("(@a{sv}@ay@a(say)sst@ay@ay)",
                   new_metadata ? new_metadata : create_empty_gvariant_dict (),
                   parent ? ostree_checksum_to_bytes_v (parent) : ot_gvariant_new_bytearray (NULL, 0),
                   g_variant_new_array (G_VARIANT_TYPE ("(say)"), NULL, 0),
                   subject ? subject : "",
                   body ?
                   body : "",
                   GUINT64_TO_BE (time),
                   ostree_checksum_to_bytes_v (ostree_repo_file_tree_get_contents_checksum (repo_root)),
                   ostree_checksum_to_bytes_v (ostree_repo_file_tree_get_metadata_checksum (repo_root)));
  g_variant_ref_sink (commit);
  g_autofree guchar *commit_csum = NULL;
  if (!ostree_repo_write_metadata (self, OSTREE_OBJECT_TYPE_COMMIT, NULL,
                                   commit, &commit_csum,
                                   cancellable, error))
    return FALSE;

  g_autofree char *ret_commit = ostree_checksum_from_bytes (commit_csum);
  ot_transfer_out_value(out_commit, &ret_commit);
  return TRUE;
}

/**
 * ostree_repo_read_commit_detached_metadata:
 * @self: Repo
 * @checksum: ASCII SHA256 commit checksum
 * @out_metadata: (out) (transfer full): Metadata associated with commit, in the format "a{sv}", or %NULL if none exists
 * @cancellable: Cancellable
 * @error: Error
 *
 * OSTree commits can have arbitrary metadata associated; this
 * function retrieves them. If none exists, @out_metadata will be set
 * to %NULL.
 */
gboolean
ostree_repo_read_commit_detached_metadata (OstreeRepo      *self,
                                           const char      *checksum,
                                           GVariant       **out_metadata,
                                           GCancellable    *cancellable,
                                           GError         **error)
{
  char buf[_OSTREE_LOOSE_PATH_MAX];
  _ostree_loose_path (buf, checksum, OSTREE_OBJECT_TYPE_COMMIT_META, self->mode);

  if (self->commit_stagedir.initialized)
    {
      glnx_autofd int fd = -1;
      if (!ot_openat_ignore_enoent (self->commit_stagedir.fd, buf, &fd, error))
        return FALSE;
      if (fd != -1)
        return ot_variant_read_fd (fd, 0, G_VARIANT_TYPE ("a{sv}"),
                                   TRUE, out_metadata, error);
    }

  glnx_autofd int fd = -1;
  if (!ot_openat_ignore_enoent (self->objects_dir_fd, buf, &fd, error))
    return FALSE;
  if (fd != -1)
    return ot_variant_read_fd (fd, 0, G_VARIANT_TYPE ("a{sv}"), TRUE, out_metadata, error);

  if (self->parent_repo)
    return ostree_repo_read_commit_detached_metadata (self->parent_repo,
                                                      checksum,
                                                      out_metadata,
                                                      cancellable,
                                                      error);

  /* Nothing found */
  *out_metadata = NULL;
  return TRUE;
}

/**
 * ostree_repo_write_commit_detached_metadata:
 * @self: Repo
 * @checksum: ASCII SHA256 commit checksum
 * @metadata: (allow-none): Metadata to associate with commit, in the format "a{sv}", or %NULL to delete
 * @cancellable: Cancellable
 * @error: Error
 *
 * Replace any existing metadata associated with commit referred to by
 * @checksum with @metadata. If @metadata is %NULL, then existing
 * data will be deleted.
 */
gboolean
ostree_repo_write_commit_detached_metadata (OstreeRepo      *self,
                                            const char      *checksum,
                                            GVariant        *metadata,
                                            GCancellable    *cancellable,
                                            GError         **error)
{
  int dest_dfd;
  if (self->in_transaction)
    dest_dfd = self->commit_stagedir.fd;
  else
    dest_dfd = self->objects_dir_fd;

  if (!_ostree_repo_ensure_loose_objdir_at (dest_dfd, checksum,
                                            cancellable, error))
    return FALSE;

  g_autoptr(GVariant) normalized = NULL;
  gsize normalized_size = 0;
  const guint8 *data = NULL;
  if (metadata != NULL)
    {
      normalized = g_variant_get_normal_form (metadata);
      normalized_size = g_variant_get_size (normalized);
      data = g_variant_get_data (normalized);
    }

  if (data == NULL)
    data = (guint8*)"";

  char pathbuf[_OSTREE_LOOSE_PATH_MAX];
  _ostree_loose_path (pathbuf, checksum, OSTREE_OBJECT_TYPE_COMMIT_META, self->mode);
  if (!glnx_file_replace_contents_at (dest_dfd, pathbuf,
                                      data, normalized_size, 0,
                                      cancellable, error))
    {
      g_prefix_error (error, "Unable to write detached metadata: ");
      return FALSE;
    }

  return TRUE;
}

/* This generates an in-memory OSTREE_OBJECT_TYPE_DIR_TREE variant, using the
 * content objects and subdirectories.
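 * (The result has GVariant type "(a(say)a(sayay))": an array of
 * (filename, checksum) pairs followed by an array of
 * (dirname, tree-checksum, meta-checksum) triples, exactly as built below.)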
The input hashes will be sorted */ static GVariant * create_tree_variant_from_hashes (GHashTable *file_checksums, GHashTable *dir_contents_checksums, GHashTable *dir_metadata_checksums) { GVariantBuilder files_builder; g_variant_builder_init (&files_builder, G_VARIANT_TYPE ("a(say)")); GVariantBuilder dirs_builder; g_variant_builder_init (&dirs_builder, G_VARIANT_TYPE ("a(sayay)")); GSList *sorted_filenames = NULL; GLNX_HASH_TABLE_FOREACH (file_checksums, const char*, name) { /* Should have been validated earlier, but be paranoid */ g_assert (ot_util_filename_validate (name, NULL)); sorted_filenames = g_slist_prepend (sorted_filenames, (char*)name); } sorted_filenames = g_slist_sort (sorted_filenames, (GCompareFunc)strcmp); for (GSList *iter = sorted_filenames; iter; iter = iter->next) { const char *name = iter->data; const char *value; value = g_hash_table_lookup (file_checksums, name); g_variant_builder_add (&files_builder, "(s@ay)", name, ostree_checksum_to_bytes_v (value)); } g_slist_free (sorted_filenames); sorted_filenames = NULL; GLNX_HASH_TABLE_FOREACH (dir_metadata_checksums, const char*, name) sorted_filenames = g_slist_prepend (sorted_filenames, (char*)name); sorted_filenames = g_slist_sort (sorted_filenames, (GCompareFunc)strcmp); for (GSList *iter = sorted_filenames; iter; iter = iter->next) { const char *name = iter->data; const char *content_checksum = g_hash_table_lookup (dir_contents_checksums, name); const char *meta_checksum = g_hash_table_lookup (dir_metadata_checksums, name); g_variant_builder_add (&dirs_builder, "(s@ay@ay)", name, ostree_checksum_to_bytes_v (content_checksum), ostree_checksum_to_bytes_v (meta_checksum)); } g_slist_free (sorted_filenames); sorted_filenames = NULL; GVariant *serialized_tree = g_variant_new ("(@a(say)@a(sayay))", g_variant_builder_end (&files_builder), g_variant_builder_end (&dirs_builder)); return g_variant_ref_sink (serialized_tree); } /* If any filtering is set up, perform it, and return modified file info in * @out_modified_info. Note that if no filtering is applied, @out_modified_info * will simply be another reference (with incremented refcount) to @file_info. */ OstreeRepoCommitFilterResult _ostree_repo_commit_modifier_apply (OstreeRepo *self, OstreeRepoCommitModifier *modifier, const char *path, GFileInfo *file_info, GFileInfo **out_modified_info) { OstreeRepoCommitFilterResult result = OSTREE_REPO_COMMIT_FILTER_ALLOW; GFileInfo *modified_info; if (modifier == NULL || (modifier->filter == NULL && (modifier->flags & OSTREE_REPO_COMMIT_MODIFIER_FLAGS_CANONICAL_PERMISSIONS) == 0)) { *out_modified_info = g_object_ref (file_info); return OSTREE_REPO_COMMIT_FILTER_ALLOW; } modified_info = g_file_info_dup (file_info); if (modifier->filter) result = modifier->filter (self, path, modified_info, modifier->user_data); if ((modifier->flags & OSTREE_REPO_COMMIT_MODIFIER_FLAGS_CANONICAL_PERMISSIONS) != 0) { guint mode = g_file_info_get_attribute_uint32 (modified_info, "unix::mode"); switch (g_file_info_get_file_type (file_info)) { case G_FILE_TYPE_REGULAR: /* In particular, we want to squash the s{ug}id bits, but this also * catches the sticky bit for example. 
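 * (Worked example: a setuid binary with st_mode 0104755 becomes
 * 0104755 & (S_IFREG | 0755) == 0100755, i.e. still a regular file with
 * mode 0755, setuid bit dropped.)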
*/ g_file_info_set_attribute_uint32 (modified_info, "unix::mode", mode & (S_IFREG | 0755)); break; case G_FILE_TYPE_DIRECTORY: /* Like the above but for directories */ g_file_info_set_attribute_uint32 (modified_info, "unix::mode", mode & (S_IFDIR | 0755)); break; case G_FILE_TYPE_SYMBOLIC_LINK: break; default: g_assert_not_reached (); } g_file_info_set_attribute_uint32 (modified_info, "unix::uid", 0); g_file_info_set_attribute_uint32 (modified_info, "unix::gid", 0); } *out_modified_info = modified_info; return result; } /* Convert @path into a string */ static char * ptrarray_path_join (GPtrArray *path) { GString *path_buf = g_string_new (""); if (path->len == 0) g_string_append_c (path_buf, '/'); else { for (guint i = 0; i < path->len; i++) { const char *elt = path->pdata[i]; g_string_append_c (path_buf, '/'); g_string_append (path_buf, elt); } } return g_string_free (path_buf, FALSE); } static gboolean get_final_xattrs (OstreeRepo *self, OstreeRepoCommitModifier *modifier, const char *relpath, GFileInfo *file_info, GFile *path, int dfd, const char *dfd_subpath, GVariant *source_xattrs, GVariant **out_xattrs, gboolean *out_modified, GCancellable *cancellable, GError **error) { /* track whether the returned xattrs differ from the file on disk */ gboolean modified = TRUE; const gboolean skip_xattrs = (modifier && modifier->flags & (OSTREE_REPO_COMMIT_MODIFIER_FLAGS_SKIP_XATTRS | OSTREE_REPO_COMMIT_MODIFIER_FLAGS_CANONICAL_PERMISSIONS)) > 0; /* fetch on-disk xattrs if needed & not disabled */ g_autoptr(GVariant) original_xattrs = NULL; if (!skip_xattrs && !self->disable_xattrs) { if (source_xattrs) original_xattrs = g_variant_ref (source_xattrs); else if (path && OSTREE_IS_REPO_FILE (path)) { if (!ostree_repo_file_get_xattrs (OSTREE_REPO_FILE (path), &original_xattrs, cancellable, error)) return FALSE; } else if (path) { if (!glnx_dfd_name_get_all_xattrs (AT_FDCWD, gs_file_get_path_cached (path), &original_xattrs, cancellable, error)) return FALSE; } else if (dfd_subpath == NULL) { g_assert (dfd != -1); if (!glnx_fd_get_all_xattrs (dfd, &original_xattrs, cancellable, error)) return FALSE; } else { g_assert (dfd != -1); if (!glnx_dfd_name_get_all_xattrs (dfd, dfd_subpath, &original_xattrs, cancellable, error)) return FALSE; } g_assert (original_xattrs); } g_autoptr(GVariant) ret_xattrs = NULL; if (modifier && modifier->xattr_callback) { ret_xattrs = modifier->xattr_callback (self, relpath, file_info, modifier->xattr_user_data); } /* if callback returned NULL or didn't exist, default to on-disk state */ if (!ret_xattrs && original_xattrs) ret_xattrs = g_variant_ref (original_xattrs); if (modifier && modifier->sepolicy) { g_autofree char *label = NULL; if (!ostree_sepolicy_get_label (modifier->sepolicy, relpath, g_file_info_get_attribute_uint32 (file_info, "unix::mode"), &label, cancellable, error)) return FALSE; if (!label && (modifier->flags & OSTREE_REPO_COMMIT_MODIFIER_FLAGS_ERROR_ON_UNLABELED) > 0) { return glnx_throw (error, "Failed to look up SELinux label for '%s'", relpath); } else if (label) { g_autoptr(GVariantBuilder) builder = NULL; if (ret_xattrs) { /* drop out any existing SELinux policy from the set, so we don't end up * counting it twice in the checksum */ GVariant* new_ret_xattrs = _ostree_filter_selinux_xattr (ret_xattrs); g_variant_unref (ret_xattrs); ret_xattrs = new_ret_xattrs; } /* ret_xattrs may be NULL */ builder = ot_util_variant_builder_from_variant (ret_xattrs, G_VARIANT_TYPE ("a(ayay)")); g_variant_builder_add_value (builder, g_variant_new ("(@ay@ay)", 
g_variant_new_bytestring ("security.selinux"), g_variant_new_bytestring (label))); if (ret_xattrs) g_variant_unref (ret_xattrs); ret_xattrs = g_variant_builder_end (builder); g_variant_ref_sink (ret_xattrs); } } if (original_xattrs && ret_xattrs && g_variant_equal (original_xattrs, ret_xattrs)) modified = FALSE; if (out_xattrs) *out_xattrs = g_steal_pointer (&ret_xattrs); if (out_modified) *out_modified = modified; return TRUE; } static gboolean write_directory_to_mtree_internal (OstreeRepo *self, GFile *dir, OstreeMutableTree *mtree, OstreeRepoCommitModifier *modifier, GPtrArray *path, GCancellable *cancellable, GError **error); static gboolean write_dfd_iter_to_mtree_internal (OstreeRepo *self, GLnxDirFdIterator *src_dfd_iter, OstreeMutableTree *mtree, OstreeRepoCommitModifier *modifier, GPtrArray *path, GCancellable *cancellable, GError **error); typedef enum { WRITE_DIR_CONTENT_FLAGS_NONE = 0, WRITE_DIR_CONTENT_FLAGS_CAN_ADOPT = 1, } WriteDirContentFlags; /* Given either a dir_enum or a dfd_iter, writes the directory entry (which is * itself a directory) to the mtree. For subdirs, we go back through either * write_dfd_iter_to_mtree_internal (dfd_iter case) or * write_directory_to_mtree_internal (dir_enum case) which will do the actual * dirmeta + dirent iteration. */ static gboolean write_dir_entry_to_mtree_internal (OstreeRepo *self, OstreeRepoFile *repo_dir, GFileEnumerator *dir_enum, GLnxDirFdIterator *dfd_iter, WriteDirContentFlags writeflags, GFileInfo *child_info, OstreeMutableTree *mtree, OstreeRepoCommitModifier *modifier, GPtrArray *path, GCancellable *cancellable, GError **error) { g_assert (dir_enum != NULL || dfd_iter != NULL); g_assert (g_file_info_get_file_type (child_info) == G_FILE_TYPE_DIRECTORY); const char *name = g_file_info_get_name (child_info); /* We currently only honor the CONSUME flag in the dfd_iter case to avoid even * more complexity in this function, and it'd mostly only be useful when * operating on local filesystems anyways. 
*/ const gboolean delete_after_commit = dfd_iter && modifier && (modifier->flags & OSTREE_REPO_COMMIT_MODIFIER_FLAGS_CONSUME); /* Build the full path which we need for callbacks */ g_ptr_array_add (path, (char*)name); g_autofree char *child_relpath = ptrarray_path_join (path); /* Call the filter */ g_autoptr(GFileInfo) modified_info = NULL; OstreeRepoCommitFilterResult filter_result = _ostree_repo_commit_modifier_apply (self, modifier, child_relpath, child_info, &modified_info); if (filter_result != OSTREE_REPO_COMMIT_FILTER_ALLOW) { g_ptr_array_remove_index (path, path->len - 1); if (delete_after_commit) { g_assert (dfd_iter); if (!glnx_shutil_rm_rf_at (dfd_iter->fd, name, cancellable, error)) return FALSE; } /* Note: early return */ return TRUE; } g_autoptr(GFile) child = NULL; if (dir_enum != NULL) child = g_file_enumerator_get_child (dir_enum, child_info); g_autoptr(OstreeMutableTree) child_mtree = NULL; if (!ostree_mutable_tree_ensure_dir (mtree, name, &child_mtree, error)) return FALSE; /* Finally, recurse on the dir */ if (dir_enum != NULL) { if (!write_directory_to_mtree_internal (self, child, child_mtree, modifier, path, cancellable, error)) return FALSE; } else if (repo_dir) { g_assert (dir_enum != NULL); g_debug ("Adding: %s", gs_file_get_path_cached (child)); if (!ostree_mutable_tree_replace_file (mtree, name, ostree_repo_file_get_checksum ((OstreeRepoFile*) child), error)) return FALSE; } else { g_assert (dfd_iter != NULL); g_auto(GLnxDirFdIterator) child_dfd_iter = { 0, }; if (!glnx_dirfd_iterator_init_at (dfd_iter->fd, name, FALSE, &child_dfd_iter, error)) return FALSE; if (!write_dfd_iter_to_mtree_internal (self, &child_dfd_iter, child_mtree, modifier, path, cancellable, error)) return FALSE; if (delete_after_commit) { if (!glnx_unlinkat (dfd_iter->fd, name, AT_REMOVEDIR, error)) return FALSE; } } g_ptr_array_remove_index (path, path->len - 1); return TRUE; } /* Given either a dir_enum or a dfd_iter, writes a non-dir (regfile/symlink) to * the mtree. */ static gboolean write_content_to_mtree_internal (OstreeRepo *self, OstreeRepoFile *repo_dir, GFileEnumerator *dir_enum, GLnxDirFdIterator *dfd_iter, WriteDirContentFlags writeflags, GFileInfo *child_info, OstreeMutableTree *mtree, OstreeRepoCommitModifier *modifier, GPtrArray *path, GCancellable *cancellable, GError **error) { g_assert (dir_enum != NULL || dfd_iter != NULL); GFileType file_type = g_file_info_get_file_type (child_info); const char *name = g_file_info_get_name (child_info); /* Load flags into boolean constants for ease of readability (we also need to * NULL-check modifier) */ const gboolean canonical_permissions = modifier && (modifier->flags & OSTREE_REPO_COMMIT_MODIFIER_FLAGS_CANONICAL_PERMISSIONS); const gboolean devino_canonical = modifier && (modifier->flags & OSTREE_REPO_COMMIT_MODIFIER_FLAGS_DEVINO_CANONICAL); /* We currently only honor the CONSUME flag in the dfd_iter case to avoid even * more complexity in this function, and it'd mostly only be useful when * operating on local filesystems anyways. */ const gboolean delete_after_commit = dfd_iter && modifier && (modifier->flags & OSTREE_REPO_COMMIT_MODIFIER_FLAGS_CONSUME); /* See if we have a devino hit; this is used below in a few places. 
*/ const char *loose_checksum = NULL; if (dfd_iter != NULL) { guint32 dev = g_file_info_get_attribute_uint32 (child_info, "unix::device"); guint64 inode = g_file_info_get_attribute_uint64 (child_info, "unix::inode"); loose_checksum = devino_cache_lookup (self, modifier, dev, inode); if (loose_checksum && devino_canonical) { /* Go directly to checksum, do not pass Go, do not collect $200. * In this mode the app is required to break hardlinks for any * files it wants to modify. */ if (!ostree_mutable_tree_replace_file (mtree, name, loose_checksum, error)) return FALSE; if (delete_after_commit) { if (!glnx_shutil_rm_rf_at (dfd_iter->fd, name, cancellable, error)) return FALSE; } g_mutex_lock (&self->txn_lock); self->txn.stats.devino_cache_hits++; g_mutex_unlock (&self->txn_lock); return TRUE; /* Early return */ } } /* Build the full path which we need for callbacks */ g_ptr_array_add (path, (char*)name); g_autofree char *child_relpath = ptrarray_path_join (path); /* For bare-user repos we'll reload our file info from the object * (specifically the ostreemeta xattr), if it was checked out that way (via * hardlink). The on-disk state is not normally what we want to commit. * Basically we're making sure that we pick up "real" uid/gid and any xattrs * there. */ g_autoptr(GVariant) source_xattrs = NULL; g_autoptr(GFileInfo) source_child_info = NULL; if (loose_checksum && self->mode == OSTREE_REPO_MODE_BARE_USER) { if (!ostree_repo_load_file (self, loose_checksum, NULL, &source_child_info, &source_xattrs, cancellable, error)) return FALSE; child_info = source_child_info; } /* Call the filter */ g_autoptr(GFileInfo) modified_info = NULL; OstreeRepoCommitFilterResult filter_result = _ostree_repo_commit_modifier_apply (self, modifier, child_relpath, child_info, &modified_info); const gboolean child_info_was_modified = !_ostree_gfileinfo_equal (child_info, modified_info); if (filter_result != OSTREE_REPO_COMMIT_FILTER_ALLOW) { g_ptr_array_remove_index (path, path->len - 1); if (delete_after_commit) { g_assert (dfd_iter); if (!glnx_shutil_rm_rf_at (dfd_iter->fd, name, cancellable, error)) return FALSE; } /* Note: early return */ return TRUE; } switch (file_type) { case G_FILE_TYPE_SYMBOLIC_LINK: case G_FILE_TYPE_REGULAR: break; default: return glnx_throw (error, "Unsupported file type for file: '%s'", child_relpath); } g_autoptr(GFile) child = NULL; if (dir_enum != NULL) child = g_file_enumerator_get_child (dir_enum, child_info); /* Our filters have passed, etc.; now we prepare to write the content object */ glnx_autofd int file_input_fd = -1; /* Open the file now, since it's better for reading xattrs * rather than using the /proc/self/fd links. * * TODO: Do this lazily, since for e.g. bare-user-only repos * we don't have xattrs and don't need to open every file * for things that have devino cache hits. */ if (file_type == G_FILE_TYPE_REGULAR && dfd_iter != NULL) { if (!glnx_openat_rdonly (dfd_iter->fd, name, FALSE, &file_input_fd, error)) return FALSE; } g_autoptr(GVariant) xattrs = NULL; gboolean xattrs_were_modified; if (dir_enum != NULL) { if (!get_final_xattrs (self, modifier, child_relpath, child_info, child, -1, name, source_xattrs, &xattrs, &xattrs_were_modified, cancellable, error)) return FALSE; } else { /* These contortions are basically so we use glnx_fd_get_all_xattrs() * for regfiles, and glnx_dfd_name_get_all_xattrs() for symlinks. */ int xattr_fd_arg = (file_input_fd != -1) ? file_input_fd : dfd_iter->fd; const char *xattr_path_arg = (file_input_fd != -1) ? 
NULL : name; if (!get_final_xattrs (self, modifier, child_relpath, child_info, child, xattr_fd_arg, xattr_path_arg, source_xattrs, &xattrs, &xattrs_were_modified, cancellable, error)) return FALSE; } /* Used below to see whether we can do a fast path commit */ const gboolean modified_file_meta = child_info_was_modified || xattrs_were_modified; /* A big prerequisite list of conditions for whether or not we can * "adopt", i.e. just checksum and rename() into place */ const gboolean can_adopt_basic = file_type == G_FILE_TYPE_REGULAR && dfd_iter != NULL && delete_after_commit && ((writeflags & WRITE_DIR_CONTENT_FLAGS_CAN_ADOPT) > 0); gboolean can_adopt = can_adopt_basic; /* If basic prerequisites are met, check repo mode specific ones */ if (can_adopt) { /* For bare repos, we could actually chown/reset the xattrs, but let's * do the basic optimizations here first. */ if (self->mode == OSTREE_REPO_MODE_BARE) can_adopt = !modified_file_meta; else if (self->mode == OSTREE_REPO_MODE_BARE_USER_ONLY) can_adopt = canonical_permissions; else /* This covers bare-user and archive. See comments in adopt_and_commit_regfile() * for notes on adding bare-user later here. */ can_adopt = FALSE; } gboolean did_adopt = FALSE; /* The very fast path - we have a devino cache hit, nothing to write */ if (loose_checksum && !modified_file_meta) { if (!ostree_mutable_tree_replace_file (mtree, name, loose_checksum, error)) return FALSE; g_mutex_lock (&self->txn_lock); self->txn.stats.devino_cache_hits++; g_mutex_unlock (&self->txn_lock); } /* Next fast path - we can "adopt" the file */ else if (can_adopt) { char checksum[OSTREE_SHA256_STRING_LEN+1]; if (!adopt_and_commit_regfile (self, dfd_iter->fd, name, modified_info, xattrs, checksum, cancellable, error)) return FALSE; if (!ostree_mutable_tree_replace_file (mtree, name, checksum, error)) return FALSE; did_adopt = TRUE; } else { g_autoptr(GInputStream) file_input = NULL; if (file_type == G_FILE_TYPE_REGULAR) { if (dir_enum != NULL) { g_assert (child != NULL); file_input = (GInputStream*)g_file_read (child, cancellable, error); if (!file_input) return FALSE; } else { /* We already opened the fd above */ file_input = g_unix_input_stream_new (file_input_fd, FALSE); } } g_autofree guchar *child_file_csum = NULL; if (!write_content_object (self, NULL, file_input, modified_info, xattrs, &child_file_csum, cancellable, error)) return FALSE; char tmp_checksum[OSTREE_SHA256_STRING_LEN+1]; ostree_checksum_inplace_from_bytes (child_file_csum, tmp_checksum); if (!ostree_mutable_tree_replace_file (mtree, name, tmp_checksum, error)) return FALSE; } /* Process delete_after_commit. In the adoption case though, we already * took ownership of the file above, usually via a renameat(). */ if (delete_after_commit && !did_adopt) { if (!glnx_unlinkat (dfd_iter->fd, name, 0, error)) return FALSE; } g_ptr_array_remove_index (path, path->len - 1); return TRUE; } /* Handles the dirmeta for the given GFile dir and then calls * write_{dir_entry,content}_to_mtree_internal() for each directory entry. */ static gboolean write_directory_to_mtree_internal (OstreeRepo *self, GFile *dir, OstreeMutableTree *mtree, OstreeRepoCommitModifier *modifier, GPtrArray *path, GCancellable *cancellable, GError **error) { OstreeRepoCommitFilterResult filter_result; OstreeRepoFile *repo_dir = NULL; if (dir) g_debug ("Examining: %s", gs_file_get_path_cached (dir)); /* If the directory is already in the repository, we can try to * reuse checksums to skip checksumming. 
*/ if (dir && OSTREE_IS_REPO_FILE (dir) && modifier == NULL) repo_dir = (OstreeRepoFile *) dir; if (repo_dir) { if (!ostree_repo_file_ensure_resolved (repo_dir, error)) return FALSE; /* ostree_mutable_tree_fill_from_dirtree returns FALSE if mtree isn't * empty: in which case we're responsible for merging the trees. */ if (ostree_mutable_tree_fill_empty_from_dirtree (mtree, ostree_repo_file_get_repo (repo_dir), ostree_repo_file_tree_get_contents_checksum (repo_dir), ostree_repo_file_get_checksum (repo_dir))) return TRUE; ostree_mutable_tree_set_metadata_checksum (mtree, ostree_repo_file_tree_get_metadata_checksum (repo_dir)); filter_result = OSTREE_REPO_COMMIT_FILTER_ALLOW; } else { g_autoptr(GVariant) xattrs = NULL; g_autoptr(GFileInfo) child_info = g_file_query_info (dir, OSTREE_GIO_FAST_QUERYINFO, G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS, cancellable, error); if (!child_info) return FALSE; g_autofree char *relpath = NULL; if (modifier != NULL) relpath = ptrarray_path_join (path); g_autoptr(GFileInfo) modified_info = NULL; filter_result = _ostree_repo_commit_modifier_apply (self, modifier, relpath, child_info, &modified_info); if (filter_result == OSTREE_REPO_COMMIT_FILTER_ALLOW) { if (!get_final_xattrs (self, modifier, relpath, child_info, dir, -1, NULL, NULL, &xattrs, NULL, cancellable, error)) return FALSE; g_autofree guchar *child_file_csum = NULL; if (!_ostree_repo_write_directory_meta (self, modified_info, xattrs, &child_file_csum, cancellable, error)) return FALSE; g_autofree char *tmp_checksum = ostree_checksum_from_bytes (child_file_csum); ostree_mutable_tree_set_metadata_checksum (mtree, tmp_checksum); } } if (filter_result == OSTREE_REPO_COMMIT_FILTER_ALLOW) { g_autoptr(GFileEnumerator) dir_enum = NULL; dir_enum = g_file_enumerate_children ((GFile*)dir, OSTREE_GIO_FAST_QUERYINFO, G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS, cancellable, error); if (!dir_enum) return FALSE; while (TRUE) { GFileInfo *child_info; if (!g_file_enumerator_iterate (dir_enum, &child_info, NULL, cancellable, error)) return FALSE; if (child_info == NULL) break; if (g_file_info_get_file_type (child_info) == G_FILE_TYPE_DIRECTORY) { if (!write_dir_entry_to_mtree_internal (self, repo_dir, dir_enum, NULL, WRITE_DIR_CONTENT_FLAGS_NONE, child_info, mtree, modifier, path, cancellable, error)) return FALSE; } else { if (!write_content_to_mtree_internal (self, repo_dir, dir_enum, NULL, WRITE_DIR_CONTENT_FLAGS_NONE, child_info, mtree, modifier, path, cancellable, error)) return FALSE; } } } return TRUE; } /* Handles the dirmeta for the dir described by src_dfd_iter and then calls * write_{dir_entry,content}_to_mtree_internal() for each directory entry. 
*/ static gboolean write_dfd_iter_to_mtree_internal (OstreeRepo *self, GLnxDirFdIterator *src_dfd_iter, OstreeMutableTree *mtree, OstreeRepoCommitModifier *modifier, GPtrArray *path, GCancellable *cancellable, GError **error) { g_autoptr(GFileInfo) child_info = NULL; g_autoptr(GFileInfo) modified_info = NULL; g_autoptr(GVariant) xattrs = NULL; g_autofree guchar *child_file_csum = NULL; g_autofree char *tmp_checksum = NULL; g_autofree char *relpath = NULL; OstreeRepoCommitFilterResult filter_result; struct stat dir_stbuf; if (!glnx_fstat (src_dfd_iter->fd, &dir_stbuf, error)) return FALSE; child_info = _ostree_stbuf_to_gfileinfo (&dir_stbuf); if (modifier != NULL) { relpath = ptrarray_path_join (path); filter_result = _ostree_repo_commit_modifier_apply (self, modifier, relpath, child_info, &modified_info); } else { filter_result = OSTREE_REPO_COMMIT_FILTER_ALLOW; modified_info = g_object_ref (child_info); } if (filter_result == OSTREE_REPO_COMMIT_FILTER_ALLOW) { if (!get_final_xattrs (self, modifier, relpath, modified_info, NULL, src_dfd_iter->fd, NULL, NULL, &xattrs, NULL, cancellable, error)) return FALSE; if (!_ostree_repo_write_directory_meta (self, modified_info, xattrs, &child_file_csum, cancellable, error)) return FALSE; g_free (tmp_checksum); tmp_checksum = ostree_checksum_from_bytes (child_file_csum); ostree_mutable_tree_set_metadata_checksum (mtree, tmp_checksum); } if (filter_result != OSTREE_REPO_COMMIT_FILTER_ALLOW) { /* Note - early return */ return TRUE; } /* See if this dir is on the same device; if so we can adopt (if enabled) */ WriteDirContentFlags flags = 0; if (dir_stbuf.st_dev == self->device) flags |= WRITE_DIR_CONTENT_FLAGS_CAN_ADOPT; while (TRUE) { struct dirent *dent; if (!glnx_dirfd_iterator_next_dent (src_dfd_iter, &dent, cancellable, error)) return FALSE; if (dent == NULL) break; struct stat stbuf; if (!glnx_fstatat (src_dfd_iter->fd, dent->d_name, &stbuf, AT_SYMLINK_NOFOLLOW, error)) return FALSE; g_autoptr(GFileInfo) child_info = _ostree_stbuf_to_gfileinfo (&stbuf); g_file_info_set_name (child_info, dent->d_name); if (S_ISDIR (stbuf.st_mode)) { if (!write_dir_entry_to_mtree_internal (self, NULL, NULL, src_dfd_iter, flags, child_info, mtree, modifier, path, cancellable, error)) return FALSE; /* We handled the dir, move onto the next */ continue; } if (S_ISREG (stbuf.st_mode)) ; else if (S_ISLNK (stbuf.st_mode)) { if (!ot_readlinkat_gfile_info (src_dfd_iter->fd, dent->d_name, child_info, cancellable, error)) return FALSE; } else { return glnx_throw (error, "Not a regular file or symlink: %s", dent->d_name); } /* Write a content object, we handled directories above */ if (!write_content_to_mtree_internal (self, NULL, NULL, src_dfd_iter, flags, child_info, mtree, modifier, path, cancellable, error)) return FALSE; } return TRUE; } /** * ostree_repo_write_directory_to_mtree: * @self: Repo * @dir: Path to a directory * @mtree: Overlay directory contents into this tree * @modifier: (allow-none): Optional modifier * @cancellable: Cancellable * @error: Error * * Store objects for @dir and all children into the repository @self, * overlaying the resulting filesystem hierarchy into @mtree. 
*/ gboolean ostree_repo_write_directory_to_mtree (OstreeRepo *self, GFile *dir, OstreeMutableTree *mtree, OstreeRepoCommitModifier *modifier, GCancellable *cancellable, GError **error) { /* Short cut local files */ if (g_file_is_native (dir)) { if (!ostree_repo_write_dfd_to_mtree (self, AT_FDCWD, gs_file_get_path_cached (dir), mtree, modifier, cancellable, error)) return FALSE; } else { if (modifier && modifier->flags & OSTREE_REPO_COMMIT_MODIFIER_FLAGS_GENERATE_SIZES) self->generate_sizes = TRUE; g_autoptr(GPtrArray) path = g_ptr_array_new (); if (!write_directory_to_mtree_internal (self, dir, mtree, modifier, path, cancellable, error)) return FALSE; } return TRUE; } /** * ostree_repo_write_dfd_to_mtree: * @self: Repo * @dfd: Directory file descriptor * @path: Path * @mtree: Overlay directory contents into this tree * @modifier: (allow-none): Optional modifier * @cancellable: Cancellable * @error: Error * * Store as objects all contents of the directory referred to by @dfd * and @path all children into the repository @self, overlaying the * resulting filesystem hierarchy into @mtree. */ gboolean ostree_repo_write_dfd_to_mtree (OstreeRepo *self, int dfd, const char *path, OstreeMutableTree *mtree, OstreeRepoCommitModifier *modifier, GCancellable *cancellable, GError **error) { if (modifier && modifier->flags & OSTREE_REPO_COMMIT_MODIFIER_FLAGS_GENERATE_SIZES) self->generate_sizes = TRUE; g_auto(GLnxDirFdIterator) dfd_iter = { 0, }; if (!glnx_dirfd_iterator_init_at (dfd, path, FALSE, &dfd_iter, error)) return FALSE; g_autoptr(GPtrArray) pathbuilder = g_ptr_array_new (); if (!write_dfd_iter_to_mtree_internal (self, &dfd_iter, mtree, modifier, pathbuilder, cancellable, error)) return FALSE; /* And now finally remove the toplevel; see also the handling for this flag in * the write_dfd_iter_to_mtree_internal() function. As a special case we don't * try to remove `.` (since we'd get EINVAL); that's what's used in * rpm-ostree. */ const gboolean delete_after_commit = modifier && (modifier->flags & OSTREE_REPO_COMMIT_MODIFIER_FLAGS_CONSUME); if (delete_after_commit && !g_str_equal (path, ".")) { if (!glnx_unlinkat (dfd, path, AT_REMOVEDIR, error)) return FALSE; } return TRUE; } /** * ostree_repo_write_mtree: * @self: Repo * @mtree: Mutable tree * @out_file: (out): An #OstreeRepoFile representing @mtree's root. * @cancellable: Cancellable * @error: Error * * Write all metadata objects for @mtree to repo; the resulting * @out_file points to the %OSTREE_OBJECT_TYPE_DIR_TREE object that * the @mtree represented. 
*/ gboolean ostree_repo_write_mtree (OstreeRepo *self, OstreeMutableTree *mtree, GFile **out_file, GCancellable *cancellable, GError **error) { const char *contents_checksum, *metadata_checksum; g_autoptr(GFile) ret_file = NULL; if (!ostree_mutable_tree_check_error (mtree, error)) return glnx_prefix_error (error, "mtree"); metadata_checksum = ostree_mutable_tree_get_metadata_checksum (mtree); if (!metadata_checksum) return glnx_throw (error, "Can't commit an empty tree"); contents_checksum = ostree_mutable_tree_get_contents_checksum (mtree); if (contents_checksum) { ret_file = G_FILE (_ostree_repo_file_new_root (self, contents_checksum, metadata_checksum)); } else { g_autoptr(GHashTable) dir_metadata_checksums = NULL; g_autoptr(GHashTable) dir_contents_checksums = NULL; g_autoptr(GVariant) serialized_tree = NULL; g_autofree guchar *contents_csum = NULL; char contents_checksum_buf[OSTREE_SHA256_STRING_LEN+1]; dir_contents_checksums = g_hash_table_new_full (g_str_hash, g_str_equal, (GDestroyNotify)g_free, (GDestroyNotify)g_free); dir_metadata_checksums = g_hash_table_new_full (g_str_hash, g_str_equal, (GDestroyNotify)g_free, (GDestroyNotify)g_free); GLNX_HASH_TABLE_FOREACH_KV (ostree_mutable_tree_get_subdirs (mtree), const char*, name, OstreeMutableTree*, child_dir) { g_autoptr(GFile) child_file = NULL; if (!ostree_repo_write_mtree (self, child_dir, &child_file, cancellable, error)) return FALSE; g_hash_table_replace (dir_contents_checksums, g_strdup (name), g_strdup (ostree_repo_file_tree_get_contents_checksum (OSTREE_REPO_FILE (child_file)))); g_hash_table_replace (dir_metadata_checksums, g_strdup (name), g_strdup (ostree_repo_file_tree_get_metadata_checksum (OSTREE_REPO_FILE (child_file)))); } serialized_tree = create_tree_variant_from_hashes (ostree_mutable_tree_get_files (mtree), dir_contents_checksums, dir_metadata_checksums); if (!ostree_repo_write_metadata (self, OSTREE_OBJECT_TYPE_DIR_TREE, NULL, serialized_tree, &contents_csum, cancellable, error)) return FALSE; ostree_checksum_inplace_from_bytes (contents_csum, contents_checksum_buf); ostree_mutable_tree_set_contents_checksum (mtree, contents_checksum_buf); ret_file = G_FILE (_ostree_repo_file_new_root (self, contents_checksum_buf, metadata_checksum)); } if (out_file) *out_file = g_steal_pointer (&ret_file); return TRUE; } /** * ostree_repo_commit_modifier_new: * @flags: Control options for filter * @commit_filter: (allow-none): Function that can inspect individual files * @user_data: (allow-none): User data * @destroy_notify: A #GDestroyNotify * * Returns: (transfer full): A new commit modifier. 
*/ OstreeRepoCommitModifier * ostree_repo_commit_modifier_new (OstreeRepoCommitModifierFlags flags, OstreeRepoCommitFilter commit_filter, gpointer user_data, GDestroyNotify destroy_notify) { OstreeRepoCommitModifier *modifier = g_new0 (OstreeRepoCommitModifier, 1); modifier->refcount = 1; modifier->flags = flags; modifier->filter = commit_filter; modifier->user_data = user_data; modifier->destroy_notify = destroy_notify; return modifier; } OstreeRepoCommitModifier * ostree_repo_commit_modifier_ref (OstreeRepoCommitModifier *modifier) { gint refcount = g_atomic_int_add (&modifier->refcount, 1); g_assert (refcount > 0); return modifier; } void ostree_repo_commit_modifier_unref (OstreeRepoCommitModifier *modifier) { if (!modifier) return; if (!g_atomic_int_dec_and_test (&modifier->refcount)) return; if (modifier->destroy_notify) modifier->destroy_notify (modifier->user_data); if (modifier->xattr_destroy) modifier->xattr_destroy (modifier->xattr_user_data); g_clear_object (&modifier->sepolicy); g_clear_pointer (&modifier->devino_cache, (GDestroyNotify)g_hash_table_unref); g_free (modifier); return; } /** * ostree_repo_commit_modifier_set_xattr_callback: * @modifier: An #OstreeRepoCommitModifier * @callback: Function to be invoked, should return extended attributes for path * @destroy: Destroy notification * @user_data: Data for @callback: * * If set, this function should return extended attributes to use for * the given path. This is useful for things like ACLs and SELinux, * where a build system can label the files as it's committing to the * repository. */ void ostree_repo_commit_modifier_set_xattr_callback (OstreeRepoCommitModifier *modifier, OstreeRepoCommitModifierXattrCallback callback, GDestroyNotify destroy, gpointer user_data) { modifier->xattr_callback = callback; modifier->xattr_destroy = destroy; modifier->xattr_user_data = user_data; } /** * ostree_repo_commit_modifier_set_sepolicy: * @modifier: An #OstreeRepoCommitModifier * @sepolicy: (allow-none): Policy to use for labeling * * If @policy is non-%NULL, use it to look up labels to use for * "security.selinux" extended attributes. * * Note that any policy specified this way operates in addition to any * extended attributes provided via * ostree_repo_commit_modifier_set_xattr_callback(). However if both * specify a value for "security.selinux", then the one from the * policy wins. */ void ostree_repo_commit_modifier_set_sepolicy (OstreeRepoCommitModifier *modifier, OstreeSePolicy *sepolicy) { g_clear_object (&modifier->sepolicy); modifier->sepolicy = sepolicy ? g_object_ref (sepolicy) : NULL; } /** * ostree_repo_commit_modifier_set_devino_cache: * @modifier: Modifier * @cache: A hash table caching device,inode to checksums * * See the documentation for * `ostree_repo_devino_cache_new()`. This function can * then be used for later calls to * `ostree_repo_write_directory_to_mtree()` to optimize commits. * * Note if your process has multiple writers, you should use separate * `OSTreeRepo` instances if you want to also use this API. * * This function will add a reference to @cache without copying - you * should avoid further mutation of the cache. 
* * Since: 2017.13 */ void ostree_repo_commit_modifier_set_devino_cache (OstreeRepoCommitModifier *modifier, OstreeRepoDevInoCache *cache) { modifier->devino_cache = g_hash_table_ref ((GHashTable*)cache); } OstreeRepoDevInoCache * ostree_repo_devino_cache_ref (OstreeRepoDevInoCache *cache) { g_hash_table_ref ((GHashTable*)cache); return cache; } void ostree_repo_devino_cache_unref (OstreeRepoDevInoCache *cache) { g_hash_table_unref ((GHashTable*)cache); } G_DEFINE_BOXED_TYPE(OstreeRepoDevInoCache, ostree_repo_devino_cache, ostree_repo_devino_cache_ref, ostree_repo_devino_cache_unref); G_DEFINE_BOXED_TYPE(OstreeRepoCommitModifier, ostree_repo_commit_modifier, ostree_repo_commit_modifier_ref, ostree_repo_commit_modifier_unref); /* Special case between bare-user and bare-user-only, * mostly for https://github.com/flatpak/flatpak/issues/845 * see below for any more comments. */ static gboolean import_is_bareuser_only_conversion (OstreeRepo *src_repo, OstreeRepo *dest_repo, OstreeObjectType objtype) { return src_repo->mode == OSTREE_REPO_MODE_BARE_USER && dest_repo->mode == OSTREE_REPO_MODE_BARE_USER_ONLY && objtype == OSTREE_OBJECT_TYPE_FILE; } /* Returns TRUE if we can potentially just call link() to copy an object; * if untrusted the repos must be owned by the same uid. */ static gboolean import_via_reflink_is_possible (OstreeRepo *src_repo, OstreeRepo *dest_repo, OstreeObjectType objtype, gboolean trusted) { /* Untrusted pulls require matching ownership */ if (!trusted && (src_repo->owner_uid != dest_repo->owner_uid)) return FALSE; /* Equal modes are always compatible, and metadata * is identical between all modes. */ if (src_repo->mode == dest_repo->mode || OSTREE_OBJECT_TYPE_IS_META (objtype)) return TRUE; /* And now a special case between bare-user and bare-user-only, * mostly for https://github.com/flatpak/flatpak/issues/845 */ if (import_is_bareuser_only_conversion (src_repo, dest_repo, objtype)) return TRUE; return FALSE; } /* Copy the detached metadata for commit @checksum from @source repo * to @self. */ static gboolean copy_detached_metadata (OstreeRepo *self, OstreeRepo *source, const char *checksum, GCancellable *cancellable, GError **error) { g_autoptr(GVariant) detached_meta = NULL; if (!ostree_repo_read_commit_detached_metadata (source, checksum, &detached_meta, cancellable, error)) return FALSE; if (detached_meta) { if (!ostree_repo_write_commit_detached_metadata (self, checksum, detached_meta, cancellable, error)) return FALSE; } return TRUE; } /* Try to import an object via reflink or just linkat(); returns a value in * @out_was_supported if we were able to do it or not. In this path * we're not verifying the checksum. 
*/ static gboolean import_one_object_direct (OstreeRepo *dest_repo, OstreeRepo *src_repo, const char *checksum, OstreeObjectType objtype, gboolean *out_was_supported, GCancellable *cancellable, GError **error) { const char *errprefix = glnx_strjoina ("Importing ", checksum, ".", ostree_object_type_to_string (objtype)); GLNX_AUTO_PREFIX_ERROR (errprefix, error); char loose_path_buf[_OSTREE_LOOSE_PATH_MAX]; _ostree_loose_path (loose_path_buf, checksum, objtype, dest_repo->mode); /* hardlinks require the owner to match and to be on the same device */ const gboolean can_hardlink = src_repo->owner_uid == dest_repo->owner_uid && src_repo->device == dest_repo->device; /* Find our target dfd */ int dest_dfd; if (dest_repo->commit_stagedir.initialized) dest_dfd = dest_repo->commit_stagedir.fd; else dest_dfd = dest_repo->objects_dir_fd; if (!_ostree_repo_ensure_loose_objdir_at (dest_dfd, loose_path_buf, cancellable, error)) return FALSE; gboolean did_hardlink = FALSE; if (can_hardlink) { if (linkat (src_repo->objects_dir_fd, loose_path_buf, dest_dfd, loose_path_buf, 0) != 0) { if (errno == EEXIST) did_hardlink = TRUE; else if (errno == EMLINK || errno == EXDEV || errno == EPERM) { /* EMLINK, EXDEV and EPERM shouldn't be fatal; we just can't do * the optimization of hardlinking instead of copying. Fall * through below. */ } else return glnx_throw_errno_prefix (error, "linkat"); } else did_hardlink = TRUE; } /* If we weren't able to hardlink, fall back to a copy (which might be * reflinked). */ if (!did_hardlink) { struct stat stbuf; if (!glnx_fstatat (src_repo->objects_dir_fd, loose_path_buf, &stbuf, AT_SYMLINK_NOFOLLOW, error)) return FALSE; /* Let's punt for symlinks right now, it's more complicated */ if (!S_ISREG (stbuf.st_mode)) { *out_was_supported = FALSE; return TRUE; } /* This is yet another variation of glnx_file_copy_at() * that basically just optionally does chown(). Perhaps * in the future we should add flags for those things? */ glnx_autofd int src_fd = -1; if (!glnx_openat_rdonly (src_repo->objects_dir_fd, loose_path_buf, FALSE, &src_fd, error)) return FALSE; /* Open a tmpfile for dest */ g_auto(GLnxTmpfile) tmp_dest = { 0, }; if (!glnx_open_tmpfile_linkable_at (dest_dfd, ".", O_WRONLY | O_CLOEXEC, &tmp_dest, error)) return FALSE; if (glnx_regfile_copy_bytes (src_fd, tmp_dest.fd, (off_t) -1) < 0) return glnx_throw_errno_prefix (error, "regfile copy"); /* Only chown for true bare repos */ if (dest_repo->mode == OSTREE_REPO_MODE_BARE) { if (fchown (tmp_dest.fd, stbuf.st_uid, stbuf.st_gid) != 0) return glnx_throw_errno_prefix (error, "fchown"); } /* Don't want to copy xattrs for archive repos, nor for * bare-user-only. We also only do this for content * objects. 
*/ const gboolean src_is_bare_or_bare_user = G_IN_SET (src_repo->mode, OSTREE_REPO_MODE_BARE, OSTREE_REPO_MODE_BARE_USER); if (src_is_bare_or_bare_user && !OSTREE_OBJECT_TYPE_IS_META(objtype)) { if (src_repo == OSTREE_REPO_MODE_BARE) { g_autoptr(GVariant) xattrs = NULL; if (!glnx_fd_get_all_xattrs (src_fd, &xattrs, cancellable, error)) return FALSE; if (!glnx_fd_set_all_xattrs (tmp_dest.fd, xattrs, cancellable, error)) return FALSE; } else { /* bare-user; we just want ostree.usermeta */ g_autoptr(GBytes) bytes = glnx_fgetxattr_bytes (src_fd, "user.ostreemeta", error); if (bytes == NULL) return FALSE; if (TEMP_FAILURE_RETRY (fsetxattr (src_fd, "user.ostreemeta", (char*)g_bytes_get_data (bytes, NULL), g_bytes_get_size (bytes), 0)) != 0) return glnx_throw_errno_prefix (error, "fsetxattr"); } } if (fchmod (tmp_dest.fd, stbuf.st_mode & ~S_IFMT) != 0) return glnx_throw_errno_prefix (error, "fchmod"); /* For archive repos, we just let the timestamps be object creation. * Otherwise, copy the ostree timestamp value. */ if (_ostree_repo_mode_is_bare (dest_repo->mode)) { struct timespec ts[2]; ts[0] = stbuf.st_atim; ts[1] = stbuf.st_mtim; (void) futimens (tmp_dest.fd, ts); } if (!_ostree_repo_commit_tmpf_final (dest_repo, checksum, objtype, &tmp_dest, cancellable, error)) return FALSE; } if (objtype == OSTREE_OBJECT_TYPE_COMMIT) { if (!copy_detached_metadata (dest_repo, src_repo, checksum, cancellable, error)) return FALSE; } else if (objtype == OSTREE_OBJECT_TYPE_FILE) { if (!_import_payload_link (dest_repo, src_repo, checksum, cancellable, error)) return FALSE; } *out_was_supported = TRUE; return TRUE; } /* A version of ostree_repo_import_object_from_with_trust() * with flags; may make this public API later. */ gboolean _ostree_repo_import_object (OstreeRepo *self, OstreeRepo *source, OstreeObjectType objtype, const char *checksum, OstreeRepoImportFlags flags, GCancellable *cancellable, GError **error) { const gboolean trusted = (flags & _OSTREE_REPO_IMPORT_FLAGS_TRUSTED) > 0; /* Implements OSTREE_REPO_PULL_FLAGS_BAREUSERONLY_FILES which was designed for flatpak */ const gboolean verify_bareuseronly = (flags & _OSTREE_REPO_IMPORT_FLAGS_VERIFY_BAREUSERONLY) > 0; /* A special case between bare-user and bare-user-only, * mostly for https://github.com/flatpak/flatpak/issues/845 */ const gboolean is_bareuseronly_conversion = import_is_bareuser_only_conversion (source, self, objtype); gboolean try_direct = TRUE; /* If we need to do bareuseronly verification, or we're potentially doing a * bareuseronly conversion, let's verify those first so we don't complicate * the rest of the code below. */ if ((verify_bareuseronly || is_bareuseronly_conversion) && !OSTREE_OBJECT_TYPE_IS_META (objtype)) { g_autoptr(GFileInfo) src_finfo = NULL; if (!ostree_repo_load_file (source, checksum, NULL, &src_finfo, NULL, cancellable, error)) return FALSE; if (verify_bareuseronly) { if (!_ostree_validate_bareuseronly_mode_finfo (src_finfo, checksum, error)) return FALSE; } if (is_bareuseronly_conversion) { switch (g_file_info_get_file_type (src_finfo)) { case G_FILE_TYPE_REGULAR: /* This is OK, we'll try a hardlink */ break; case G_FILE_TYPE_SYMBOLIC_LINK: /* Symlinks in bare-user are regular files, we can't * hardlink them to another repo mode. */ try_direct = FALSE; break; default: g_assert_not_reached (); break; } } } /* First, let's see if we can import via reflink/hardlink. 
*/ if (try_direct && import_via_reflink_is_possible (source, self, objtype, trusted)) { /* For local repositories, if the untrusted flag is set, we verify the * checksum first. This assumes then that the files are immutable - the * above check verified that the owner uids match. */ if (!trusted) { if (!ostree_repo_fsck_object (source, objtype, checksum, cancellable, error)) return FALSE; } gboolean direct_was_supported = FALSE; if (!import_one_object_direct (self, source, checksum, objtype, &direct_was_supported, cancellable, error)) return FALSE; /* If direct import succeeded, we're done! */ if (direct_was_supported) return TRUE; } /* The more expensive copy path; involves parsing the object. For * example the input might be an archive repo and the destination bare, * or vice versa. Or we may simply need to verify the checksum. */ /* First, do we have the object already? */ gboolean has_object; if (!ostree_repo_has_object (self, objtype, checksum, &has_object, cancellable, error)) return FALSE; /* If we have it, we're done */ if (has_object) { if (objtype == OSTREE_OBJECT_TYPE_FILE) { if (!_import_payload_link (self, source, checksum, cancellable, error)) return FALSE; } return TRUE; } if (OSTREE_OBJECT_TYPE_IS_META (objtype)) { /* Metadata object */ g_autoptr(GVariant) variant = NULL; if (objtype == OSTREE_OBJECT_TYPE_COMMIT) { /* FIXME - cleanup detached metadata if copy below fails */ if (!copy_detached_metadata (self, source, checksum, cancellable, error)) return FALSE; } if (!ostree_repo_load_variant (source, objtype, checksum, &variant, error)) return FALSE; /* Note this one also now verifies structure in the !trusted case */ g_autofree guchar *real_csum = NULL; if (!ostree_repo_write_metadata (self, objtype, checksum, variant, trusted ? NULL : &real_csum, cancellable, error)) return FALSE; } else { /* Content object */ guint64 length; g_autoptr(GInputStream) object_stream = NULL; if (!ostree_repo_load_object_stream (source, objtype, checksum, &object_stream, &length, cancellable, error)) return FALSE; g_autofree guchar *real_csum = NULL; if (!ostree_repo_write_content (self, checksum, object_stream, length, trusted ? NULL : &real_csum, cancellable, error)) return FALSE; } return TRUE; } static OstreeRepoTransactionStats * ostree_repo_transaction_stats_copy (OstreeRepoTransactionStats *stats) { return g_memdup (stats, sizeof (OstreeRepoTransactionStats)); } static void ostree_repo_transaction_stats_free (OstreeRepoTransactionStats *stats) { return g_free (stats); } G_DEFINE_BOXED_TYPE(OstreeRepoTransactionStats, ostree_repo_transaction_stats, ostree_repo_transaction_stats_copy, ostree_repo_transaction_stats_free);
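/* Example: a minimal, hedged sketch of driving the importer above. The * `dest`, `src`, and `commit_checksum` variables are assumptions supplied * by the caller; only the function, object type, and flag names come from * this file. */ static gboolean example_import_commit (OstreeRepo *dest, OstreeRepo *src, const char *commit_checksum, GError **error) { /* Trusted import: skips the per-object fsck before trying reflink/hardlink */ return _ostree_repo_import_object (dest, src, OSTREE_OBJECT_TYPE_COMMIT, commit_checksum, _OSTREE_REPO_IMPORT_FLAGS_TRUSTED, NULL, error); }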
1
16,371
Ouch! :man_facepalming: And of course, this worked for me because that evaluated to false when I was testing the bare-user path.
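For reference, the check this comment points at is in import_one_object_direct above: `if (src_repo == OSTREE_REPO_MODE_BARE)` compares the repo *pointer* against an enum value, so it evaluates to false for any valid repo and the bare-user branch runs instead. A minimal sketch of the presumably intended condition (the surrounding code consistently compares the mode field):

/* presumably intended: compare the repo's mode, not the pointer */
if (src_repo->mode == OSTREE_REPO_MODE_BARE)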
ostreedev-ostree
c
@@ -380,7 +380,10 @@ func (c *FCGIClient) Request(p map[string]string, req io.Reader) (resp *http.Res if err != nil { return } - resp.Status = statusParts[1] + if (len(statusParts) > 0) { + resp.Status = statusParts[1] + } + } else { resp.StatusCode = http.StatusOK }
1
// Forked Jan. 2015 from http://bitbucket.org/PinIdea/fcgi_client // (which is forked from https://code.google.com/p/go-fastcgi-client/) // This fork contains several fixes and improvements by Matt Holt and // other contributors to this project. // Copyright 2012 Junqing Tan <[email protected]> and The Go Authors // Use of this source code is governed by a BSD-style // Part of source code is from Go fcgi package package fastcgi import ( "bufio" "bytes" "encoding/binary" "errors" "io" "io/ioutil" "mime/multipart" "net" "net/http" "net/http/httputil" "net/textproto" "net/url" "os" "path/filepath" "strconv" "strings" "sync" ) const FCGI_LISTENSOCK_FILENO uint8 = 0 const FCGI_HEADER_LEN uint8 = 8 const VERSION_1 uint8 = 1 const FCGI_NULL_REQUEST_ID uint8 = 0 const FCGI_KEEP_CONN uint8 = 1 const doubleCRLF = "\r\n\r\n" const ( FCGI_BEGIN_REQUEST uint8 = iota + 1 FCGI_ABORT_REQUEST FCGI_END_REQUEST FCGI_PARAMS FCGI_STDIN FCGI_STDOUT FCGI_STDERR FCGI_DATA FCGI_GET_VALUES FCGI_GET_VALUES_RESULT FCGI_UNKNOWN_TYPE FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE ) const ( FCGI_RESPONDER uint8 = iota + 1 FCGI_AUTHORIZER FCGI_FILTER ) const ( FCGI_REQUEST_COMPLETE uint8 = iota FCGI_CANT_MPX_CONN FCGI_OVERLOADED FCGI_UNKNOWN_ROLE ) const ( FCGI_MAX_CONNS string = "MAX_CONNS" FCGI_MAX_REQS string = "MAX_REQS" FCGI_MPXS_CONNS string = "MPXS_CONNS" ) const ( maxWrite = 65500 // 65530 may work, but for compatibility maxPad = 255 ) type header struct { Version uint8 Type uint8 Id uint16 ContentLength uint16 PaddingLength uint8 Reserved uint8 } // for padding so we don't have to allocate all the time // not synchronized because we don't care what the contents are var pad [maxPad]byte func (h *header) init(recType uint8, reqID uint16, contentLength int) { h.Version = 1 h.Type = recType h.Id = reqID h.ContentLength = uint16(contentLength) h.PaddingLength = uint8(-contentLength & 7) } type record struct { h header rbuf []byte } func (rec *record) read(r io.Reader) (buf []byte, err error) { if err = binary.Read(r, binary.BigEndian, &rec.h); err != nil { return } if rec.h.Version != 1 { err = errors.New("fcgi: invalid header version") return } if rec.h.Type == FCGI_END_REQUEST { err = io.EOF return } n := int(rec.h.ContentLength) + int(rec.h.PaddingLength) if len(rec.rbuf) < n { rec.rbuf = make([]byte, n) } if n, err = io.ReadFull(r, rec.rbuf[:n]); err != nil { return } buf = rec.rbuf[:int(rec.h.ContentLength)] return } type FCGIClient struct { mutex sync.Mutex rwc io.ReadWriteCloser h header buf bytes.Buffer keepAlive bool reqId uint16 } // Dial connects to the fcgi responder at the specified network address. // See func net.Dial for a description of the network and address parameters. 
func Dial(network, address string) (fcgi *FCGIClient, err error) { var conn net.Conn conn, err = net.Dial(network, address) if err != nil { return } fcgi = &FCGIClient{ rwc: conn, keepAlive: false, reqId: 1, } return } // Close closes fcgi connection func (c *FCGIClient) Close() { c.rwc.Close() } func (c *FCGIClient) writeRecord(recType uint8, content []byte) (err error) { c.mutex.Lock() defer c.mutex.Unlock() c.buf.Reset() c.h.init(recType, c.reqId, len(content)) if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil { return err } if _, err := c.buf.Write(content); err != nil { return err } if _, err := c.buf.Write(pad[:c.h.PaddingLength]); err != nil { return err } _, err = c.rwc.Write(c.buf.Bytes()) return err } func (c *FCGIClient) writeBeginRequest(role uint16, flags uint8) error { b := [8]byte{byte(role >> 8), byte(role), flags} return c.writeRecord(FCGI_BEGIN_REQUEST, b[:]) } func (c *FCGIClient) writeEndRequest(appStatus int, protocolStatus uint8) error { b := make([]byte, 8) binary.BigEndian.PutUint32(b, uint32(appStatus)) b[4] = protocolStatus return c.writeRecord(FCGI_END_REQUEST, b) } func (c *FCGIClient) writePairs(recType uint8, pairs map[string]string) error { w := newWriter(c, recType) b := make([]byte, 8) nn := 0 for k, v := range pairs { m := 8 + len(k) + len(v) if m > maxWrite { // param data size exceeds 65535 bytes vl := maxWrite - 8 - len(k) v = v[:vl] } n := encodeSize(b, uint32(len(k))) n += encodeSize(b[n:], uint32(len(v))) m = n + len(k) + len(v) if (nn + m) > maxWrite { w.Flush() nn = 0 } nn += m if _, err := w.Write(b[:n]); err != nil { return err } if _, err := w.WriteString(k); err != nil { return err } if _, err := w.WriteString(v); err != nil { return err } } w.Close() return nil } func readSize(s []byte) (uint32, int) { if len(s) == 0 { return 0, 0 } size, n := uint32(s[0]), 1 if size&(1<<7) != 0 { if len(s) < 4 { return 0, 0 } n = 4 size = binary.BigEndian.Uint32(s) size &^= 1 << 31 } return size, n } func readString(s []byte, size uint32) string { if size > uint32(len(s)) { return "" } return string(s[:size]) } func encodeSize(b []byte, size uint32) int { if size > 127 { size |= 1 << 31 binary.BigEndian.PutUint32(b, size) return 4 } b[0] = byte(size) return 1 } // bufWriter encapsulates bufio.Writer but also closes the underlying stream when // Closed. type bufWriter struct { closer io.Closer *bufio.Writer } func (w *bufWriter) Close() error { if err := w.Writer.Flush(); err != nil { w.closer.Close() return err } return w.closer.Close() } func newWriter(c *FCGIClient, recType uint8) *bufWriter { s := &streamWriter{c: c, recType: recType} w := bufio.NewWriterSize(s, maxWrite) return &bufWriter{s, w} } // streamWriter abstracts out the separation of a stream into discrete records. // It only writes maxWrite bytes at a time. 
type streamWriter struct { c *FCGIClient recType uint8 } func (w *streamWriter) Write(p []byte) (int, error) { nn := 0 for len(p) > 0 { n := len(p) if n > maxWrite { n = maxWrite } if err := w.c.writeRecord(w.recType, p[:n]); err != nil { return nn, err } nn += n p = p[n:] } return nn, nil } func (w *streamWriter) Close() error { // send empty record to close the stream return w.c.writeRecord(w.recType, nil) } type streamReader struct { c *FCGIClient buf []byte } func (w *streamReader) Read(p []byte) (n int, err error) { if len(p) > 0 { if len(w.buf) == 0 { rec := &record{} w.buf, err = rec.read(w.c.rwc) if err != nil { return } } n = len(p) if n > len(w.buf) { n = len(w.buf) } copy(p, w.buf[:n]) w.buf = w.buf[n:] } return } // Do makes the request and returns an io.Reader that translates the data read // from the fcgi responder out of fcgi packets before returning it. func (c *FCGIClient) Do(p map[string]string, req io.Reader) (r io.Reader, err error) { err = c.writeBeginRequest(uint16(FCGI_RESPONDER), 0) if err != nil { return } err = c.writePairs(FCGI_PARAMS, p) if err != nil { return } body := newWriter(c, FCGI_STDIN) if req != nil { io.Copy(body, req) } body.Close() r = &streamReader{c: c} return } // Request returns an HTTP Response with Header and Body // from the fcgi responder func (c *FCGIClient) Request(p map[string]string, req io.Reader) (resp *http.Response, err error) { r, err := c.Do(p, req) if err != nil { return } rb := bufio.NewReader(r) tp := textproto.NewReader(rb) resp = new(http.Response) // Parse the response headers. mimeHeader, err := tp.ReadMIMEHeader() if err != nil && err != io.EOF { return } resp.Header = http.Header(mimeHeader) if resp.Header.Get("Status") != "" { statusParts := strings.SplitN(resp.Header.Get("Status"), " ", 2) resp.StatusCode, err = strconv.Atoi(statusParts[0]) if err != nil { return } resp.Status = statusParts[1] } else { resp.StatusCode = http.StatusOK } // TODO: fixTransferEncoding ? resp.TransferEncoding = resp.Header["Transfer-Encoding"] resp.ContentLength, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) if chunked(resp.TransferEncoding) { resp.Body = ioutil.NopCloser(httputil.NewChunkedReader(rb)) } else { resp.Body = ioutil.NopCloser(rb) } return } // Get issues a GET request to the fcgi responder. func (c *FCGIClient) Get(p map[string]string) (resp *http.Response, err error) { p["REQUEST_METHOD"] = "GET" p["CONTENT_LENGTH"] = "0" return c.Request(p, nil) } // Head issues a HEAD request to the fcgi responder. func (c *FCGIClient) Head(p map[string]string) (resp *http.Response, err error) { p["REQUEST_METHOD"] = "HEAD" p["CONTENT_LENGTH"] = "0" return c.Request(p, nil) } // Options issues an OPTIONS request to the fcgi responder. func (c *FCGIClient) Options(p map[string]string) (resp *http.Response, err error) { p["REQUEST_METHOD"] = "OPTIONS" p["CONTENT_LENGTH"] = "0" return c.Request(p, nil) } // Post issues a POST request to the fcgi responder, with the request body // in the format that bodyType specifies func (c *FCGIClient) Post(p map[string]string, bodyType string, body io.Reader, l int) (resp *http.Response, err error) { if len(p["REQUEST_METHOD"]) == 0 || p["REQUEST_METHOD"] == "GET" { p["REQUEST_METHOD"] = "POST" } p["CONTENT_LENGTH"] = strconv.Itoa(l) if len(bodyType) > 0 { p["CONTENT_TYPE"] = bodyType } else { p["CONTENT_TYPE"] = "application/x-www-form-urlencoded" } return c.Request(p, body) } // Put issues a PUT request to the fcgi responder. 
func (c *FCGIClient) Put(p map[string]string, bodyType string, body io.Reader, l int) (resp *http.Response, err error) { p["REQUEST_METHOD"] = "PUT" return c.Post(p, bodyType, body, l) } // Patch issues a PATCH request to the fcgi responder. func (c *FCGIClient) Patch(p map[string]string, bodyType string, body io.Reader, l int) (resp *http.Response, err error) { p["REQUEST_METHOD"] = "PATCH" return c.Post(p, bodyType, body, l) } // Delete issues a DELETE request to the fcgi responder. func (c *FCGIClient) Delete(p map[string]string, bodyType string, body io.Reader, l int) (resp *http.Response, err error) { p["REQUEST_METHOD"] = "DELETE" return c.Post(p, bodyType, body, l) } // PostForm issues a POST to the fcgi responder, with form // as a string key to a list of values (url.Values) func (c *FCGIClient) PostForm(p map[string]string, data url.Values) (resp *http.Response, err error) { body := bytes.NewReader([]byte(data.Encode())) return c.Post(p, "application/x-www-form-urlencoded", body, body.Len()) } // PostFile issues a POST to the fcgi responder in the multipart (RFC 2046) standard, // with form as a string key to a list of values (url.Values), // and/or with file as a string key to a list of file paths. func (c *FCGIClient) PostFile(p map[string]string, data url.Values, file map[string]string) (resp *http.Response, err error) { buf := &bytes.Buffer{} writer := multipart.NewWriter(buf) bodyType := writer.FormDataContentType() for key, val := range data { for _, v0 := range val { err = writer.WriteField(key, v0) if err != nil { return } } } for key, val := range file { fd, e := os.Open(val) if e != nil { return nil, e } defer fd.Close() part, e := writer.CreateFormFile(key, filepath.Base(val)) if e != nil { return nil, e } _, err = io.Copy(part, fd) } err = writer.Close() if err != nil { return } return c.Post(p, bodyType, buf, buf.Len()) } // chunked checks whether chunked is part of the encodings stack func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" }
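// Example: a minimal, hedged usage sketch of the client above. The socket // path and FastCGI params are illustrative assumptions, not part of this // package; Get fills in REQUEST_METHOD and CONTENT_LENGTH itself. func exampleGet() (*http.Response, error) { fcgi, err := Dial("unix", "/run/php-fpm.sock") if err != nil { return nil, err } defer fcgi.Close() return fcgi.Get(map[string]string{ "SCRIPT_FILENAME": "/srv/www/index.php", "SERVER_PROTOCOL": "HTTP/1.1", }) }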
1
6,971
Parentheses aren't needed here. `if len(statusParts) > 0 {` will suffice.
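For illustration, the parenthesis-free form (which gofmt also produces) is shown below; as a hedged aside beyond the original suggestion, note that `strings.SplitN(s, " ", 2)` returns a single element when the Status header contains no space, so a `> 1` bound is what actually protects the `statusParts[1]` index:

if len(statusParts) > 1 {
	resp.Status = statusParts[1]
}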
caddyserver-caddy
go
@@ -44,9 +44,10 @@ const serverName = "yarpc-test" // TT is the gauntlets table test struct type TT struct { - Service string // thrift service name; defaults to ThriftTest - Function string // name of the Go function on the client - Oneway bool // if the function is a oneway function + Service string // thrift service name; defaults to ThriftTest + Function string // name of the Go function on the client + Oneway bool // if the function is a oneway function + SkipOnServers []string // if the test needs to be skipped on particular server Details string // optional extra details about what this test does Give []interface{} // arguments besides context
1
// Copyright (c) 2017 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package gauntlet import ( "context" "reflect" "strings" "time" "go.uber.org/yarpc" "go.uber.org/yarpc/api/transport" "go.uber.org/yarpc/encoding/thrift" disp "go.uber.org/yarpc/internal/crossdock/client/dispatcher" "go.uber.org/yarpc/internal/crossdock/client/params" "go.uber.org/yarpc/internal/crossdock/client/random" "go.uber.org/yarpc/internal/crossdock/thrift/gauntlet" "go.uber.org/yarpc/internal/crossdock/thrift/gauntlet/secondserviceclient" "go.uber.org/yarpc/internal/crossdock/thrift/gauntlet/thrifttestclient" "github.com/crossdock/crossdock-go" "go.uber.org/thriftrw/ptr" ) const serverName = "yarpc-test" // TT is the gauntlets table test struct type TT struct { Service string // thrift service name; defaults to ThriftTest Function string // name of the Go function on the client Oneway bool // if the function is a oneway function Details string // optional extra details about what this test does Give []interface{} // arguments besides context Want interface{} // expected response; nil for void WantError error // expected error WantErrorLike string // for just matching error messages } // Run executes the thriftgauntlet behavior. func Run(t crossdock.T) { fatals := crossdock.Fatals(t) dispatcher := disp.Create(t) fatals.NoError(dispatcher.Start(), "could not start Dispatcher") defer dispatcher.Stop() t.Tag("transport", t.Param(params.Transport)) t.Tag("server", t.Param(params.Server)) RunGauntlet(t, Config{ Dispatcher: dispatcher, ServerName: serverName, }) } // ServiceSet specifies which services the Gauntlet should make requests to. type ServiceSet int // The different Thrift services that the gauntlet can make requests to. const ( ThriftTest ServiceSet = 1 << iota SecondService AllServices = ThriftTest | SecondService ) // Config configures a gauntlet run type Config struct { Dispatcher *yarpc.Dispatcher // Name of the outbound to which the requests will be sent ServerName string // Whether requests should use Thrift envelopes. Defaults to false. Envelope bool // Bit mask of the different services to call. Defaults to AllServices. 
Services ServiceSet // Extra options for the Thrift client ClientOptions []thrift.ClientOption // Whether to run oneway tests EnableOneway bool } // RunGauntlet takes an rpc object and runs the gauntlet func RunGauntlet(t crossdock.T, c Config) { checks := crossdock.Checks(t) if c.Services == 0 { c.Services = AllServices } bytesToken := random.Bytes(10) tests := []TT{ { Function: "TestBinary", Give: []interface{}{bytesToken}, Want: bytesToken, }, { Function: "TestByte", Give: []interface{}{ptr.Int8(42)}, Want: int8(42), }, { Function: "TestDouble", Give: []interface{}{ptr.Float64(12.34)}, Want: float64(12.34), }, { Function: "TestEnum", Details: "MyNumberz", Give: []interface{}{numberzp(gauntlet.MyNumberz)}, Want: gauntlet.MyNumberz, }, { Function: "TestEnum", Details: "NumberzThree", Give: []interface{}{numberzp(gauntlet.NumberzThree)}, Want: gauntlet.NumberzThree, }, { Function: "TestEnum", Details: "unrecognized Numberz", Give: []interface{}{numberzp(gauntlet.Numberz(42))}, Want: gauntlet.Numberz(42), }, { Function: "TestException", Details: "Xception", Give: []interface{}{ptr.String("Xception")}, WantError: &gauntlet.Xception{ ErrorCode: ptr.Int32(1001), Message: ptr.String("Xception"), }, }, { Function: "TestException", Details: "TException", Give: []interface{}{ptr.String("TException")}, WantErrorLike: "great sadness", }, { Function: "TestException", Details: "no error", Give: []interface{}{ptr.String("yolo")}, }, { Function: "TestI32", Give: []interface{}{ptr.Int32(123)}, Want: int32(123), }, { Function: "TestI64", Give: []interface{}{ptr.Int64(18934714)}, Want: int64(18934714), }, { Function: "TestInsanity", Give: []interface{}{ &gauntlet.Insanity{ UserMap: map[gauntlet.Numberz]gauntlet.UserId{ gauntlet.NumberzThree: gauntlet.UserId(100), gauntlet.Numberz(100): gauntlet.UserId(200), }, Xtructs: []*gauntlet.Xtruct{ {StringThing: ptr.String("0")}, {ByteThing: ptr.Int8(1)}, {I32Thing: ptr.Int32(2)}, {I64Thing: ptr.Int64(3)}, }, }, }, Want: map[gauntlet.UserId]map[gauntlet.Numberz]*gauntlet.Insanity{ 1: { gauntlet.NumberzTwo: &gauntlet.Insanity{ UserMap: map[gauntlet.Numberz]gauntlet.UserId{ gauntlet.NumberzThree: gauntlet.UserId(100), gauntlet.Numberz(100): gauntlet.UserId(200), }, Xtructs: []*gauntlet.Xtruct{ {StringThing: ptr.String("0")}, {ByteThing: ptr.Int8(1)}, {I32Thing: ptr.Int32(2)}, {I64Thing: ptr.Int64(3)}, }, }, gauntlet.NumberzThree: &gauntlet.Insanity{ UserMap: map[gauntlet.Numberz]gauntlet.UserId{ gauntlet.NumberzThree: gauntlet.UserId(100), gauntlet.Numberz(100): gauntlet.UserId(200), }, Xtructs: []*gauntlet.Xtruct{ {StringThing: ptr.String("0")}, {ByteThing: ptr.Int8(1)}, {I32Thing: ptr.Int32(2)}, {I64Thing: ptr.Int64(3)}, }, }, }, 2: { gauntlet.NumberzSix: &gauntlet.Insanity{}, }, }, }, { Function: "TestList", Give: []interface{}{[]int32{1, 2, 3}}, Want: []int32{1, 2, 3}, }, { Function: "TestMap", Give: []interface{}{map[int32]int32{1: 2, 3: 4, 5: 6}}, Want: map[int32]int32{1: 2, 3: 4, 5: 6}, }, { Function: "TestMapMap", Give: []interface{}{ptr.Int32(42)}, Want: map[int32]map[int32]int32{ -4: { -4: -4, -3: -3, -2: -2, -1: -1, }, 4: { 1: 1, 2: 2, 3: 3, 4: 4, }, }, }, { Function: "TestMulti", Give: []interface{}{ ptr.Int8(100), ptr.Int32(200), ptr.Int64(300), map[int16]string{1: "1", 2: "2", 3: "3"}, numberzp(gauntlet.NumberzEight), useridp(42), }, Want: &gauntlet.Xtruct{ StringThing: ptr.String("Hello2"), ByteThing: ptr.Int8(100), I32Thing: ptr.Int32(200), I64Thing: ptr.Int64(300), }, }, { Function: "TestMultiException", Details: "Xception", Give: 
[]interface{}{ptr.String("Xception"), ptr.String("foo")}, WantError: &gauntlet.Xception{ ErrorCode: ptr.Int32(1001), Message: ptr.String("This is an Xception"), }, }, { Function: "TestMultiException", Details: "Xception2", Give: []interface{}{ptr.String("Xception2"), ptr.String("foo")}, WantError: &gauntlet.Xception2{ ErrorCode: ptr.Int32(2002), StructThing: &gauntlet.Xtruct{StringThing: ptr.String("foo")}, }, }, { Function: "TestMultiException", Details: "no error", Give: []interface{}{ptr.String("hello"), ptr.String("foo")}, Want: &gauntlet.Xtruct{StringThing: ptr.String("foo")}, }, { Function: "TestNest", Give: []interface{}{ &gauntlet.Xtruct2{ ByteThing: ptr.Int8(-1), I32Thing: ptr.Int32(-1234), StructThing: &gauntlet.Xtruct{ StringThing: ptr.String("0"), ByteThing: ptr.Int8(1), I32Thing: ptr.Int32(2), I64Thing: ptr.Int64(3), }, }, }, Want: &gauntlet.Xtruct2{ ByteThing: ptr.Int8(-1), I32Thing: ptr.Int32(-1234), StructThing: &gauntlet.Xtruct{ StringThing: ptr.String("0"), ByteThing: ptr.Int8(1), I32Thing: ptr.Int32(2), I64Thing: ptr.Int64(3), }, }, }, { Function: "TestSet", Give: []interface{}{ map[int32]struct{}{ 1: {}, 2: {}, -1: {}, -2: {}, }, }, Want: map[int32]struct{}{ 1: {}, 2: {}, -1: {}, -2: {}, }, }, { Function: "TestString", Give: []interface{}{ptr.String("hello")}, Want: "hello", }, { Function: "TestStringMap", Give: []interface{}{ map[string]string{ "foo": "bar", "hello": "world", }, }, Want: map[string]string{ "foo": "bar", "hello": "world", }, }, { Function: "TestStruct", Give: []interface{}{ &gauntlet.Xtruct{ StringThing: ptr.String("0"), ByteThing: ptr.Int8(1), I32Thing: ptr.Int32(2), I64Thing: ptr.Int64(3), }, }, Want: &gauntlet.Xtruct{ StringThing: ptr.String("0"), ByteThing: ptr.Int8(1), I32Thing: ptr.Int32(2), I64Thing: ptr.Int64(3), }, }, { Function: "TestTypedef", Give: []interface{}{useridp(42)}, Want: gauntlet.UserId(42), }, { Function: "TestVoid", Give: []interface{}{}, }, { Function: "TestOneway", Oneway: true, Give: []interface{}{ptr.Int32(123)}, WantError: nil, }, { Service: "SecondService", Function: "BlahBlah", Give: []interface{}{}, }, { Service: "SecondService", Function: "SecondtestString", Give: []interface{}{ptr.String("hello")}, Want: "hello", }, } for _, tt := range tests { if tt.Service == "" { tt.Service = "ThriftTest" } switch tt.Service { case "ThriftTest": if c.Services&ThriftTest == 0 { continue } case "SecondService": if c.Services&SecondService == 0 { continue } } t.Tag("service", tt.Service) t.Tag("function", tt.Function) //only run oneway tests if specified if !c.EnableOneway && tt.Oneway { continue } desc := BuildDesc(tt) client := buildClient(t, desc, tt.Service, c) f := client.MethodByName(tt.Function) if !checks.True(f.IsValid(), "%v: invalid function", desc) { continue } ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() args := []reflect.Value{reflect.ValueOf(ctx)} if give, ok := BuildArgs(t, desc, f.Type(), tt.Give, len(args)); ok { args = append(args, give...) 
} else { continue } got, err := extractCallResponse(t, desc, f.Call(args)) if isUnrecognizedProcedure(err) { t.Skipf("%v: procedure not defined", desc) continue } Assert(t, tt, desc, got, err) } } // BuildDesc creates a logging string for the test // // We log in one of the following formats, // // $Function: $message // $Function: $description: $message // $Service: $function: $message // $Service: $function: $description: $message // func BuildDesc(tt TT) string { desc := tt.Function if tt.Details != "" { desc = desc + ": " + tt.Details } if tt.Service != "" { desc = tt.Service + ": " + desc } return desc } func buildClient(t crossdock.T, desc string, service string, c Config) reflect.Value { clientConfig := c.Dispatcher.ClientConfig(c.ServerName) opts := c.ClientOptions if c.Envelope { opts = append(opts, thrift.Enveloped) } switch service { case "", "ThriftTest": client := thrifttestclient.New(clientConfig, opts...) return reflect.ValueOf(client) case "SecondService": client := secondserviceclient.New(clientConfig, opts...) return reflect.ValueOf(client) default: crossdock.Fatals(t).Fail("", "%v: unknown thrift service", desc) return reflect.Value{} // we'll never actually get here } } // BuildArgs creates an args slice than can be used to make a f.Call(args) func BuildArgs(t crossdock.T, desc string, ft reflect.Type, give []interface{}, initialArgs int) (_ []reflect.Value, ok bool) { check := crossdock.Checks(t) var args []reflect.Value for i, v := range give { var val reflect.Value vt := ft.In(i + initialArgs) if v == nil { // nil is an invalid argument to ValueOf. For nil, use the zero // value for that argument. val = reflect.Zero(vt) } else { val = reflect.ValueOf(v) } if !check.Equal(vt, val.Type(), "%v: argument %v type mismatch", desc, i) { return nil, false } args = append(args, val) } return args, true } func isUnrecognizedProcedure(err error) bool { if transport.IsBadRequestError(err) { // TODO: Once all other languages implement the gauntlet test // subject, we can remove this check. return strings.Contains(err.Error(), "unrecognized procedure") } return false } func extractCallResponse(t crossdock.T, desc string, returns []reflect.Value) (got interface{}, err error) { switch len(returns) { case 1: e := returns[0].Interface() if e != nil { err = e.(error) } case 2: got = returns[0].Interface() if e := returns[1].Interface(); e != nil { err = e.(error) } default: crossdock.Assert(t).Fail("", "%v: received unexpected number of return values: %v", desc, returns) } return got, err } // Assert verifies the call response against TT func Assert(t crossdock.T, tt TT, desc string, got interface{}, err error) { checks := crossdock.Checks(t) assert := crossdock.Assert(t) if tt.WantError != nil || tt.WantErrorLike != "" { if !checks.Error(err, "%v: expected failure but got: %v", desc, got) { return } if tt.WantError != nil { assert.Equal(tt.WantError, err, "%v: server returned error: %v", desc, err) } if tt.WantErrorLike != "" { assert.Contains(err.Error(), tt.WantErrorLike, "%v: server returned error: %v", desc, err) } } else { if !checks.NoError(err, "%v: call failed", desc) { return } if tt.Want != nil { assert.Equal(tt.Want, got, "%v: server returned: %v", desc, got) } } } func numberzp(x gauntlet.Numberz) *gauntlet.Numberz { return &x } func useridp(x gauntlet.UserId) *gauntlet.UserId { return &x }
1
13,167
// the test is skipped on the given servers because it will fail.
yarpc-yarpc-go
go
@@ -220,8 +220,7 @@ describe 'OrganizationsController' do
 
     it 'should return unauthorized if api key is invalid' do
       get :index, format: :xml, api_key: 'dummy_id'
-
-      must_respond_with :unauthorized
+      must_respond_with :bad_request
    end
  end
 
1
require 'test_helper' require 'test_helpers/xml_parsing_helpers' describe 'OrganizationsController' do let(:account) { create(:account) } let(:organization) { create(:organization) } before do @proj1 = create(:project) @proj2 = create(:project) @proj3 = create(:project) @organization = @proj1.organization @account = create(:account, organization_id: @organization.id) create_position(account: @account, project: @proj1, organization: @organization) create_position(account: @account, project: @proj2, organization: @proj2.organization) create_position(account: @account, project: @proj3, organization: @proj3.organization) end it '#outside_projects allows viewing by unlogged users' do login_as nil get :outside_projects, id: @organization must_respond_with :ok end it '#outside_projects can be accessed via the API' do api_key = create(:api_key, account_id: account.id) get :outside_projects, id: @organization, format: :xml, api_key: api_key.oauth_application.uid must_respond_with :ok end it '#outside_projects gracefully handles non-existant organizations' do get :outside_projects, id: 'I_AM_A_BANANA' must_respond_with :not_found end it '#affiliated_committers allows viewing by unlogged users' do login_as nil get :affiliated_committers, id: @organization must_respond_with :ok end it '#affiliated_committers supports xml api' do api_key = create(:api_key, account_id: account.id) get :affiliated_committers, id: @organization, format: :xml, api_key: api_key.oauth_application.uid must_respond_with :ok end it '#affiliated_committers gracefully handles non-existant organizations' do get :affiliated_committers, id: 'I_AM_A_BANANA' must_respond_with :not_found end it 'should return affiliated projects for unlogged users' do login_as nil get :projects, id: @organization must_respond_with :ok assigns(:affiliated_projects).count.must_equal 1 end it 'outside_committers' do get :outside_committers, id: @organization must_respond_with :ok end it 'should get outside_committers in xml format with valid api key' do key = create(:api_key, account_id: @account.id) get :outside_committers, id: @organization, format: :xml, api_key: key.oauth_application.uid must_respond_with :ok end it 'should get show page for a valid organization' do get :show, id: @organization must_respond_with :ok assert_select 'div#org_summary' assert_select 'div#addthis_sharing' assert_select 'div#org_infographic' end it 'should support show page via xml api' do key = create(:api_key, account_id: create(:account).id) get :show, id: @organization, format: :xml, api_key: key.oauth_application.uid must_respond_with :ok end it 'show should render for organizations that contain projects that have been analyzed' do organization = create(:organization) project = create(:project, organization: organization) af_1 = create(:activity_fact, analysis: project.best_analysis, code_added: 8_000, comments_added: 8_000) create(:factoid, analysis: project.best_analysis, language: af_1.language) af_2 = create(:activity_fact, analysis: project.best_analysis) create(:factoid, analysis: project.best_analysis, language: af_2.language) af_3 = create(:activity_fact, analysis: project.best_analysis) create(:factoid, analysis: project.best_analysis, language: af_3.language) af_4 = create(:activity_fact, analysis: project.best_analysis) create(:factoid, analysis: project.best_analysis, language: af_4.language) ats = project.best_analysis.all_time_summary ats.update_attributes(recent_contributors: [create(:person).id, create(:person).id]) cf = create(:commit_flag) 
create(:analysis_sloc_set, analysis: project.best_analysis, sloc_set: cf.sloc_set) key = create(:api_key, account_id: create(:account).id) get :show, id: organization.to_param, format: :xml, api_key: key.oauth_application.uid must_respond_with :ok end it 'should support show page via xhr' do xhr :get, :show, id: @organization must_respond_with :ok JSON.parse(response.body)['subview_html'].must_match 'Affiliated Committers' end it 'should support ?view=portfolio_projects for show action' do get :show, id: @organization, view: 'portfolio_projects' must_respond_with :ok assert_select 'div#org_summary' end it 'should support projects view as xml' do key = create(:api_key, account_id: create(:account).id) get :show, id: @organization, format: :xml, api_key: key.oauth_application.uid must_respond_with :ok end it 'should get show page for a invalid organization' do get :show, id: 'some_invalid_id' must_respond_with :not_found end it 'should get infographic print view' do get :print_infographic, id: @organization must_respond_with :ok end describe 'settings' do it 'must ask user to log in' do restrict_edits_to_managers(organization, account) get :settings, id: organization.to_param flash[:notice].must_equal I18n.t('permissions.must_log_in') end it 'must alert non managers about read only data' do admin = create(:admin) create(:manage, target: organization, account: admin, approved_by: admin.id) restrict_edits_to_managers(organization, admin) login_as account get :settings, id: organization.to_param flash[:notice].must_equal I18n.t('permissions.not_manager') end it 'must alert non managers even if project and organization url name are same' do create(:project, vanity_url: organization.to_param) restrict_edits_to_managers(organization, account) login_as account get :settings, id: organization.to_param flash[:notice].must_equal I18n.t('permissions.not_manager') end it 'wont show permission alert to an authorized manager' do create(:manage, target: organization, account: account, approved_by: create(:admin).id) restrict_edits_to_managers(organization, account) login_as account get :settings, id: organization.to_param flash[:notice].must_be_nil end end describe 'index' do it 'should redirect to explores path' do get :index must_redirect_to orgs_explores_path end it 'should return organizations when search term is present' do org_1 = create(:organization, name: 'test name1', projects_count: 2) org_2 = create(:organization, name: 'test name2', projects_count: 3) org_3 = create(:organization, name: 'test name3', projects_count: 4) get :index, query: 'test' must_respond_with :ok assigns(:organizations).must_equal [org_3, org_2, org_1] end it 'should return organizations via xml' do create(:organization, name: 'test name1', projects_count: 2) create(:organization, name: 'test name2', projects_count: 3) org_3 = create(:organization, name: 'test name3', projects_count: 4, description: 'test description') api_key = create(:api_key, account_id: account.id) client_id = api_key.oauth_application.uid get :index, format: :xml, api_key: client_id, query: 'test' xml = xml_hash(@response.body)['response'] must_respond_with :ok xml['status'].must_equal 'success' xml['items_returned'].must_equal '3' xml['items_available'].must_equal '3' xml['first_item_position'].must_equal '0' org = xml['result']['org'].first xml['result']['org'].length.must_equal 3 org['name'].must_equal 'test name3' org['url'].must_equal "http://test.host/orgs/#{org_3.vanity_url}.xml" org['html_url'].must_equal 
"http://test.host/orgs/#{org_3.vanity_url}" org['description'].must_equal 'test description' org['vanity_url'].must_equal org_3.vanity_url org['type'].must_equal 'Commercial' org['projects_count'].must_equal org_3.projects_count.to_s org['affiliated_committers'].must_equal '0' end it 'should return unauthorized if api key is invalid' do get :index, format: :xml, api_key: 'dummy_id' must_respond_with :unauthorized end end describe 'list_managers' do it 'should return managers' do login_as account create(:manage, target: organization, account: account) get :list_managers, id: organization.id must_respond_with :ok assigns(:managers).must_equal [account] end end describe 'claim_projects_list' do it 'should return no projects without search term' do get :claim_projects_list, id: organization.to_param must_respond_with :ok assigns(:projects).must_equal [] assigns(:organization).must_equal organization end it 'should return projects with search term' do login_as account pro_1 = create(:project, name: 'test name1', organization_id: nil) pro_2 = create(:project, name: 'test name2', organization_id: nil) pro_3 = create(:project, name: 'test name3', organization_id: nil) get :claim_projects_list, id: organization.to_param, query: 'test' must_respond_with :ok assigns(:projects).pluck(:id).sort.must_equal [pro_1.id, pro_2.id, pro_3.id].sort assigns(:organization).must_equal organization end it 'should return projects with search term with sorting' do pro_1 = create(:project, name: 'test name1') pro_2 = create(:project, name: 'test name2') pro_3 = create(:project, name: 'test name3') get :claim_projects_list, id: organization.to_param, query: 'test', sort: 'new' must_respond_with :ok assigns(:projects).must_equal [pro_3, pro_2, pro_1] assigns(:organization).must_equal organization end end describe 'manage_projects' do it 'should return org managed projects' do pro_1 = create(:project, name: 'test name1', organization_id: organization.id) pro_2 = create(:project, name: 'test name2', organization_id: organization.id) pro_3 = create(:project, name: 'test name3', organization_id: organization.id) get :manage_projects, id: organization.to_param, query: 'test' must_respond_with :ok assigns(:projects).must_equal [pro_3, pro_2, pro_1] assigns(:organization).must_equal organization end it 'should return org managed projects with sorting' do pro_1 = create(:project, name: 'test name1', organization_id: organization.id) pro_2 = create(:project, name: 'test name2', organization_id: organization.id) pro_3 = create(:project, name: 'test name3', organization_id: organization.id) get :manage_projects, id: organization.to_param, query: 'test', sort: 'project_name' must_respond_with :ok assigns(:projects).must_equal [pro_1, pro_2, pro_3] assigns(:organization).must_equal organization end end describe 'claim_project' do it 'should claim a project for the given org' do login_as account pro_1 = create(:project, name: 'test name1') xhr :get, :claim_project, id: organization.to_param, project_id: pro_1.id must_respond_with :ok assigns(:project).organization_id.must_equal organization.id end end describe 'remove_project' do it 'should remove project from org' do login_as account pro_1 = create(:project, name: 'test name1', organization_id: organization.id) get :remove_project, id: organization.to_param, project_id: pro_1.id, source: 'manage_projects' must_redirect_to manage_projects_organization_path(organization) flash[:success].must_equal I18n.t('organizations.remove_project.success', name: pro_1.name) 
pro_1.reload.organization_id.must_equal nil end it 'should remove project from org and redirect to claim_projects_list' do login_as account pro_1 = create(:project, name: 'test name1', organization_id: organization.id) get :remove_project, id: organization.to_param, project_id: pro_1.id, source: 'claim_projects_list' must_redirect_to claim_projects_list_organization_path(organization) flash[:success].must_equal I18n.t('organizations.remove_project.success', name: pro_1.name) pro_1.reload.organization_id.must_equal nil end end describe 'new_manager' do it 'should show new manager form for get request' do get :new_manager, id: organization, account_id: account.id must_respond_with :ok end it 'should show new manager form for get request' do post :new_manager, id: organization, account_id: account.id must_redirect_to list_managers_organization_path(organization) assigns(:manage).target organization end end describe 'create' do it 'should show validation errors' do account.update_column(:level, 10) login_as account post :create, organization: { name: 'test', description: 'tes', vanity_url: '', org_type: '2', homepage_url: 'http://test.com' } must_respond_with :ok assigns(:organization).errors[:vanity_url].must_equal ['can\'t be blank', 'is too short (minimum is 1 character)'] end it 'should save record successfully' do account.update_column(:level, 10) login_as account post :create, organization: { name: 'test', description: 'tes', vanity_url: 'test', org_type: '2', homepage_url: 'http://test.com' } must_redirect_to organization_path(assigns(:organization)) assigns(:organization).valid?.must_equal true end it 'should gracefully handle duplicate vanity_urls' do old_org = create(:organization) account.update_column(:level, 10) login_as account post :create, organization: { name: 'test', description: 'tes', vanity_url: old_org.vanity_url, org_type: '2', homepage_url: 'http://test.com' } must_respond_with :ok assigns(:organization).errors[:vanity_url].must_equal ['has already been taken'] end end describe 'update' do it 'should show validation errors' do login_as account account.update_column(:level, 10) org = create(:organization, name: 'test') put :update, id: org.id, organization: { name: '', description: 'tes', vanity_url: 'test', org_type: '2', homepage_url: 'http://test.com' } must_respond_with 422 assigns(:organization).errors[:name].must_equal ['can\'t be blank', 'is too short (minimum is 3 characters)'] end it 'should save record successfully' do account.update_column(:level, 10) login_as account org = create(:organization, name: 'test') put :update, id: org.id, organization: { name: 'test2', description: 'tes', vanity_url: 'test', org_type: '2', homepage_url: 'http://test.com' } must_redirect_to organization_path(assigns(:organization)) assigns(:organization).name.must_equal 'test2' assigns(:organization).valid?.must_equal true end end describe 'edit' do it 'should set organization' do login_as account account.update_column(:level, 10) org = create(:organization, name: 'test') get :edit, id: org.id assigns(:organization).must_equal org end end describe 'new' do it 'should set new organization' do login_as account account.update_column(:level, 10) get :new assigns(:organization).new_record?.must_equal true end end describe 'claimed projects' do it 'should render show page if organization does not have any claimed projects' do get :projects, id: organization.to_param must_respond_with :redirect must_redirect_to organization_path(organization) end it 'should render projects page if it has 
projects page' do create(:project, organization_id: organization.id) get :projects, id: organization.to_param must_respond_with :ok must_render_template :projects end end end
1
8,985
The name of the test should be updated to "it 'should return bad_request if api key is invalid'"
blackducksoftware-ohloh-ui
rb
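For illustration, the rename the message above asks for might look like the following minimal sketch (the surrounding describe 'index' block and controller setup are assumed from the oldf above; this is a suggestion, not part of the recorded patch):

  # renamed so the description matches the :bad_request assertion
  it 'should return bad_request if api key is invalid' do
    get :index, format: :xml, api_key: 'dummy_id'
    must_respond_with :bad_request
  end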
@@ -806,6 +806,12 @@ class _HyperSearchRunner(object):
 
     # pull out best Model from jobs table
     jobInfo = _clientJobsDB().jobInfo(hyperSearchJob.getJobID())
+
+    # Try to return a decent error message if the job was cancelled for some
+    # reason.
+    if jobInfo.cancel is 1:
+      raise Exception(jobInfo.workerCompletionMsg)
+
     try:
       results = json.loads(jobInfo.results)
     except Exception, e:
1
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ @file permutations_runner.py """ import collections import imp import csv from datetime import datetime, timedelta import os import cPickle as pickle import pprint import shutil import signal import sys import time import subprocess import tempfile import uuid from nupic.support import object_json as json import nupic.database.ClientJobsDAO as cjdao from nupic.swarming import HypersearchWorker, utils from nupic.swarming.HypersearchV2 import HypersearchV2 from nupic.frameworks.opf.exp_generator.ExpGenerator import expGenerator g_currentVerbosityLevel = 0 gCurrentSearch = None DEFAULT_OPTIONS = {"overwrite": False, "expDescJsonPath": None, "expDescConfig": None, "permutationsScriptPath": None, "outputLabel": "swarm_out", "permWorkDir": None, "action": "run", "searchMethod": "v2", "timeout": None, "exports": None, "useTerminators": False, "maxWorkers": 2, "replaceReport": False, "maxPermutations": None, "genTopNDescriptions": 1} class Verbosity(object): """ @private """ WARNING = 0 INFO = 1 DEBUG = 2 def _termHandler(signal, frame): try: jobrunner = gCurrentSearch jobID = jobrunner._HyperSearchRunner__searchJob.getJobID() except Exception as exc: print exc else: print "Canceling jobs due to receiving SIGTERM" cjdao.ClientJobsDAO.get().jobCancel(jobID) def _setupInterruptHandling(): signal.signal(signal.SIGTERM, _termHandler) signal.signal(signal.SIGINT, _termHandler) def _verbosityEnabled(verbosityLevel): return verbosityLevel <= g_currentVerbosityLevel def _emit(verbosityLevel, info): if _verbosityEnabled(verbosityLevel): print info def _escape(s): """Escape commas, tabs, newlines and dashes in a string Commas are encoded as tabs """ assert isinstance(s, str), \ "expected %s but got %s; value=%s" % (type(str), type(s), s) s = s.replace("\\", "\\\\") s = s.replace("\n", "\\n") s = s.replace("\t", "\\t") s = s.replace(",", "\t") return s def _engineServicesRunning(): """ Return true if the engine services are running """ process = subprocess.Popen(["ps", "aux"], stdout=subprocess.PIPE) stdout = process.communicate()[0] result = process.returncode if result != 0: raise RuntimeError("Unable to check for running client job manager") # See if the CJM is running running = False for line in stdout.split("\n"): if "python" in line and "clientjobmanager.client_job_manager" in line: running = True break return running def _runHyperSearch(runOptions): global gCurrentSearch # Run HyperSearch startTime = time.time() search = _HyperSearchRunner(runOptions) # Save in global for the signal handler. 
gCurrentSearch = search if runOptions["action"] in ("run", "dryRun"): search.runNewSearch() else: search.pickupSearch() # Generate reports # Print results and generate report csv file modelParams = _HyperSearchRunner.generateReport( options=runOptions, replaceReport=runOptions["replaceReport"], hyperSearchJob=search.peekSearchJob(), metricsKeys=search.getDiscoveredMetricsKeys()) secs = time.time() - startTime hours = int(secs) / (60 * 60) secs -= hours * (60 * 60) minutes = int(secs) / 60 secs -= minutes * 60 print "Elapsed time (h:mm:ss): %d:%02d:%02d" % (hours, minutes, int(secs)) jobID = search.peekSearchJob().getJobID() print "Hypersearch ClientJobs job ID: ", jobID return modelParams def _injectDefaultOptions(options): return dict(DEFAULT_OPTIONS, **options) def _validateOptions(options): if "expDescJsonPath" not in options \ and "expDescConfig" not in options \ and "permutationsScriptPath" not in options: raise Exception("Options must contain one of the following: " "expDescJsonPath, expDescConfig, or " "permutationsScriptPath.") def _generateExpFilesFromSwarmDescription(swarmDescriptionJson, outDir): # The expGenerator expects the JSON without newlines for an unknown reason. expDescConfig = json.dumps(swarmDescriptionJson) expDescConfig = expDescConfig.splitlines() expDescConfig = "".join(expDescConfig) expGenerator([ "--description=%s" % (expDescConfig), "--outDir=%s" % (outDir)]) def _runAction(runOptions): action = runOptions["action"] # Print Nupic HyperSearch results from the current or last run if action == "report": returnValue = _HyperSearchRunner.generateReport( options=runOptions, replaceReport=runOptions["replaceReport"], hyperSearchJob=None, metricsKeys=None) # Run HyperSearch elif action in ("run", "dryRun", "pickup"): returnValue = _runHyperSearch(runOptions) else: raise Exception("Unhandled action: %s" % action) return returnValue def _checkOverwrite(options, outDir): overwrite = options["overwrite"] if not overwrite: for name in ("description.py", "permutations.py"): if os.path.exists(os.path.join(outDir, name)): raise RuntimeError("The %s file already exists and will be " "overwritten by this tool. If it is OK to overwrite " "this file, use the --overwrite option." % \ os.path.join(outDir, "description.py")) # The overwrite option has already been used, so should be removed from the # config at this point. del options["overwrite"] def runWithConfig(swarmConfig, options, outDir=None, outputLabel="default", permWorkDir=None, verbosity=1): """ Starts a swarm, given an dictionary configuration. @param swarmConfig {dict} A complete [swarm description](https://github.com/numenta/nupic/wiki/Running-Swarms#the-swarm-description) object. @param outDir {string} Optional path to write swarm details (defaults to current working directory). @param outputLabel {string} Optional label for output (defaults to "default"). @param permWorkDir {string} Optional location of working directory (defaults to current working directory). @param verbosity {int} Optional (1,2,3) increasing verbosity of output. @returns {object} Model parameters """ global g_currentVerbosityLevel g_currentVerbosityLevel = verbosity # Generate the description and permutations.py files in the same directory # for reference. 
if outDir is None: outDir = os.getcwd() if permWorkDir is None: permWorkDir = os.getcwd() _checkOverwrite(options, outDir) _generateExpFilesFromSwarmDescription(swarmConfig, outDir) options["expDescConfig"] = swarmConfig options["outputLabel"] = outputLabel options["permWorkDir"] = permWorkDir runOptions = _injectDefaultOptions(options) _validateOptions(runOptions) return _runAction(runOptions) def runWithJsonFile(expJsonFilePath, options, outputLabel, permWorkDir): """ Starts a swarm, given a path to a JSON file containing configuration. This function is meant to be used with a CLI wrapper that passes command line arguments in through the options parameter. @param expJsonFilePath {string} Path to a JSON file containing the complete [swarm description](https://github.com/numenta/nupic/wiki/Running-Swarms#the-swarm-description). @param options {dict} CLI options. @param outputLabel {string} Label for output. @param permWorkDir {string} Location of working directory. @returns {int} Swarm job id. """ if "verbosityCount" in options: verbosity = options["verbosityCount"] del options["verbosityCount"] else: verbosity = 1 _setupInterruptHandling() with open(expJsonFilePath, "rb") as jsonFile: expJsonConfig = json.loads(jsonFile.read()) outDir = os.path.dirname(expJsonFilePath) return runWithConfig(expJsonConfig, options, outDir=outDir, outputLabel=outputLabel, permWorkDir=permWorkDir, verbosity=verbosity) def runWithPermutationsScript(permutationsFilePath, options, outputLabel, permWorkDir): """ Starts a swarm, given a path to a permutations.py script. This function is meant to be used with a CLI wrapper that passes command line arguments in through the options parameter. @param permutationsFilePath {string} Path to permutations.py. @param options {dict} CLI options. @param outputLabel {string} Label for output. @param permWorkDir {string} Location of working directory. @returns {object} Model parameters. """ global g_currentVerbosityLevel if "verbosityCount" in options: g_currentVerbosityLevel = options["verbosityCount"] del options["verbosityCount"] else: g_currentVerbosityLevel = 1 _setupInterruptHandling() options["permutationsScriptPath"] = permutationsFilePath options["outputLabel"] = outputLabel options["permWorkDir"] = permWorkDir # Assume it's a permutations python script runOptions = _injectDefaultOptions(options) _validateOptions(runOptions) return _runAction(runOptions) def runPermutations(_): """ DEPRECATED. Use @ref runWithConfig. """ raise DeprecationWarning( "nupic.swarming.permutations_runner.runPermutations() is no longer " "implemented. It has been replaced with a simpler function for library " "usage: nupic.swarming.permutations_runner.runWithConfig(). 
See docs " "at https://github.com/numenta/nupic/wiki/Running-Swarms#running-a-swarm-" "programmatically for details.") def _setUpExports(exports): ret = "" if exports is None: return ret exportDict = json.loads(exports) for key in exportDict.keys(): ret+= "export %s=%s;" % (str(key), str(exportDict[key])) return ret def _clientJobsDB(): """ Returns: The shared cjdao.ClientJobsDAO instance """ return cjdao.ClientJobsDAO.get() def _nupicHyperSearchHasErrors(hyperSearchJob): """Check whether any experiments failed in our latest hypersearch Parameters: hyperSearchJob: _HyperSearchJob instance; if None, will get it from saved jobID, if any Returns: False if all models succeeded, True if one or more had errors """ # TODO flesh me out # Get search ID for our latest hypersearch # Query Nupic for experiment failures in the given search return False class _HyperSearchRunner(object): """ @private Manages one instance of HyperSearch""" def __init__(self, options): """ Parameters: ---------------------------------------------------------------------- options: NupicRunPermutations options dict retval: nothing """ self.__cjDAO = _clientJobsDB() self._options = options # _HyperSearchJob instance set up by runNewSearch() and pickupSearch() self.__searchJob = None self.__foundMetrcsKeySet = set() # If we are instead relying on the engine to launch workers for us, this # will stay as None, otherwise it becomes an array of subprocess Popen # instances. self._workers = None return def runNewSearch(self): """Start a new hypersearch job and monitor it to completion Parameters: ---------------------------------------------------------------------- retval: nothing """ self.__searchJob = self.__startSearch() self.monitorSearchJob() def pickupSearch(self): """Pick up the latest search from a saved jobID and monitor it to completion Parameters: ---------------------------------------------------------------------- retval: nothing """ self.__searchJob = self.loadSavedHyperSearchJob( permWorkDir=self._options["permWorkDir"], outputLabel=self._options["outputLabel"]) self.monitorSearchJob() def monitorSearchJob(self): """ Parameters: ---------------------------------------------------------------------- retval: nothing """ assert self.__searchJob is not None jobID = self.__searchJob.getJobID() startTime = time.time() lastUpdateTime = datetime.now() # Monitor HyperSearch and report progress # NOTE: may be -1 if it can't be determined expectedNumModels = self.__searchJob.getExpectedNumModels( searchMethod = self._options["searchMethod"]) lastNumFinished = 0 finishedModelIDs = set() finishedModelStats = _ModelStats() # Keep track of the worker state, results, and milestones from the job # record lastWorkerState = None lastJobResults = None lastModelMilestones = None lastEngStatus = None hyperSearchFinished = False while not hyperSearchFinished: jobInfo = self.__searchJob.getJobStatus(self._workers) # Check for job completion BEFORE processing models; NOTE: this permits us # to process any models that we may not have accounted for in the # previous iteration. 
hyperSearchFinished = jobInfo.isFinished() # Look for newly completed models, and process them modelIDs = self.__searchJob.queryModelIDs() _emit(Verbosity.DEBUG, "Current number of models is %d (%d of them completed)" % ( len(modelIDs), len(finishedModelIDs))) if len(modelIDs) > 0: # Build a list of modelIDs to check for completion checkModelIDs = [] for modelID in modelIDs: if modelID not in finishedModelIDs: checkModelIDs.append(modelID) del modelIDs # Process newly completed models if checkModelIDs: _emit(Verbosity.DEBUG, "Checking %d models..." % (len(checkModelIDs))) errorCompletionMsg = None for (i, modelInfo) in enumerate(_iterModels(checkModelIDs)): _emit(Verbosity.DEBUG, "[%s] Checking completion: %s" % (i, modelInfo)) if modelInfo.isFinished(): finishedModelIDs.add(modelInfo.getModelID()) finishedModelStats.update(modelInfo) if (modelInfo.getCompletionReason().isError() and not errorCompletionMsg): errorCompletionMsg = modelInfo.getCompletionMsg() # Update the set of all encountered metrics keys (we will use # these to print column names in reports.csv) metrics = modelInfo.getReportMetrics() self.__foundMetrcsKeySet.update(metrics.keys()) numFinished = len(finishedModelIDs) # Print current completion stats if numFinished != lastNumFinished: lastNumFinished = numFinished if expectedNumModels is None: expModelsStr = "" else: expModelsStr = "of %s" % (expectedNumModels) stats = finishedModelStats print ("<jobID: %s> %s %s models finished [success: %s; %s: %s; %s: " "%s; %s: %s; %s: %s; %s: %s; %s: %s]" % ( jobID, numFinished, expModelsStr, #stats.numCompletedSuccess, (stats.numCompletedEOF+stats.numCompletedStopped), "EOF" if stats.numCompletedEOF else "eof", stats.numCompletedEOF, "STOPPED" if stats.numCompletedStopped else "stopped", stats.numCompletedStopped, "KILLED" if stats.numCompletedKilled else "killed", stats.numCompletedKilled, "ERROR" if stats.numCompletedError else "error", stats.numCompletedError, "ORPHANED" if stats.numCompletedError else "orphaned", stats.numCompletedOrphaned, "UNKNOWN" if stats.numCompletedOther else "unknown", stats.numCompletedOther)) # Print the first error message from the latest batch of completed # models if errorCompletionMsg: print "ERROR MESSAGE: %s" % errorCompletionMsg # Print the new worker state, if it changed workerState = jobInfo.getWorkerState() if workerState != lastWorkerState: print "##>> UPDATED WORKER STATE: \n%s" % (pprint.pformat(workerState, indent=4)) lastWorkerState = workerState # Print the new job results, if it changed jobResults = jobInfo.getResults() if jobResults != lastJobResults: print "####>> UPDATED JOB RESULTS: \n%s (elapsed time: %g secs)" \ % (pprint.pformat(jobResults, indent=4), time.time()-startTime) lastJobResults = jobResults # Print the new model milestones if they changed modelMilestones = jobInfo.getModelMilestones() if modelMilestones != lastModelMilestones: print "##>> UPDATED MODEL MILESTONES: \n%s" % ( pprint.pformat(modelMilestones, indent=4)) lastModelMilestones = modelMilestones # Print the new engine status if it changed engStatus = jobInfo.getEngStatus() if engStatus != lastEngStatus: print "##>> UPDATED STATUS: \n%s" % (engStatus) lastEngStatus = engStatus # Sleep before next check if not hyperSearchFinished: if self._options["timeout"] != None: if ((datetime.now() - lastUpdateTime) > timedelta(minutes=self._options["timeout"])): print "Timeout reached, exiting" self.__cjDAO.jobCancel(jobID) sys.exit(1) time.sleep(1) # Tabulate results modelIDs = self.__searchJob.queryModelIDs() print 
"Evaluated %s models" % len(modelIDs) print "HyperSearch finished!" jobInfo = self.__searchJob.getJobStatus(self._workers) print "Worker completion message: %s" % (jobInfo.getWorkerCompletionMsg()) def _launchWorkers(self, cmdLine, numWorkers): """ Launch worker processes to execute the given command line Parameters: ----------------------------------------------- cmdLine: The command line for each worker numWorkers: number of workers to launch """ self._workers = [] for i in range(numWorkers): args = ["bash", "-c", cmdLine] stdout = tempfile.TemporaryFile() stderr = tempfile.TemporaryFile() p = subprocess.Popen(args, bufsize=1, env=os.environ, shell=False, stdin=None, stdout=stdout, stderr=stderr) self._workers.append(p) def __startSearch(self): """Starts HyperSearch as a worker or runs it inline for the "dryRun" action Parameters: ---------------------------------------------------------------------- retval: the new _HyperSearchJob instance representing the HyperSearch job """ # This search uses a pre-existing permutations script params = _ClientJobUtils.makeSearchJobParamsDict(options=self._options, forRunning=True) if self._options["action"] == "dryRun": args = [sys.argv[0], "--params=%s" % (json.dumps(params))] print print "==================================================================" print "RUNNING PERMUTATIONS INLINE as \"DRY RUN\"..." print "==================================================================" jobID = HypersearchWorker.main(args) else: cmdLine = _setUpExports(self._options["exports"]) # Begin the new search. The {JOBID} string is replaced by the actual # jobID returned from jobInsert. cmdLine += "$HYPERSEARCH" maxWorkers = self._options["maxWorkers"] jobID = self.__cjDAO.jobInsert( client="GRP", cmdLine=cmdLine, params=json.dumps(params), minimumWorkers=1, maximumWorkers=maxWorkers, jobType=self.__cjDAO.JOB_TYPE_HS) cmdLine = "python -m nupic.swarming.HypersearchWorker" \ " --jobID=%d" % (jobID) self._launchWorkers(cmdLine, maxWorkers) searchJob = _HyperSearchJob(jobID) # Save search ID to file (this is used for report generation) self.__saveHyperSearchJobID( permWorkDir=self._options["permWorkDir"], outputLabel=self._options["outputLabel"], hyperSearchJob=searchJob) if self._options["action"] == "dryRun": print "Successfully executed \"dry-run\" hypersearch, jobID=%d" % (jobID) else: print "Successfully submitted new HyperSearch job, jobID=%d" % (jobID) _emit(Verbosity.DEBUG, "Each worker executing the command line: %s" % (cmdLine,)) return searchJob def peekSearchJob(self): """Retrieves the runner's _HyperSearchJob instance; NOTE: only available after run(). Parameters: ---------------------------------------------------------------------- retval: _HyperSearchJob instance or None """ assert self.__searchJob is not None return self.__searchJob def getDiscoveredMetricsKeys(self): """Returns a tuple of all metrics keys discovered while running HyperSearch. NOTE: This is an optimization so that our client may use this info for generating the report csv file without having to pre-scan all modelInfos Parameters: ---------------------------------------------------------------------- retval: Tuple of metrics keys discovered while running HyperSearch; """ return tuple(self.__foundMetrcsKeySet) @classmethod def printModels(cls, options): """Prints a listing of experiments that would take place without actually executing them. 
Parameters: ---------------------------------------------------------------------- options: NupicRunPermutations options dict retval: nothing """ print "Generating experiment requests..." searchParams = _ClientJobUtils.makeSearchJobParamsDict(options=options) @classmethod def generateReport(cls, options, replaceReport, hyperSearchJob, metricsKeys): """Prints all available results in the given HyperSearch job and emits model information to the permutations report csv. The job may be completed or still in progress. Parameters: ---------------------------------------------------------------------- options: NupicRunPermutations options dict replaceReport: True to replace existing report csv, if any; False to append to existing report csv, if any hyperSearchJob: _HyperSearchJob instance; if None, will get it from saved jobID, if any metricsKeys: sequence of report metrics key names to include in report; if None, will pre-scan all modelInfos to generate a complete list of metrics key names. retval: model parameters """ # Load _HyperSearchJob instance from storage, if not provided if hyperSearchJob is None: hyperSearchJob = cls.loadSavedHyperSearchJob( permWorkDir=options["permWorkDir"], outputLabel=options["outputLabel"]) modelIDs = hyperSearchJob.queryModelIDs() bestModel = None # If metricsKeys was not provided, pre-scan modelInfos to create the list; # this is needed by _ReportCSVWriter # Also scan the parameters to generate a list of encoders and search # parameters metricstmp = set() searchVar = set() for modelInfo in _iterModels(modelIDs): if modelInfo.isFinished(): vars = modelInfo.getParamLabels().keys() searchVar.update(vars) metrics = modelInfo.getReportMetrics() metricstmp.update(metrics.keys()) if metricsKeys is None: metricsKeys = metricstmp # Create a csv report writer reportWriter = _ReportCSVWriter(hyperSearchJob=hyperSearchJob, metricsKeys=metricsKeys, searchVar=searchVar, outputDirAbsPath=options["permWorkDir"], outputLabel=options["outputLabel"], replaceReport=replaceReport) # Tallies of experiment dispositions modelStats = _ModelStats() #numCompletedOther = long(0) print "\nResults from all experiments:" print "----------------------------------------------------------------" # Get common optimization metric info from permutations script searchParams = hyperSearchJob.getParams() (optimizationMetricKey, maximizeMetric) = ( _PermutationUtils.getOptimizationMetricInfo(searchParams)) # Print metrics, while looking for the best model formatStr = None # NOTE: we may find additional metrics if HyperSearch is still running foundMetricsKeySet = set(metricsKeys) sortedMetricsKeys = [] # pull out best Model from jobs table jobInfo = _clientJobsDB().jobInfo(hyperSearchJob.getJobID()) try: results = json.loads(jobInfo.results) except Exception, e: print "json.loads(jobInfo.results) raised an exception. 
" \ "Here is some info to help with debugging:" print "jobInfo: ", jobInfo print "jobInfo.results: ", jobInfo.results print "EXCEPTION: ", e raise bestModelNum = results["bestModel"] bestModelIterIndex = None # performance metrics for the entire job totalWallTime = 0 totalRecords = 0 # At the end, we will sort the models by their score on the optimization # metric scoreModelIDDescList = [] for (i, modelInfo) in enumerate(_iterModels(modelIDs)): # Output model info to report csv reportWriter.emit(modelInfo) # Update job metrics totalRecords+=modelInfo.getNumRecords() format = "%Y-%m-%d %H:%M:%S" startTime = modelInfo.getStartTime() if modelInfo.isFinished(): endTime = modelInfo.getEndTime() st = datetime.strptime(startTime, format) et = datetime.strptime(endTime, format) totalWallTime+=(et-st).seconds # Tabulate experiment dispositions modelStats.update(modelInfo) # For convenience expDesc = modelInfo.getModelDescription() reportMetrics = modelInfo.getReportMetrics() optimizationMetrics = modelInfo.getOptimizationMetrics() if modelInfo.getModelID() == bestModelNum: bestModel = modelInfo bestModelIterIndex=i bestMetric = optimizationMetrics.values()[0] # Keep track of the best-performing model if optimizationMetrics: assert len(optimizationMetrics) == 1, ( "expected 1 opt key, but got %d (%s) in %s" % ( len(optimizationMetrics), optimizationMetrics, modelInfo)) # Append to our list of modelIDs and scores if modelInfo.getCompletionReason().isEOF(): scoreModelIDDescList.append((optimizationMetrics.values()[0], modelInfo.getModelID(), modelInfo.getGeneratedDescriptionFile(), modelInfo.getParamLabels())) print "[%d] Experiment %s\n(%s):" % (i, modelInfo, expDesc) if (modelInfo.isFinished() and not (modelInfo.getCompletionReason().isStopped or modelInfo.getCompletionReason().isEOF())): print ">> COMPLETION MESSAGE: %s" % modelInfo.getCompletionMsg() if reportMetrics: # Update our metrics key set and format string foundMetricsKeySet.update(reportMetrics.iterkeys()) if len(sortedMetricsKeys) != len(foundMetricsKeySet): sortedMetricsKeys = sorted(foundMetricsKeySet) maxKeyLen = max([len(k) for k in sortedMetricsKeys]) formatStr = " %%-%ds" % (maxKeyLen+2) # Print metrics for key in sortedMetricsKeys: if key in reportMetrics: if key == optimizationMetricKey: m = "%r (*)" % reportMetrics[key] else: m = "%r" % reportMetrics[key] print formatStr % (key+":"), m print # Summarize results print "--------------------------------------------------------------" if len(modelIDs) > 0: print "%d experiments total (%s).\n" % ( len(modelIDs), ("all completed successfully" if (modelStats.numCompletedKilled + modelStats.numCompletedEOF) == len(modelIDs) else "WARNING: %d models have not completed or there were errors" % ( len(modelIDs) - ( modelStats.numCompletedKilled + modelStats.numCompletedEOF + modelStats.numCompletedStopped)))) if modelStats.numStatusOther > 0: print "ERROR: models with unexpected status: %d" % ( modelStats.numStatusOther) print "WaitingToStart: %d" % modelStats.numStatusWaitingToStart print "Running: %d" % modelStats.numStatusRunning print "Completed: %d" % modelStats.numStatusCompleted if modelStats.numCompletedOther > 0: print " ERROR: models with unexpected completion reason: %d" % ( modelStats.numCompletedOther) print " ran to EOF: %d" % modelStats.numCompletedEOF print " ran to stop signal: %d" % modelStats.numCompletedStopped print " were orphaned: %d" % modelStats.numCompletedOrphaned print " killed off: %d" % modelStats.numCompletedKilled print " failed: %d" % 
modelStats.numCompletedError assert modelStats.numStatusOther == 0, "numStatusOther=%s" % ( modelStats.numStatusOther) assert modelStats.numCompletedOther == 0, "numCompletedOther=%s" % ( modelStats.numCompletedOther) else: print "0 experiments total." # Print out the field contributions print global gCurrentSearch jobStatus = hyperSearchJob.getJobStatus(gCurrentSearch._workers) jobResults = jobStatus.getResults() if "fieldContributions" in jobResults: print "Field Contributions:" pprint.pprint(jobResults["fieldContributions"], indent=4) else: print "Field contributions info not available" # Did we have an optimize key? if bestModel is not None: maxKeyLen = max([len(k) for k in sortedMetricsKeys]) maxKeyLen = max(maxKeyLen, len(optimizationMetricKey)) formatStr = " %%-%ds" % (maxKeyLen+2) bestMetricValue = bestModel.getOptimizationMetrics().values()[0] optimizationMetricName = bestModel.getOptimizationMetrics().keys()[0] print print "Best results on the optimization metric %s (maximize=%s):" % ( optimizationMetricName, maximizeMetric) print "[%d] Experiment %s (%s):" % ( bestModelIterIndex, bestModel, bestModel.getModelDescription()) print formatStr % (optimizationMetricName+":"), bestMetricValue print print "Total number of Records processed: %d" % totalRecords print print "Total wall time for all models: %d" % totalWallTime hsJobParams = hyperSearchJob.getParams() # Were we asked to write out the top N model description files? if options["genTopNDescriptions"] > 0: print "\nGenerating description files for top %d models..." % ( options["genTopNDescriptions"]) scoreModelIDDescList.sort() scoreModelIDDescList = scoreModelIDDescList[ 0:options["genTopNDescriptions"]] i = -1 for (score, modelID, description, paramLabels) in scoreModelIDDescList: i += 1 outDir = os.path.join(options["permWorkDir"], "model_%d" % (i)) print "Generating description file for model %s at %s" % \ (modelID, outDir) if not os.path.exists(outDir): os.makedirs(outDir) # Fix up the location to the base description file description = description.replace( "importBaseDescription('base.py', config)", "importBaseDescription('../description.py', config)") fd = open(os.path.join(outDir, "description.py"), "wb") fd.write(description) fd.close() # Generate a csv file with the parameter settings in it fd = open(os.path.join(outDir, "params.csv"), "wb") writer = csv.writer(fd) colNames = paramLabels.keys() colNames.sort() writer.writerow(colNames) row = [paramLabels[x] for x in colNames] writer.writerow(row) fd.close() print "Generating model params file..." 
# Generate a model params file alongside the description.py mod = imp.load_source("description", os.path.join(outDir, "description.py")) model_description = mod.descriptionInterface.getModelDescription() fd = open(os.path.join(outDir, "model_params.py"), "wb") fd.write("%s\nMODEL_PARAMS = %s" % (utils.getCopyrightHead(), pprint.pformat(model_description))) fd.close() print reportWriter.finalize() return model_description @classmethod def loadSavedHyperSearchJob(cls, permWorkDir, outputLabel): """Instantiates a _HyperSearchJob instance from info saved in file Parameters: ---------------------------------------------------------------------- permWorkDir: Directory path for saved jobID file outputLabel: Label string for incorporating into file name for saved jobID retval: _HyperSearchJob instance; raises exception if not found """ jobID = cls.__loadHyperSearchJobID(permWorkDir=permWorkDir, outputLabel=outputLabel) searchJob = _HyperSearchJob(nupicJobID=jobID) return searchJob @classmethod def __saveHyperSearchJobID(cls, permWorkDir, outputLabel, hyperSearchJob): """Saves the given _HyperSearchJob instance's jobID to file Parameters: ---------------------------------------------------------------------- permWorkDir: Directory path for saved jobID file outputLabel: Label string for incorporating into file name for saved jobID hyperSearchJob: _HyperSearchJob instance retval: nothing """ jobID = hyperSearchJob.getJobID() filePath = cls.__getHyperSearchJobIDFilePath(permWorkDir=permWorkDir, outputLabel=outputLabel) if os.path.exists(filePath): _backupFile(filePath) d = dict(hyperSearchJobID = jobID) with open(filePath, "wb") as jobIdPickleFile: pickle.dump(d, jobIdPickleFile) @classmethod def __loadHyperSearchJobID(cls, permWorkDir, outputLabel): """Loads a saved jobID from file Parameters: ---------------------------------------------------------------------- permWorkDir: Directory path for saved jobID file outputLabel: Label string for incorporating into file name for saved jobID retval: HyperSearch jobID; raises exception if not found. """ filePath = cls.__getHyperSearchJobIDFilePath(permWorkDir=permWorkDir, outputLabel=outputLabel) jobID = None with open(filePath, "rb") as jobIdPickleFile: jobInfo = pickle.load(jobIdPickleFile) jobID = jobInfo["hyperSearchJobID"] return jobID @classmethod def __getHyperSearchJobIDFilePath(cls, permWorkDir, outputLabel): """Returns filepath where to store HyperSearch JobID Parameters: ---------------------------------------------------------------------- permWorkDir: Directory path for saved jobID file outputLabel: Label string for incorporating into file name for saved jobID retval: Filepath where to store HyperSearch JobID """ # Get the base path and figure out the path of the report file. 
basePath = permWorkDir # Form the name of the output csv file that will contain all the results filename = "%s_HyperSearchJobID.pkl" % (outputLabel,) filepath = os.path.join(basePath, filename) return filepath class _ModelStats(object): """ @private """ def __init__(self): # Tallies of experiment dispositions self.numStatusWaitingToStart = long(0) self.numStatusRunning = long(0) self.numStatusCompleted = long(0) self.numStatusOther = long(0) #self.numCompletedSuccess = long(0) self.numCompletedKilled = long(0) self.numCompletedError = long(0) self.numCompletedStopped = long(0) self.numCompletedEOF = long(0) self.numCompletedOther = long(0) self.numCompletedOrphaned = long(0) def update(self, modelInfo): # Tabulate experiment dispositions if modelInfo.isWaitingToStart(): self.numStatusWaitingToStart += 1 elif modelInfo.isRunning(): self.numStatusRunning += 1 elif modelInfo.isFinished(): self.numStatusCompleted += 1 reason = modelInfo.getCompletionReason() # if reason.isSuccess(): # self.numCompletedSuccess += 1 if reason.isEOF(): self.numCompletedEOF += 1 elif reason.isKilled(): self.numCompletedKilled += 1 elif reason.isStopped(): self.numCompletedStopped += 1 elif reason.isError(): self.numCompletedError += 1 elif reason.isOrphaned(): self.numCompletedOrphaned += 1 else: self.numCompletedOther += 1 else: self.numStatusOther += 1 class _ReportCSVWriter(object): """ @private """ __totalModelTime = timedelta() def __init__(self, hyperSearchJob, metricsKeys, searchVar, outputDirAbsPath, outputLabel, replaceReport): """ Parameters: ---------------------------------------------------------------------- hyperSearchJob: _HyperSearchJob instance metricsKeys: sequence of report metrics key names to include in report outputDirAbsPath: Directory for creating report CSV file (absolute path) outputLabel: A string label to incorporate into report CSV file name replaceReport: True to replace existing report csv, if any; False to append to existing report csv, if any retval: nothing """ self.__searchJob = hyperSearchJob self.__searchJobID = hyperSearchJob.getJobID() self.__sortedMetricsKeys = sorted(metricsKeys) self.__outputDirAbsPath = os.path.abspath(outputDirAbsPath) self.__outputLabel = outputLabel self.__replaceReport = replaceReport self.__sortedVariableNames=searchVar # These are set up by __openAndInitCSVFile self.__csvFileObj = None self.__reportCSVPath = None self.__backupCSVPath = None def emit(self, modelInfo): """Emit model info to csv file Parameters: ---------------------------------------------------------------------- modelInfo: _NupicModelInfo instance retval: nothing """ # Open/init csv file, if needed if self.__csvFileObj is None: # sets up self.__sortedVariableNames and self.__csvFileObj self.__openAndInitCSVFile(modelInfo) csv = self.__csvFileObj # Emit model info row to report.csv print >> csv, "%s, " % (self.__searchJobID), print >> csv, "%s, " % (modelInfo.getModelID()), print >> csv, "%s, " % (modelInfo.statusAsString()), if modelInfo.isFinished(): print >> csv, "%s, " % (modelInfo.getCompletionReason()), else: print >> csv, "NA, ", if not modelInfo.isWaitingToStart(): print >> csv, "%s, " % (modelInfo.getStartTime()), else: print >> csv, "NA, ", if modelInfo.isFinished(): dateFormat = "%Y-%m-%d %H:%M:%S" startTime = modelInfo.getStartTime() endTime = modelInfo.getEndTime() print >> csv, "%s, " % endTime, st = datetime.strptime(startTime, dateFormat) et = datetime.strptime(endTime, dateFormat) print >> csv, "%s, " % (str((et - st).seconds)), else: print >> csv, "NA, ", print >> 
csv, "NA, ",
    print >> csv, "%s, " % str(modelInfo.getModelDescription()),
    print >> csv, "%s, " % str(modelInfo.getNumRecords()),

    paramLabelsDict = modelInfo.getParamLabels()
    for key in self.__sortedVariableNames:
      # Some values are complex structures, which need to be represented as
      # strings
      if key in paramLabelsDict:
        print >> csv, "%s, " % (paramLabelsDict[key]),
      else:
        print >> csv, "None, ",

    metrics = modelInfo.getReportMetrics()
    for key in self.__sortedMetricsKeys:
      value = metrics.get(key, "NA")
      value = str(value)
      value = value.replace("\n", " ")
      print >> csv, "%s, " % (value),

    print >> csv


  def finalize(self):
    """Close file and print report/backup csv file paths

    Parameters:
    ----------------------------------------------------------------------
    retval:         nothing
    """
    if self.__csvFileObj is not None:
      # Done with file
      self.__csvFileObj.close()
      self.__csvFileObj = None

      print "Report csv saved in %s" % (self.__reportCSVPath,)

      if self.__backupCSVPath:
        print "Previous report csv file was backed up to %s" % \
              (self.__backupCSVPath,)
    else:
      print "Nothing was written to report csv file."


  def __openAndInitCSVFile(self, modelInfo):
    """
    - Backs up old report csv file;
    - opens the report csv file in append or overwrite mode (per
      self.__replaceReport);
    - emits column fields;
    - sets up self.__sortedVariableNames, self.__csvFileObj,
      self.__backupCSVPath, and self.__reportCSVPath

    Parameters:
    ----------------------------------------------------------------------
    modelInfo:      First _NupicModelInfo instance passed to emit()

    retval:         nothing
    """
    # Get the base path and figure out the path of the report file.
    basePath = self.__outputDirAbsPath

    # Form the name of the output csv file that will contain all the results
    reportCSVName = "%s_Report.csv" % (self.__outputLabel,)
    reportCSVPath = self.__reportCSVPath = os.path.join(basePath,
                                                        reportCSVName)

    # If a report CSV file already exists, back it up
    backupCSVPath = None
    if os.path.exists(reportCSVPath):
      backupCSVPath = self.__backupCSVPath = _backupFile(reportCSVPath)

    # Open report file
    if self.__replaceReport:
      mode = "w"
    else:
      mode = "a"
    csv = self.__csvFileObj = open(reportCSVPath, mode)

    # If we are appending, add some blank line separators
    if not self.__replaceReport and backupCSVPath:
      print >> csv
      print >> csv

    # Print the column names
    print >> csv, "jobID, ",
    print >> csv, "modelID, ",
    print >> csv, "status, ",
    print >> csv, "completionReason, ",
    print >> csv, "startTime, ",
    print >> csv, "endTime, ",
    print >> csv, "runtime(s), ",
    print >> csv, "expDesc, ",
    print >> csv, "numRecords, ",

    for key in self.__sortedVariableNames:
      print >> csv, "%s, " % key,
    for key in self.__sortedMetricsKeys:
      print >> csv, "%s, " % key,
    print >> csv



class _NupicJob(object):
  """ @private
  Our Nupic Job abstraction"""


  def __init__(self, nupicJobID):
    """_NupicJob constructor

    Parameters:
    ----------------------------------------------------------------------
    nupicJobID:     Nupic Client JobID of the job

    retval:         nothing
    """
    self.__nupicJobID = nupicJobID

    jobInfo = _clientJobsDB().jobInfo(nupicJobID)
    assert jobInfo is not None, "jobID=%s not found" % nupicJobID
    assert jobInfo.jobId == nupicJobID, "%s != %s" % (jobInfo.jobId,
                                                      nupicJobID)
    _emit(Verbosity.DEBUG, "_NupicJob: \n%s" % pprint.pformat(jobInfo,
                                                              indent=4))

    if jobInfo.params is not None:
      self.__params = json.loads(jobInfo.params)
    else:
      self.__params = None


  def __repr__(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         representation of this _NupicJob instance
    """
    return "%s(jobID=%s)" % (self.__class__.__name__, self.__nupicJobID)


  def getJobStatus(self, workers):
    """
    Parameters:
    ----------------------------------------------------------------------
    workers:        If this job was launched outside of the nupic job engine,
                    then this is an array of subprocess Popen instances, one
                    for each worker

    retval:         _NupicJob.JobStatus instance
    """
    jobInfo = self.JobStatus(self.__nupicJobID, workers)
    return jobInfo


  def getJobID(self):
    """Semi-private method for retrieving the jobId

    Parameters:
    ----------------------------------------------------------------------
    retval:         Nupic Client JobID of this _NupicJob instance
    """
    return self.__nupicJobID


  def getParams(self):
    """Semi-private method for retrieving the job-specific params

    Parameters:
    ----------------------------------------------------------------------
    retval:         Job params dict corresponding to the JSON params value
                    returned by ClientJobsDAO.jobInfo()
    """
    return self.__params


  class JobStatus(object):
    """ @private
    Our Nupic Job Info abstraction class"""

    # Job Status values (per ClientJobsDAO.py):
    __nupicJobStatus_NotStarted = cjdao.ClientJobsDAO.STATUS_NOTSTARTED
    __nupicJobStatus_Starting = cjdao.ClientJobsDAO.STATUS_STARTING
    __nupicJobStatus_running = cjdao.ClientJobsDAO.STATUS_RUNNING
    __nupicJobStatus_completed = cjdao.ClientJobsDAO.STATUS_COMPLETED


    def __init__(self, nupicJobID, workers):
      """_NupicJob.JobStatus Constructor

      Parameters:
      ----------------------------------------------------------------------
      nupicJobID:   Nupic ClientJob ID
      workers:      If this job was launched outside of the Nupic job engine,
                    then this is an array of subprocess Popen instances, one
                    for each worker

      retval:       nothing
      """
      jobInfo = _clientJobsDB().jobInfo(nupicJobID)
      assert jobInfo.jobId == nupicJobID, "%s != %s" % (jobInfo.jobId,
                                                        nupicJobID)

      # If we launched the workers ourself, set the job status based on the
      # workers that are still running
      if workers is not None:
        runningCount = 0
        for worker in workers:
          retCode = worker.poll()
          if retCode is None:
            runningCount += 1

        if runningCount > 0:
          status = cjdao.ClientJobsDAO.STATUS_RUNNING
        else:
          status = cjdao.ClientJobsDAO.STATUS_COMPLETED

        jobInfo = jobInfo._replace(status=status)

      _emit(Verbosity.DEBUG, "JobStatus: \n%s" % pprint.pformat(jobInfo,
                                                                indent=4))
      self.__jobInfo = jobInfo


    def __repr__(self):
      return "%s(jobId=%s, status=%s, completionReason=%s, " \
             "startTime=%s, endTime=%s)" % (
                 self.__class__.__name__, self.__jobInfo.jobId,
                 self.statusAsString(), self.__jobInfo.completionReason,
                 self.__jobInfo.startTime, self.__jobInfo.endTime)


    def statusAsString(self):
      """
      Parameters:
      ----------------------------------------------------------------------
      retval:       Job status as a human-readable string
      """
      return self.__jobInfo.status


    def isWaitingToStart(self):
      """
      Parameters:
      ----------------------------------------------------------------------
      retval:       True if the job has not been started yet
      """
      waiting = (self.__jobInfo.status == self.__nupicJobStatus_NotStarted)
      return waiting


    def isStarting(self):
      """
      Parameters:
      ----------------------------------------------------------------------
      retval:       True if the job is starting
      """
      starting = (self.__jobInfo.status == self.__nupicJobStatus_Starting)
      return starting


    def isRunning(self):
      """
      Parameters:
      ----------------------------------------------------------------------
      retval:       True if the job is running
      """
      running = (self.__jobInfo.status == self.__nupicJobStatus_running)
      return running


    def isFinished(self):
      """
      Parameters:
      ----------------------------------------------------------------------
      retval:       True if the job has finished (either with success or
                    failure)
      """
      done = (self.__jobInfo.status == self.__nupicJobStatus_completed)
      return done


    def getCompletionReason(self):
      """Returns _JobCompletionReason.
      NOTE: it's an error to call this method if isFinished() would return
      False.

      Parameters:
      ----------------------------------------------------------------------
      retval:       _JobCompletionReason instance
      """
      assert self.isFinished(), "Too early to tell: %s" % self
      return _JobCompletionReason(self.__jobInfo.completionReason)


    def getCompletionMsg(self):
      """Returns job completion message.
      NOTE: it's an error to call this method if isFinished() would return
      False.

      Parameters:
      ----------------------------------------------------------------------
      retval:       completion message
      """
      assert self.isFinished(), "Too early to tell: %s" % self
      return "%s" % self.__jobInfo.completionMsg


    def getWorkerCompletionMsg(self):
      """Returns the worker generated completion message.
      NOTE: it's an error to call this method if isFinished() would return
      False.

      Parameters:
      ----------------------------------------------------------------------
      retval:       completion message
      """
      assert self.isFinished(), "Too early to tell: %s" % self
      return "%s" % self.__jobInfo.workerCompletionMsg


    def getStartTime(self):
      """Returns job start time.
      NOTE: it's an error to call this method if isWaitingToStart() would
      return True.

      Parameters:
      ----------------------------------------------------------------------
      retval:       job processing start time
      """
      assert not self.isWaitingToStart(), "Too early to tell: %s" % self
      return "%s" % self.__jobInfo.startTime


    def getEndTime(self):
      """Returns job end time.
      NOTE: it's an error to call this method if isFinished() would return
      False.

      Parameters:
      ----------------------------------------------------------------------
      retval:       job processing end time
      """
      assert self.isFinished(), "Too early to tell: %s" % self
      return "%s" % self.__jobInfo.endTime


    def getWorkerState(self):
      """Returns the worker state field.

      Parameters:
      ----------------------------------------------------------------------
      retval:       worker state field as a dict
      """
      if self.__jobInfo.engWorkerState is not None:
        return json.loads(self.__jobInfo.engWorkerState)
      else:
        return None


    def getResults(self):
      """Returns the results field.

      Parameters:
      ----------------------------------------------------------------------
      retval:       job results field as a dict
      """
      if self.__jobInfo.results is not None:
        return json.loads(self.__jobInfo.results)
      else:
        return None


    def getModelMilestones(self):
      """Returns the model milestones field.

      Parameters:
      ----------------------------------------------------------------------
      retval:       model milestones as a dict
      """
      if self.__jobInfo.engModelMilestones is not None:
        return json.loads(self.__jobInfo.engModelMilestones)
      else:
        return None


    def getEngStatus(self):
      """Returns the engine status field - used for progress messages

      Parameters:
      ----------------------------------------------------------------------
      retval:       engine status field as string
      """
      return self.__jobInfo.engStatus



class _JobCompletionReason(object):
  """ @private
  Represents completion reason for Client Jobs and Models"""

  def __init__(self, reason):
    """
    Parameters:
    ----------------------------------------------------------------------
    reason:         completion reason value from ClientJobsDAO.jobInfo()
    """
    self.__reason = reason


  def __str__(self):
    return "%s" % self.__reason


  def __repr__(self):
    return "%s(reason=%s)" % (self.__class__.__name__, self.__reason)


  def isEOF(self):
    return self.__reason == cjdao.ClientJobsDAO.CMPL_REASON_EOF


  def isSuccess(self):
    return self.__reason == cjdao.ClientJobsDAO.CMPL_REASON_SUCCESS


  def isStopped(self):
    return self.__reason == cjdao.ClientJobsDAO.CMPL_REASON_STOPPED


  def isKilled(self):
    return self.__reason == cjdao.ClientJobsDAO.CMPL_REASON_KILLED


  def isOrphaned(self):
    return self.__reason == cjdao.ClientJobsDAO.CMPL_REASON_ORPHAN


  def isError(self):
    return self.__reason == cjdao.ClientJobsDAO.CMPL_REASON_ERROR



class _HyperSearchJob(_NupicJob):
  """ @private
  This class represents a single running Nupic HyperSearch job"""


  def __init__(self, nupicJobID):
    """
    Parameters:
    ----------------------------------------------------------------------
    nupicJobID:     Nupic Client JobID of a HyperSearch job

    retval:         nothing
    """
    super(_HyperSearchJob, self).__init__(nupicJobID)

    # Cache of the total count of expected models or -1 if it can't be
    # determined.
    #
    # Set by getExpectedNumModels()
    #
    # TODO: update code to handle non-ronomatic search algorithms
    self.__expectedNumModels = None


  def queryModelIDs(self):
    """Queries DB for model IDs of all currently instantiated models
    associated with this HyperSearch job.

    See also: _iterModels()

    Parameters:
    ----------------------------------------------------------------------
    retval:         A sequence of Nupic modelIDs
    """
    jobID = self.getJobID()
    modelCounterPairs = _clientJobsDB().modelsGetUpdateCounters(jobID)
    modelIDs = tuple(x[0] for x in modelCounterPairs)

    return modelIDs


  def getExpectedNumModels(self, searchMethod):
    """Returns: the total number of expected models if known, -1 if it can't
    be determined.

    NOTE: this can take a LONG time to complete for HyperSearches with a huge
          number of possible permutations.

    Parameters:
    ----------------------------------------------------------------------
    searchMethod:   "v2" is the only method currently supported
    retval:         The total number of expected models, if known; -1 if
                    unknown
    """
    return self.__expectedNumModels



class _ClientJobUtils(object):
  """ @private
  Our Nupic Client Job utilities"""


  @classmethod
  def makeSearchJobParamsDict(cls, options, forRunning=False):
    """Constructs a dictionary of HyperSearch parameters suitable for
    converting to json and passing as the params argument to
    ClientJobsDAO.jobInsert()

    Parameters:
    ----------------------------------------------------------------------
    options:        NupicRunPermutations options dict
    forRunning:     True if the params are for running a Hypersearch job;
                    False if params are for introspection only.

    retval:         A dictionary of HyperSearch parameters for
                    ClientJobsDAO.jobInsert()
    """
    if options["searchMethod"] == "v2":
      hsVersion = "v2"
    else:
      raise Exception("Unsupported search method: %r" %
                      options["searchMethod"])

    maxModels = options["maxPermutations"]
    if options["action"] == "dryRun" and maxModels is None:
      maxModels = 1

    useTerminators = options["useTerminators"]
    if useTerminators is None:
      params = {
          "hsVersion": hsVersion,
          "maxModels": maxModels,
      }
    else:
      params = {
          "hsVersion": hsVersion,
          "useTerminators": useTerminators,
          "maxModels": maxModels,
      }

    if forRunning:
      params["persistentJobGUID"] = str(uuid.uuid1())

    if options["permutationsScriptPath"]:
      params["permutationsPyFilename"] = options["permutationsScriptPath"]
    elif options["expDescConfig"]:
      params["description"] = options["expDescConfig"]
    else:
      with open(options["expDescJsonPath"], mode="r") as fp:
        params["description"] = json.load(fp)

    return params



class _PermutationUtils(object):
  """ @private
  Utilities for running permutations"""


  @classmethod
  def getOptimizationMetricInfo(cls, searchJobParams):
    """Retrieves the optimization key name and optimization function.

    Parameters:
    ---------------------------------------------------------
    searchJobParams:
                    Parameter for passing as the searchParams arg to
                    Hypersearch constructor.
    retval:         (optimizationMetricKey, maximize)
                    optimizationMetricKey: which report key to optimize for
                    maximize: True if we should try and maximize the
                      optimizeKey metric. False if we should minimize it.
    """
    if searchJobParams["hsVersion"] == "v2":
      search = HypersearchV2(searchParams=searchJobParams)
    else:
      raise RuntimeError("Unsupported hypersearch version \"%s\"" % \
                         (searchJobParams["hsVersion"]))

    info = search.getOptimizationMetricInfo()
    return info



def _backupFile(filePath):
  """Back up a file

  Parameters:
  ----------------------------------------------------------------------
  filePath:       Path of the file to back up

  retval:         Filepath of the back-up
  """
  assert os.path.exists(filePath)

  stampNum = 0
  (prefix, suffix) = os.path.splitext(filePath)
  while True:
    backupPath = "%s.%d%s" % (prefix, stampNum, suffix)
    stampNum += 1
    if not os.path.exists(backupPath):
      break
  shutil.copyfile(filePath, backupPath)

  return backupPath



def _getOneModelInfo(nupicModelID):
  """A convenience function that retrieves information about a single model

  See also: _iterModels()

  Parameters:
  ----------------------------------------------------------------------
  nupicModelID:   Nupic modelID
  retval:         _NupicModelInfo instance for the given nupicModelID.
  """
  return _iterModels([nupicModelID]).next()



def _iterModels(modelIDs):
  """Creates an iterator that returns ModelInfo elements for the given
  modelIDs

  WARNING:        The order of ModelInfo elements returned by the iterator
                  may not match the order of the given modelIDs

  Parameters:
  ----------------------------------------------------------------------
  modelIDs:       A sequence of model identifiers (e.g., as returned by
                  _HyperSearchJob.queryModelIDs()).
  retval:         Iterator that returns ModelInfo elements for the given
                  modelIDs (NOTE: possibly in a different order)
  """

  class ModelInfoIterator(object):
    """ModelInfo iterator implementation class
    """

    # Maximum number of ModelInfo elements to load into cache whenever
    # cache empties
    __CACHE_LIMIT = 1000

    debug = False


    def __init__(self, modelIDs):
      """
      Parameters:
      ----------------------------------------------------------------------
      modelIDs:     a sequence of Nupic model identifiers for which this
                    iterator will return _NupicModelInfo instances.
                    NOTE: The returned instances are NOT guaranteed to be in
                    the same order as the IDs in modelIDs sequence.
      retval:       nothing
      """
      # Make our own copy in case caller changes model id list during
      # iteration
      self.__modelIDs = tuple(modelIDs)

      if self.debug:
        _emit(Verbosity.DEBUG,
              "MODELITERATOR: __init__; numModelIDs=%s" % len(self.__modelIDs))

      self.__nextIndex = 0
      self.__modelCache = collections.deque()
      return


    def __iter__(self):
      """Iterator Protocol function

      Parameters:
      ----------------------------------------------------------------------
      retval:       self
      """
      return self


    def next(self):
      """Iterator Protocol function

      Parameters:
      ----------------------------------------------------------------------
      retval:       A _NupicModelInfo instance or raises StopIteration to
                    signal end of iteration.
      """
      return self.__getNext()


    def __getNext(self):
      """Implementation of the next() Iterator Protocol function.

      When the modelInfo cache becomes empty, queries Nupic and fills the
      cache with the next set of NupicModelInfo instances.

      Parameters:
      ----------------------------------------------------------------------
      retval:       A _NupicModelInfo instance or raises StopIteration to
                    signal end of iteration.
      """
      if self.debug:
        _emit(Verbosity.DEBUG,
              "MODELITERATOR: __getNext(); modelCacheLen=%s" % (
                  len(self.__modelCache)))

      if not self.__modelCache:
        self.__fillCache()

      if not self.__modelCache:
        raise StopIteration()

      return self.__modelCache.popleft()


    def __fillCache(self):
      """Queries Nupic and fills an empty modelInfo cache with the next set
      of _NupicModelInfo instances

      Parameters:
      ----------------------------------------------------------------------
      retval:       nothing
      """
      assert (not self.__modelCache)

      # Assemble a list of model IDs to look up
      numModelIDs = len(self.__modelIDs) if self.__modelIDs else 0

      if self.__nextIndex >= numModelIDs:
        return

      idRange = self.__nextIndex + self.__CACHE_LIMIT
      if idRange > numModelIDs:
        idRange = numModelIDs

      lookupIDs = self.__modelIDs[self.__nextIndex:idRange]

      self.__nextIndex += (idRange - self.__nextIndex)

      # Query Nupic for model info of all models in the look-up list
      # NOTE: the order of results may not be the same as lookupIDs
      infoList = _clientJobsDB().modelsInfo(lookupIDs)
      assert len(infoList) == len(lookupIDs), \
          "modelsInfo returned %s elements; expected %s." % \
          (len(infoList), len(lookupIDs))

      # Create _NupicModelInfo instances and add them to cache
      for rawInfo in infoList:
        modelInfo = _NupicModelInfo(rawInfo=rawInfo)
        self.__modelCache.append(modelInfo)

      assert len(self.__modelCache) == len(lookupIDs), \
          "Added %s elements to modelCache; expected %s." % \
          (len(self.__modelCache), len(lookupIDs))

      if self.debug:
        _emit(Verbosity.DEBUG,
              "MODELITERATOR: Leaving __fillCache(); modelCacheLen=%s" % \
              (len(self.__modelCache),))

  return ModelInfoIterator(modelIDs)



class _NupicModelInfo(object):
  """ @private
  This class represents information obtained from ClientJobManager about a
  model
  """

  __nupicModelStatus_notStarted = cjdao.ClientJobsDAO.STATUS_NOTSTARTED
  __nupicModelStatus_running = cjdao.ClientJobsDAO.STATUS_RUNNING
  __nupicModelStatus_completed = cjdao.ClientJobsDAO.STATUS_COMPLETED
  __rawInfo = None


  def __init__(self, rawInfo):
    """
    Parameters:
    ----------------------------------------------------------------------
    rawInfo:        A single model information element as returned by
                    ClientJobsDAO.modelsInfo()
    retval:         nothing.
    """
    # Namedtuple returned by ClientJobsDAO.modelsInfo()
    self.__rawInfo = rawInfo

    # Cached model metrics (see __unwrapResults())
    self.__cachedResults = None

    assert self.__rawInfo.params is not None
    # Cached model params (see __unwrapParams())
    self.__cachedParams = None


  def __repr__(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         Representation of this _NupicModelInfo instance.
    """
    return ("%s(jobID=%s, modelID=%s, status=%s, completionReason=%s, "
            "updateCounter=%s, numRecords=%s)" % (
                "_NupicModelInfo",
                self.__rawInfo.jobId,
                self.__rawInfo.modelId,
                self.__rawInfo.status,
                self.__rawInfo.completionReason,
                self.__rawInfo.updateCounter,
                self.__rawInfo.numRecords))


  def getModelID(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         Nupic modelID associated with this model info.
    """
    return self.__rawInfo.modelId


  def statusAsString(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         Human-readable string representation of the model's
                    status.
    """
    return "%s" % self.__rawInfo.status


  def getModelDescription(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         Printable description of the model.
    """
    params = self.__unwrapParams()

    if "experimentName" in params:
      return params["experimentName"]
    else:
      paramSettings = self.getParamLabels()
      # Form a csv friendly string representation of this model
      items = []
      for key, value in paramSettings.items():
        items.append("%s_%s" % (key, value))
      return ".".join(items)


  def getGeneratedDescriptionFile(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         Contents of the sub-experiment description file for
                    this model
    """
    return self.__rawInfo.genDescription


  def getNumRecords(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         The number of records processed by the model.
    """
    return self.__rawInfo.numRecords


  def getParamLabels(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         a dictionary of model parameter labels. For each entry
                    the key is the name of the parameter and the value is
                    the value chosen for it.
    """
    params = self.__unwrapParams()

    # Hypersearch v2 stores the flattened parameter settings in
    # "particleState"
    if "particleState" in params:
      retval = dict()
      queue = [(pair, retval) for pair in
               params["particleState"]["varStates"].iteritems()]
      while len(queue) > 0:
        pair, output = queue.pop()
        k, v = pair
        if ("position" in v and "bestPosition" in v and "velocity" in v):
          output[k] = v["position"]
        else:
          if k not in output:
            output[k] = dict()
          queue.extend((pair, output[k]) for pair in v.iteritems())
      return retval


  def __unwrapParams(self):
    """Unwraps self.__rawInfo.params into the equivalent python dictionary
    and caches it in self.__cachedParams. Returns the unwrapped params

    Parameters:
    ----------------------------------------------------------------------
    retval:         Model params dictionary corresponding to the json as
                    returned in ClientJobsDAO.modelsInfo()[x].params
    """
    if self.__cachedParams is None:
      self.__cachedParams = json.loads(self.__rawInfo.params)
      assert self.__cachedParams is not None, \
          "%s resulted in None" % self.__rawInfo.params

    return self.__cachedParams


  def getReportMetrics(self):
    """Retrieves a dictionary of metrics designated for report

    Parameters:
    ----------------------------------------------------------------------
    retval:         a dictionary of metrics that were collected for the
                    model or an empty dictionary if there aren't any.
    """
    return self.__unwrapResults().reportMetrics


  def getOptimizationMetrics(self):
    """Retrieves a dictionary of metrics designated for optimization

    Parameters:
    ----------------------------------------------------------------------
    retval:         a dictionary of optimization metrics that were collected
                    for the model or an empty dictionary if there aren't any.
    """
    return self.__unwrapResults().optimizationMetrics


  def getAllMetrics(self):
    """Retrieves a dictionary of metrics that combines all report and
    optimization metrics

    Parameters:
    ----------------------------------------------------------------------
    retval:         a dictionary of report and optimization metrics that
                    were collected for the model; an empty dictionary if
                    there aren't any.
    """
    result = self.getReportMetrics()
    result.update(self.getOptimizationMetrics())
    return result


  ModelResults = collections.namedtuple("ModelResultsTuple",
                                        ["reportMetrics",
                                         "optimizationMetrics"])
  """Each element is a dictionary: property name is the metric name and
  property value is the metric value as generated by the model
  """


  def __unwrapResults(self):
    """Unwraps self.__rawInfo.results and caches it in
    self.__cachedResults; returns the unwrapped results

    Parameters:
    ----------------------------------------------------------------------
    retval:         ModelResults namedtuple instance
    """
    if self.__cachedResults is None:
      if self.__rawInfo.results is not None:
        resultList = json.loads(self.__rawInfo.results)
        assert len(resultList) == 2, \
            "Expected 2 elements, but got %s (%s)." % (
                len(resultList), resultList)
        self.__cachedResults = self.ModelResults(
            reportMetrics=resultList[0],
            optimizationMetrics=resultList[1])
      else:
        self.__cachedResults = self.ModelResults(
            reportMetrics={},
            optimizationMetrics={})

    return self.__cachedResults


  def isWaitingToStart(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:       True if the model has not been started yet
    """
    waiting = (self.__rawInfo.status == self.__nupicModelStatus_notStarted)
    return waiting


  def isRunning(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:       True if the model is currently being evaluated
    """
    running = (self.__rawInfo.status == self.__nupicModelStatus_running)
    return running


  def isFinished(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:       True if the model's processing has completed (either with
                  success or failure).
    """
    finished = (self.__rawInfo.status == self.__nupicModelStatus_completed)
    return finished


  def getCompletionReason(self):
    """Returns _ModelCompletionReason.

    NOTE: it's an error to call this method if isFinished() would return
    False.

    Parameters:
    ----------------------------------------------------------------------
    retval:       _ModelCompletionReason instance
    """
    assert self.isFinished(), "Too early to tell: %s" % self
    return _ModelCompletionReason(self.__rawInfo.completionReason)


  def getCompletionMsg(self):
    """Returns model completion message.

    NOTE: it's an error to call this method if isFinished() would return
    False.

    Parameters:
    ----------------------------------------------------------------------
    retval:       completion message
    """
    assert self.isFinished(), "Too early to tell: %s" % self
    return self.__rawInfo.completionMsg


  def getStartTime(self):
    """Returns model evaluation start time.

    NOTE: it's an error to call this method if isWaitingToStart() would
    return True.

    Parameters:
    ----------------------------------------------------------------------
    retval:       model evaluation start time
    """
    assert not self.isWaitingToStart(), "Too early to tell: %s" % self
    return "%s" % self.__rawInfo.startTime


  def getEndTime(self):
    """Returns model evaluation end time.

    NOTE: it's an error to call this method if isFinished() would return
    False.

    Parameters:
    ----------------------------------------------------------------------
    retval:       model evaluation end time
    """
    assert self.isFinished(), "Too early to tell: %s" % self
    return "%s" % self.__rawInfo.endTime



class _ModelCompletionReason(_JobCompletionReason):
  """ @private
  """
  pass
1
15,192
Use `==` in this case instead of `is`.
numenta-nupic
py
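A brief aside on the review comment in the row above, since it is terse: in Python, `is` compares object identity while `==` compares values, so status and completion-reason strings like the ones in this file should be compared with `==`. A minimal sketch of the difference (the variables here are hypothetical illustrations, not code from the nupic codebase):

# Hypothetical illustration of identity vs. equality; not part of the dataset row.
status = "".join(["comp", "leted"])   # builds a new string object at runtime
print(status == "completed")          # True: the two values match
print(status is "completed")          # usually False in CPython: distinct objects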
@@ -53,6 +53,7 @@ type CStorPoolAttr struct {
 	OverProvisioning bool `json:"overProvisioning"` //true or false
 }
 
+// CStorPoolPhase is a CStorPool phase.
 type CStorPoolPhase string
 
 // Status written onto CStorPool and CStorVolumeReplica objects.
1
/*
Copyright 2018 The OpenEBS Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +genclient
// +genclient:noStatus
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +genclient:nonNamespaced
// +resource:path=cstorpool

// CStorPool describes a cstor pool resource created as a custom resource.
type CStorPool struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              CStorPoolSpec   `json:"spec"`
	Status            CStorPoolStatus `json:"status"`
}

// CStorPoolSpec is the spec listing fields for a CStorPool resource.
type CStorPoolSpec struct {
	Disks    DiskAttr      `json:"disks"`
	PoolSpec CStorPoolAttr `json:"poolSpec"`
}

// DiskAttr stores the disk related attributes.
type DiskAttr struct {
	DiskList []string `json:"diskList"`
}

// CStorPoolAttr is to describe zpool related attributes.
type CStorPoolAttr struct {
	CacheFile        string `json:"cacheFile"`        //optional, faster if specified
	PoolType         string `json:"poolType"`         //mirror, striped
	OverProvisioning bool   `json:"overProvisioning"` //true or false
}

type CStorPoolPhase string

// Status written onto CStorPool and CStorVolumeReplica objects.
const (
	// CStorPoolStatusEmpty ensures the create operation is to be done, if import fails.
	CStorPoolStatusEmpty CStorPoolPhase = ""

	// CStorPoolStatusOnline ensures the resource is available.
	CStorPoolStatusOnline CStorPoolPhase = "Online"

	// CStorPoolStatusOffline ensures the resource is not available.
	CStorPoolStatusOffline CStorPoolPhase = "Offline"

	// CStorPoolStatusDeletionFailed ensures the resource deletion has failed.
	CStorPoolStatusDeletionFailed CStorPoolPhase = "DeletionFailed"

	// CStorPoolStatusInvalid ensures invalid resource.
	CStorPoolStatusInvalid CStorPoolPhase = "Invalid"

	// CStorPoolStatusErrorDuplicate ensures error due to duplicate resource.
	CStorPoolStatusErrorDuplicate CStorPoolPhase = "ErrorDuplicate"

	// CStorPoolStatusPending ensures pending task for cstorpool.
	CStorPoolStatusPending CStorPoolPhase = "Pending"
)

// CStorPoolStatus is for handling status of pool.
type CStorPoolStatus struct {
	Phase CStorPoolPhase `json:"phase"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +resource:path=cstorpools

// CStorPoolList is a list of CStorPool resources
type CStorPoolList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`

	Items []CStorPool `json:"items"`
}
1
9,930
Please put it like this: `CStorPoolPhase is a typed string for phase field of CStorPool`
openebs-maya
go
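For context on the review comment in the row above: Go's lint convention is that a doc comment on an exported identifier should be a full sentence beginning with the identifier's name. Applying the reviewer's suggested wording to the type touched by the patch would look like this (a sketch showing only the requested comment; the type declaration comes from the file above):

// CStorPoolPhase is a typed string for phase field of CStorPool.
type CStorPoolPhase string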